content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Amir's roaming data
# Slope between each two months, average and from month 1 to 5
# Score: 0.67884
library(dplyr)
library(knitr)
library(RWeka)
# Wrap three Weka learners as R model-fitting functions (RWeka interface).
RF <- make_Weka_classifier("weka/classifiers/trees/RandomForest")
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
MLP <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
# Load the raw competition tables; paths are relative to the working directory.
trainDf <- read.csv('data/train.csv')
testDf <- read.csv('data/test.csv')
contractRefDf <- read.csv('data/contract_ref.csv')
calendarRefDf <- read.csv('data/calendar_ref.csv')
dailyAggDf <- read.csv('data/daily_aggregate.csv')
roamingDf <- read.csv('data/roaming_monthly.csv')
# The Weka classifiers need a factor response, not an integer label.
trainDf$TARGET <- as.factor(trainDf$TARGET)
# Row-wise slope features, meant to be used with apply(df, 1, f): `x` is a
# single row coerced to a character vector, hence the as.double() casts.
# NOTE(review): the hard-coded indices (3, 5, 7, 9, 11) assume the monthly
# usage columns sit at fixed positions every 2 columns -- confirm against
# the column order of train.csv/test.csv.
slope1 <- function(x){
as.double(x[5])-as.double(x[3])
}
slope2 <- function(x){
as.double(x[7])-as.double(x[5])
}
slope3 <- function(x){
as.double(x[9])-as.double(x[7])
}
slope4 <- function(x){
as.double(x[11])-as.double(x[9])
}
# Average monthly change over the whole month-1-to-month-5 window.
slope1to5 <- function(x){
(as.double(x[11])-as.double(x[3]))/4
}
# Mean of the four month-to-month slopes stored at row positions 13-16
# (the slop1..slop4 columns appended to the train table).
# BUGFIX: the original wrote `return (sum)/4`; `return()` exits the
# function immediately, so the division was dead code and the raw sum
# (not the average) was returned.
avg_slope <- function(x){
  (as.double(x[13]) + as.double(x[14]) + as.double(x[15]) + as.double(x[16])) / 4
}
# Same as avg_slope but for the test table, whose slop1..slop4 columns sit
# one position earlier (row positions 12-15).
# BUGFIX: `return (sum)/4` exited at return(), so the division by 4 never
# happened and the raw sum was returned.
avg2_slope <- function(x){
  (as.double(x[12]) + as.double(x[13]) + as.double(x[14]) + as.double(x[15])) / 4
}
trainRoamDf <- trainDf
# prepare train data: month-to-month slope features computed row-wise
trainRoamDf$slop1 <- apply(trainRoamDf,1,slope1)
trainRoamDf$slop2 <- apply(trainRoamDf,1,slope2)
trainRoamDf$slop3 <- apply(trainRoamDf,1,slope3)
trainRoamDf$slop4 <- apply(trainRoamDf,1,slope4)
trainRoamDf$slop1to5 <- apply(trainRoamDf,1,slope1to5)
trainRoamDf$avg_slop <- apply(trainRoamDf,1,avg_slope)
testRoamDf <- testDf
## prepare test data
testRoamDf$slop1 <- apply(testRoamDf,1,slope1)
testRoamDf$slop2 <- apply(testRoamDf,1,slope2)
testRoamDf$slop3 <- apply(testRoamDf,1,slope3)
testRoamDf$slop4 <- apply(testRoamDf,1,slope4)
testRoamDf$slop1to5 <- apply(testRoamDf,1,slope1to5)
# NOTE(review): the test set uses avg2_slope (positions 12-15) while the
# train set uses avg_slope (13-16) -- presumably because trainDf carries an
# extra TARGET column that shifts the appended slop columns by one; confirm
# against the actual column layouts.
testRoamDf$avg_slop <- apply(testRoamDf,1,avg2_slope)
# Zero-initialise the per-month roaming columns (months 206-210); they are
# filled from roaming_monthly.csv in the loop below.
trainRoamDf[,"R206_USAGE"] <- 0
trainRoamDf[,"R206_SESSION_COUNT"] <- 0
trainRoamDf[,"R207_USAGE"] <- 0
trainRoamDf[,"R207_SESSION_COUNT"] <- 0
trainRoamDf[,"R208_USAGE"] <- 0
trainRoamDf[,"R208_SESSION_COUNT"] <- 0
trainRoamDf[,"R209_USAGE"] <- 0
trainRoamDf[,"R209_SESSION_COUNT"] <- 0
trainRoamDf[,"R210_USAGE"] <- 0
trainRoamDf[,"R210_SESSION_COUNT"] <- 0
testRoamDf[,"R206_USAGE"] <- 0
testRoamDf[,"R206_SESSION_COUNT"] <- 0
testRoamDf[,"R207_USAGE"] <- 0
testRoamDf[,"R207_SESSION_COUNT"] <- 0
testRoamDf[,"R208_USAGE"] <- 0
testRoamDf[,"R208_SESSION_COUNT"] <- 0
testRoamDf[,"R209_USAGE"] <- 0
testRoamDf[,"R209_SESSION_COUNT"] <- 0
testRoamDf[,"R210_USAGE"] <- 0
testRoamDf[,"R210_SESSION_COUNT"] <- 0
# Copy roaming USAGE / SESSION_COUNT for months 206-210 into the matching
# R<month>_* columns of the train or test feature table.
# BUGFIX: the original subset month 209 from `val` (the leftover month-208
# subset) instead of `orig`, in BOTH the train and the test branch, so the
# R209_* columns were never populated. The month loop below always subsets
# from `orig`, which also removes the 10x copy-pasted blocks.
for (k in unique(roamingDf$CONTRACT_KEY)) {
  orig <- roamingDf[roamingDf$CONTRACT_KEY == k, ]
  # A contract key lives in exactly one of the two tables.
  inTrain <- nrow(trainRoamDf[trainRoamDf$CONTRACT_KEY == k, ]) > 0
  for (m in 206:210) {
    val <- orig[orig$CALL_MONTH_KEY == m, ]
    if (nrow(val) > 0) {
      usageCol <- paste0("R", m, "_USAGE")
      sessCol <- paste0("R", m, "_SESSION_COUNT")
      if (inTrain) {
        trainRoamDf[trainRoamDf$CONTRACT_KEY == k, usageCol] <- val$USAGE
        trainRoamDf[trainRoamDf$CONTRACT_KEY == k, sessCol] <- val$SESSION_COUNT
      } else {
        testRoamDf[testRoamDf$CONTRACT_KEY == k, usageCol] <- val$USAGE
        testRoamDf[testRoamDf$CONTRACT_KEY == k, sessCol] <- val$SESSION_COUNT
      }
    }
  }
}
# Subtract roaming traffic from the monthly totals so the X* columns become
# domestic-only usage / session counts (roaming stays in the R* columns).
trainRoamDf <- trainRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
# Same correction for the test table.
testRoamDf <- testRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
# Multilayer perceptron on domestic + roaming + slope features.
myModel <- MLP(TARGET~X206_SESSION_COUNT + X206_USAGE +
X207_SESSION_COUNT + X207_USAGE +
X208_SESSION_COUNT + X208_USAGE +
X209_SESSION_COUNT + X209_USAGE +
X210_SESSION_COUNT + X210_USAGE +
R206_SESSION_COUNT + R206_USAGE +
R207_SESSION_COUNT + R207_USAGE +
R208_SESSION_COUNT + R208_USAGE +
R209_SESSION_COUNT + R209_USAGE +
R210_SESSION_COUNT + R210_USAGE+
slop1 + slop2 + slop3 + slop4+
slop1to5 + avg_slop
, data=trainRoamDf)
# Predict class labels for the test set and write the submission file.
myTarget = predict(myModel, newdata = testRoamDf, type="class")
myResult <- data.frame(CONTRACT_KEY=testRoamDf$CONTRACT_KEY, PREDICTED_TARGET=myTarget)
write.table(myResult, file="output/slopeRoam.csv", sep =",", row.names= FALSE)
write.table(myResult, file="slopeRoam.csv", sep =",", row.names= FALSE) | /scripts/18-mimi0.67884/mimi0.67884.R | no_license | AmirGeorge/csen1061-data-science-project2 | R | false | false | 7,895 | r | # Amir's roaming data
# Slope between each two months, average and from month 1 to 5
# Score: 0.67884
library(dplyr)
library(knitr)
library(RWeka)
RF <- make_Weka_classifier("weka/classifiers/trees/RandomForest")
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
MLP <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
trainDf <- read.csv('data/train.csv')
testDf <- read.csv('data/test.csv')
contractRefDf <- read.csv('data/contract_ref.csv')
calendarRefDf <- read.csv('data/calendar_ref.csv')
dailyAggDf <- read.csv('data/daily_aggregate.csv')
roamingDf <- read.csv('data/roaming_monthly.csv')
trainDf$TARGET <- as.factor(trainDf$TARGET)
slope1 <- function(x){
as.double(x[5])-as.double(x[3])
}
slope2 <- function(x){
as.double(x[7])-as.double(x[5])
}
slope3 <- function(x){
as.double(x[9])-as.double(x[7])
}
slope4 <- function(x){
as.double(x[11])-as.double(x[9])
}
slope1to5 <- function(x){
(as.double(x[11])-as.double(x[3]))/4
}
# Mean of the four month-to-month slopes stored at row positions 13-16.
# BUGFIX: `return (sum)/4` exited at return(), so the division was dead
# code and the raw sum (not the average) was returned.
avg_slope <- function(x){
  (as.double(x[13]) + as.double(x[14]) + as.double(x[15]) + as.double(x[16])) / 4
}
# Test-table variant of avg_slope (slop columns at positions 12-15).
# BUGFIX: `return (sum)/4` exited at return(); the division never ran.
avg2_slope <- function(x){
  (as.double(x[12]) + as.double(x[13]) + as.double(x[14]) + as.double(x[15])) / 4
}
trainRoamDf <- trainDf
#prepare tain data
trainRoamDf$slop1 <- apply(trainRoamDf,1,slope1)
trainRoamDf$slop2 <- apply(trainRoamDf,1,slope2)
trainRoamDf$slop3 <- apply(trainRoamDf,1,slope3)
trainRoamDf$slop4 <- apply(trainRoamDf,1,slope4)
trainRoamDf$slop1to5 <- apply(trainRoamDf,1,slope1to5)
trainRoamDf$avg_slop <- apply(trainRoamDf,1,avg_slope)
testRoamDf <- testDf
##prepare test data
testRoamDf$slop1 <- apply(testRoamDf,1,slope1)
testRoamDf$slop2 <- apply(testRoamDf,1,slope2)
testRoamDf$slop3 <- apply(testRoamDf,1,slope3)
testRoamDf$slop4 <- apply(testRoamDf,1,slope4)
testRoamDf$slop1to5 <- apply(testRoamDf,1,slope1to5)
testRoamDf$avg_slop <- apply(testRoamDf,1,avg2_slope)
trainRoamDf[,"R206_USAGE"] <- 0
trainRoamDf[,"R206_SESSION_COUNT"] <- 0
trainRoamDf[,"R207_USAGE"] <- 0
trainRoamDf[,"R207_SESSION_COUNT"] <- 0
trainRoamDf[,"R208_USAGE"] <- 0
trainRoamDf[,"R208_SESSION_COUNT"] <- 0
trainRoamDf[,"R209_USAGE"] <- 0
trainRoamDf[,"R209_SESSION_COUNT"] <- 0
trainRoamDf[,"R210_USAGE"] <- 0
trainRoamDf[,"R210_SESSION_COUNT"] <- 0
testRoamDf[,"R206_USAGE"] <- 0
testRoamDf[,"R206_SESSION_COUNT"] <- 0
testRoamDf[,"R207_USAGE"] <- 0
testRoamDf[,"R207_SESSION_COUNT"] <- 0
testRoamDf[,"R208_USAGE"] <- 0
testRoamDf[,"R208_SESSION_COUNT"] <- 0
testRoamDf[,"R209_USAGE"] <- 0
testRoamDf[,"R209_SESSION_COUNT"] <- 0
testRoamDf[,"R210_USAGE"] <- 0
testRoamDf[,"R210_SESSION_COUNT"] <- 0
# Copy roaming USAGE / SESSION_COUNT for months 206-210 into the matching
# R<month>_* columns of the train or test feature table.
# BUGFIX: the original subset month 209 from `val` (the leftover month-208
# subset) instead of `orig`, in BOTH branches, so the R209_* columns were
# never populated. Subsetting always from `orig` fixes that and removes the
# copy-pasted per-month blocks.
for (k in unique(roamingDf$CONTRACT_KEY)) {
  orig <- roamingDf[roamingDf$CONTRACT_KEY == k, ]
  # A contract key lives in exactly one of the two tables.
  inTrain <- nrow(trainRoamDf[trainRoamDf$CONTRACT_KEY == k, ]) > 0
  for (m in 206:210) {
    val <- orig[orig$CALL_MONTH_KEY == m, ]
    if (nrow(val) > 0) {
      usageCol <- paste0("R", m, "_USAGE")
      sessCol <- paste0("R", m, "_SESSION_COUNT")
      if (inTrain) {
        trainRoamDf[trainRoamDf$CONTRACT_KEY == k, usageCol] <- val$USAGE
        trainRoamDf[trainRoamDf$CONTRACT_KEY == k, sessCol] <- val$SESSION_COUNT
      } else {
        testRoamDf[testRoamDf$CONTRACT_KEY == k, usageCol] <- val$USAGE
        testRoamDf[testRoamDf$CONTRACT_KEY == k, sessCol] <- val$SESSION_COUNT
      }
    }
  }
}
trainRoamDf <- trainRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
testRoamDf <- testRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
myModel <- MLP(TARGET~X206_SESSION_COUNT + X206_USAGE +
X207_SESSION_COUNT + X207_USAGE +
X208_SESSION_COUNT + X208_USAGE +
X209_SESSION_COUNT + X209_USAGE +
X210_SESSION_COUNT + X210_USAGE +
R206_SESSION_COUNT + R206_USAGE +
R207_SESSION_COUNT + R207_USAGE +
R208_SESSION_COUNT + R208_USAGE +
R209_SESSION_COUNT + R209_USAGE +
R210_SESSION_COUNT + R210_USAGE+
slop1 + slop2 + slop3 + slop4+
slop1to5 + avg_slop
, data=trainRoamDf)
myTarget = predict(myModel, newdata = testRoamDf, type="class")
myResult <- data.frame(CONTRACT_KEY=testRoamDf$CONTRACT_KEY, PREDICTED_TARGET=myTarget)
write.table(myResult, file="output/slopeRoam.csv", sep =",", row.names= FALSE)
write.table(myResult, file="slopeRoam.csv", sep =",", row.names= FALSE) |
# Introducing Oracle R Enterprise
# https://docs.oracle.com/cd/E57012_01/doc.141/e56973/intro.htm#OREUG187
# Interactive exploration script: connects to a local Oracle XE instance
# through ORE and through DBI/ROracle, then times helper functions from
# UtilsRecordLinkage.R.
library(OREbase)
library(OREcommon)
library(OREembed)
library(ORE)
library(DBI)
library(ROracle)
#https://blogs.oracle.com/R/entry/r_to_oracle_database_connectivity
#http://stackoverflow.com/questions/5339796/loading-an-r-package-from-a-custom-directory
# BUGFIX: install.packages() takes literal file paths -- it does not expand
# "*.zip" wildcards -- so the original calls failed. Expand explicitly.
install.packages(Sys.glob("C:/oreclient_install_dir/client/*.zip"), repos = NULL, type = "source")
install.packages(Sys.glob("C:/oreclient_install_dir/supporting/*.zip"), repos = NULL, type = "source")
install.packages("DBI")
install.packages("C:/Users/christoffer/Desktop/RScriptsForOracle/ROracle_1.1-12.tar", repos = NULL, type = "source")
OREbase::factorial(x = 100)
?ore.connect
# NOTE(review): credentials are hard-coded; consider Sys.getenv() instead.
ore.connect(user = "hr", sid = "xe", host = "localhost", password = "admin", port = 1521)
OREbase::ore.is.connected()
?OREbase::ore.connect
?OREbase::ore.exec
OREbase::ore.exec(qry = "SELECT * FROM SI3_FONETIC")
# object <- print(OREbase::ore.exec(qry = "SELECT * FROM SI3_FONETIC"))
OREbase::ore.create(x = data.frame(x = c(1:10), row.names = c(1:10)), table = "ANY_TABLE")
ore.get("ANY_TABLE")
ore.exists("ANY_TABLE")
OREbase::ore.drop(table = "ANY_TABLE")
?interactive
?OREbase::ore.get
OREbase::ore.disconnect()
?Oracle # package ROracle
?dbDriver # package DBI
drive <- dbDriver("Oracle")
conn <- dbConnect(drv = drive, "hr", "admin")
table <- dbReadTable(conn, name = "SI3_FONETIC")
setwd(dir = "C:/Users/christoffer/Desktop/R-programming/")
f <- paste(getwd(), "/rscripts/RecordLinkageStudy/UtilsRecordLinkage.R", sep = "")
t <- file.exists(f)
# Idiom fix: scalar condition -> plain if/else instead of vectorized ifelse().
if (t) source(file = f) else q()
# BUGFIX: unix.time() is a defunct alias; system.time() is the supported
# equivalent with identical semantics.
system.time(expr = sapply(1:ncol(table), function(i) nrow(table[is.na(table[, i]), ])))
# user system elapsed
# 1.25 0.05 1.30
system.time(table.notna <- listNotNa(X = table, fields = 1))
# user system elapsed
# 1.66 0.03 1.70
system.time(table.notna <- clear.all.matrix(data = table.notna, fields = 2:ncol(table.notna)))
# user system elapsed
# 5.14 0.18 5.36
# demo(package = "ORE")
# rJava smoke test
dbDisconnect(conn = conn);
.jinit()
vect <- .jnew("java/util/Vector")
| /rscripts/RO.R | no_license | yngcan/R-programming | R | false | false | 2,134 | r | # Introducing Oracle R Enterprise
# https://docs.oracle.com/cd/E57012_01/doc.141/e56973/intro.htm#OREUG187
library(OREbase)
library(OREcommon)
library(OREembed)
library(ORE)
library(DBI)
library(ROracle)
#https://blogs.oracle.com/R/entry/r_to_oracle_database_connectivity
#http://stackoverflow.com/questions/5339796/loading-an-r-package-from-a-custom-directory
install.packages("C:/oreclient_install_dir/client/*.zip", repos=NULL, type="source")
install.packages("C:/oreclient_install_dir/supporting/*.zip", repos=NULL, type="source")
install.packages("DBI")
install.packages("C:/Users/christoffer/Desktop/RScriptsForOracle/ROracle_1.1-12.tar", repos=NULL, type="source")
OREbase::factorial(x = 100)
?ore.connect
ore.connect(user = "hr", sid = "xe", host = "localhost", password = "admin", port = 1521)
OREbase::ore.is.connected()
?OREbase::ore.connect
?OREbase::ore.exec
OREbase::ore.exec(qry = "SELECT * FROM SI3_FONETIC")
# object <- print(OREbase::ore.exec(qry = "SELECT * FROM SI3_FONETIC"))
OREbase::ore.create(x = data.frame(x = c(1:10), row.names = c(1:10)), table = "ANY_TABLE")
ore.get("ANY_TABLE")
ore.exists("ANY_TABLE")
OREbase::ore.drop(table = "ANY_TABLE")
?interactive
?OREbase::ore.get
OREbase::ore.disconnect()
?Oracle # package ROracle
?dbDriver # package DBI
drive <- dbDriver("Oracle")
conn <- dbConnect(drv = drive, "hr", "admin")
table <- dbReadTable(conn, name = "SI3_FONETIC")
setwd(dir = "C:/Users/christoffer/Desktop/R-programming/")
f <- paste(getwd(), "/rscripts/RecordLinkageStudy/UtilsRecordLinkage.R", sep = "")
t <- file.exists(f)
ifelse(test = t, yes = source(file = f), no = q())
unix.time(expr = sapply(1:ncol(table), function(i) nrow(table[is.na(table[, i]), ])))
# user system elapsed
# 1.25 0.05 1.30
unix.time(table.notna <- listNotNa(X = table, fields = 1))
# user system elapsed
# 1.66 0.03 1.70
unix.time(table.notna <- clear.all.matrix(data = table.notna, fields = 2:ncol(table.notna)))
# user system elapsed
# 5.14 0.18 5.36
# demo(package = "ORE")
# teste com rJava
dbDisconnect(conn = conn);
.jinit()
vect <- .jnew("java/util/Vector")
|
context("select_regression_response_columns")
library(magrittr)
library(mnmacros)
#library(testthat)
# NOTE(review): dropped `rm(list = ls())` -- clearing the global environment
# from a test file is a session-wide side effect; testthat already runs each
# test in its own environment.
set.seed(0)
# NOTE(review): behavior-preserving cleanup -- replaced the reassignable
# shorthands T/F with TRUE/FALSE and `=` assignment with `<-`; fixed
# comment typos. No test semantics were changed.

# single column char: fewer than 2 columns must error
y1 <- rep(c("a", "b"), c(5, 5))
expect_error(
  data.frame(y1, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column factor
y2 <- rep(c("a", "b"), c(5, 5)) %>% factor()
expect_error(
  data.frame(y2, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column numeric
y3 <- rnorm(10)
expect_error(
  data.frame(y3, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column integer
y4 <- rnorm(10) %>% as.integer()
expect_error(
  data.frame(y4, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column char low variance
y5 <- rep("a", 10)
expect_error(
  data.frame(y5, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column factor low variance
y6 <- rep("a", 10) %>% factor()
expect_error(
  data.frame(y6, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column numeric low variance
y7 <- rep(1, 10)
expect_error(
  data.frame(y7, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# single column integer low variance
y8 <- rep(1, 10) %>% as.integer()
expect_error(
  data.frame(y8, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% ncol() >= 2",
  fixed = TRUE)
# double column char: non-numeric columns are never selected
actual <-
  data.frame(y1, y1, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# double column factor
actual <-
  data.frame(y2, y2, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# double column numeric: both numeric columns are selected
actual <-
  data.frame(y3, y3, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c("y3", "y3.1")
expect_equal(actual, expected)
# double column integer
actual <-
  data.frame(y4, y4, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c("y4", "y4.1")
expect_equal(actual, expected)
# double column char low variance
actual <-
  data.frame(y5, y5, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# double column factor low variance
actual <-
  data.frame(y6, y6, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# double column numeric low variance: constant columns are excluded
actual <-
  data.frame(y7, y7, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# double column integer low variance
actual <-
  data.frame(y8, y8, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c()
expect_equal(actual, expected)
# single column too small: fewer than 3 rows must error
y9 <- rnorm(2)
expect_error(
  data.frame(y9, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% nrow() >= 3",
  fixed = TRUE)
# double column too small
expect_error(
  data.frame(y9, y9, stringsAsFactors = FALSE) %>%
    select_regression_response_columns(),
  "data %>% nrow() >= 3",
  fixed = TRUE)
# mixed columns: only the numeric, non-constant columns survive
actual <-
  data.frame(y1, y2, y3, y4, y5, y6, y7, y8, stringsAsFactors = FALSE) %>%
  select_regression_response_columns()
expected <- c("y3", "y4")
expect_equal(actual, expected)
| /tests/testthat/test.select_regression_response_columns.r | permissive | aun-antonio/mndredge | R | false | false | 3,670 | r | context("select_regression_response_columns")
library(magrittr)
library(mnmacros)
#library(testthat)
rm(list = ls())
set.seed(0)
# single column char
y1 <- rep(c("a", "b"), c(5, 5))
expect_error(
data.frame(y1, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column factor
y2 <- rep(c("a", "b"), c(5, 5)) %>% factor()
expect_error(
data.frame(y2, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column numeric
y3 <- rnorm(10)
expect_error(
data.frame(y3, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column integer
y4 <- rnorm(10) %>% as.integer()
expect_error(
data.frame(y4, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column char low varance
y5 <- rep("a", 10)
expect_error(
data.frame(y5, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column factor low varance
y6 <- rep("a", 10) %>% factor()
expect_error(
data.frame(y6, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column numeric low varance
y7 <- rep(1, 10)
expect_error(
data.frame(y7, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# single column integer low varance
y8 <- rep(1, 10) %>% as.integer()
expect_error(
data.frame(y8, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% ncol() >= 2",
fixed = T)
# double column char
actual <-
data.frame(y1, y1, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# double column factor
actual <-
data.frame(y2, y2, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# double column numeric
actual <-
data.frame(y3, y3, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c("y3", "y3.1")
expect_equal(actual, expected)
# double column integer
actual <-
data.frame(y4, y4, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c("y4", "y4.1")
expect_equal(actual, expected)
# double column char low varance
actual <-
data.frame(y5, y5, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# double column factor low varance
actual <-
data.frame(y6, y6, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# double column numeric low varance
actual <-
data.frame(y7, y7, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# double column integer low varance
actual <-
data.frame(y8, y8, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c()
expect_equal(actual, expected)
# single column too small
y9 = rnorm(2)
expect_error(
data.frame(y9, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% nrow() >= 3",
fixed = T)
# double column too small
expect_error(
data.frame(y9, y9, stringsAsFactors = F) %>%
select_regression_response_columns(),
"data %>% nrow() >= 3",
fixed = T)
# mixed column
actual <-
data.frame(y1, y2, y3, y4, y5, y6, y7, y8, stringsAsFactors = F) %>%
select_regression_response_columns()
expected <-
c("y3", "y4")
expect_equal(actual, expected)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateSets.R
% NOTE(review): this file is generated -- apply these documentation fixes in
% the roxygen comments of R/generateSets.R and re-run roxygenise().
\name{generateSets}
\alias{generateSets}
\title{Generate multiple datasets}
\usage{
generateSets(n, klemms, species, samples, x, mode = "env", name)
}
\arguments{
\item{n}{number of replicates}

\item{klemms}{NOTE(review): previously undocumented; appears to relate to
the Klemm-Eguiluz interaction networks used by seqtime -- confirm in
R/generateSets.R}

\item{species}{NOTE(review): previously undocumented; number of species per
generated dataset -- confirm in R/generateSets.R}

\item{samples}{NOTE(review): previously undocumented; number of samples per
generated dataset -- confirm in R/generateSets.R}

\item{x}{vector specifying environmental strength or removed species}

\item{mode}{"env" or "abundance", env takes environmental strength into account while "abundance" includes species removal}

\item{name}{filename of output dataset}
}
\value{
Returns nothing, but saves .rds files of datasets
}
\description{
Generate a list of lists of datasets with n replicates for x datapoints.
}
\details{
Calls on the envGrowthChanges, generateDataSet and glv functions from seqtime to generate datasets compatible with other functions.
}
| /man/generateSets.Rd | no_license | ramellose/NetworkUtils | R | false | true | 820 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateSets.R
\name{generateSets}
\alias{generateSets}
\title{Generate multiple datasets}
\usage{
generateSets(n, klemms, species, samples, x, mode = "env", name)
}
\arguments{
\item{n}{number of replicates}
\item{x}{vector specificying environmental strength or removed species}
\item{mode}{"env" or "abundance", env takes environmental strength into account while "abundance" includes species removal}
\item{name}{filename of output dataset}
}
\value{
Returns nothing, but saves .rds files of datasets
}
\description{
Generate a list of list of datasets with n replicates for x datapoints.
}
\details{
Calls on the envGrowthChanges, generateDataSet and glv functions from seqtime to generate datasets compatible with other functions.
}
|
##
# Copyright (C) 2015 University of Virginia. All rights reserved.
#
# @file tcbuf-vs-tcrate.R
# @author Shawn Chen <sc7cq@virginia.edu>
# @version 1.0
# @date Feb 18, 2016
#
# @section LICENSE
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or(at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details at http://www.gnu.org/copyleft/gpl.html
#
# @brief Plot the buffer size against sending rate graph.
# Widen the margins so the enlarged axis labels (cex.lab = 1.5) fit.
par(mar=c(6.1,6.5,4.1,2.1))
# Multicast sending rates (Mbps) that were measured.
r_mc <- c(20, 30, 40, 50, 100, 200, 500)
# NOTE(review): `bufvec` is not defined in this script; it is presumably
# left in the workspace by an earlier analysis step -- confirm before running.
plot(r_mc, bufvec, type='o', col='red', lwd=3,
xlab='Multicast rate r_mc (Mbps)', ylab='Minimum loss-free buffer size (MB)',
cex.lab=1.5, cex.axis=1.5)
grid()
| /mcast_lib/FMTP-LDM7/R/tcbuf-vs-tcrate.R | permissive | Unidata/LDM | R | false | false | 1,052 | r | ##
# Copyright (C) 2015 University of Virginia. All rights reserved.
#
# @file tcbuf-vs-tcrate.R
# @author Shawn Chen <sc7cq@virginia.edu>
# @version 1.0
# @date Feb 18, 2016
#
# @section LICENSE
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or(at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details at http://www.gnu.org/copyleft/gpl.html
#
# @brief Plot the buffer size against sending rate graph.
par(mar=c(6.1,6.5,4.1,2.1))
r_mc <- c(20, 30, 40, 50, 100, 200, 500)
plot(r_mc, bufvec, type='o', col='red', lwd=3,
xlab='Multicast rate r_mc (Mbps)', ylab='Minimum loss-free buffer size (MB)',
cex.lab=1.5, cex.axis=1.5)
grid()
|
#' Get segmented CNV data
#'
#' Builds a wide data.table of QDNAseq bins (chr/start/end/gc/mappability)
#' with one extra column per report, taken from `columnToUse` in each
#' report's WGS_TUMOR_QDNASEQ file. Reports whose file is missing get an
#' all-NA column.
#'
#' @param reports data frame with reports; assumed to contain `prefix`,
#'   `WGS_TUMOR_QDNASEQ` and `REPORTID` columns.
#' @param columnToUse column to use for getting values (e.g. "segmented")
#' @return A data.table with bin annotation plus one value column per report
#' @examples
#' #dat <- getQDNAseq(reports)
#' #dat <- getQDNAseq(reports, columnToUse="segmented")
getQDNAseq <- function(reports, columnToUse="segmented"){
dat <- makeEmptyDataTable(header = c("chr", "start", "end", "gc", "mappability"))
# Seed the bin annotation columns from the first report file that exists;
# presumably all files share the same bin layout -- confirm.
for(k in 1:nrow(reports)){ #k <- 3
infile <- paste(reports$prefix[k] ,reports$WGS_TUMOR_QDNASEQ[k],sep="/")
if(file.exists(infile)){
tb <- fread(infile)
dat <- rbindlist(list(dat, data.table(tb$chromosome, tb$start, tb$end, tb$gc, tb$mappability)))
break
}
}
# Second pass: append one column per report holding `columnToUse`, or NA
# when that report's file is missing.
for(k in 1:nrow(reports)){ #k <- 3
infile <- paste(reports$prefix[k] ,reports$WGS_TUMOR_QDNASEQ[k],sep="/")
if(file.exists(infile)){
tb <- fread(infile)
dat[, eval(reports$REPORTID[k]) := tb[,columnToUse, with=FALSE ] ]
}else{
dat[, eval(reports$REPORTID[k]):=NA ]
}
# progress indicator (project utility)
dot(k, every=10)
}
dat
}
| /R/getQDNAseq.R | permissive | dakl/clinseqr | R | false | false | 1,083 | r | #' Get segmented CNV data
#'
#' @param reports data frame with reports
#' @param columnToUse column to use for getting values
#' @return A data frame with segmented value, or other value
#' @examples
#' #dat <- getReadcountPerChr(reports)
#' #dat <- getReadcountPerChr(reports, columnToUse="segmented")
getQDNAseq <- function(reports, columnToUse="segmented"){
dat <- makeEmptyDataTable(header = c("chr", "start", "end", "gc", "mappability"))
for(k in 1:nrow(reports)){ #k <- 3
infile <- paste(reports$prefix[k] ,reports$WGS_TUMOR_QDNASEQ[k],sep="/")
if(file.exists(infile)){
tb <- fread(infile)
dat <- rbindlist(list(dat, data.table(tb$chromosome, tb$start, tb$end, tb$gc, tb$mappability)))
break
}
}
for(k in 1:nrow(reports)){ #k <- 3
infile <- paste(reports$prefix[k] ,reports$WGS_TUMOR_QDNASEQ[k],sep="/")
if(file.exists(infile)){
tb <- fread(infile)
dat[, eval(reports$REPORTID[k]) := tb[,columnToUse, with=FALSE ] ]
}else{
dat[, eval(reports$REPORTID[k]):=NA ]
}
dot(k, every=10)
}
dat
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{papers_by_publication}
\alias{papers_by_publication}
\title{Papers by publication}
\usage{
papers_by_publication(url)
}
\arguments{
\item{url}{a OnePetro query URL}
}
\description{
Generate a summary by publications. These publications could
be World Petroleum Congress, Annual Technical
Meeting, SPE Unconventional Reservoirs Conference, etc.
}
\examples{
\dontrun{
# Example
my_url <- make_search_url(query = "industrial drilling", how = "all")
papers_by_publication(my_url)
}
}
| /man/papers_by_publication.Rd | no_license | libiner/petro.One | R | false | true | 575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{papers_by_publication}
\alias{papers_by_publication}
\title{Papers by publication}
\usage{
papers_by_publication(url)
}
\arguments{
\item{url}{a OnePetro query URL}
}
\description{
Generate a summary by publications. These publications could
be World Petroleum Congress, Annual Technical
Meeting, SPE Unconventional Reservoirs Conference, etc.
}
\examples{
\dontrun{
# Example
my_url <- make_search_url(query = "industrial drilling", how = "all")
papers_by_publication(my_url)
}
}
|
## Only run these tests when the rhdf5filters package is available: it
## provides the external compression filters (BZIP2, BLOSC) exercised below.
if(requireNamespace("rhdf5filters", quietly = TRUE)) {
  library(rhdf5)
  ## Scratch HDF5 file shared by the writing and reading test groups.
  h5File <- tempfile(pattern = "ex_save", fileext = ".h5")
  ############################################################
  context("Writing Using External Filters")
  ############################################################
  ## File, dataspace and datatype handles are created once and reused by
  ## both writing tests; they are released after the writing block.
  fid <- H5Fcreate(h5File)
  sid <- H5Screate_simple(dims = 2000, maxdims = 2000)
  tid <- rhdf5:::.setDataType(H5type = NULL, storage.mode = "integer")
  test_that("BZIP2 filter works for writing", {
    expect_silent( dcpl <- H5Pcreate("H5P_DATASET_CREATE") )
    expect_silent( H5Pset_fill_time( dcpl, "H5D_FILL_TIME_ALLOC" ) )
    ## Chunking is required before a compression filter can be attached.
    expect_silent( H5Pset_chunk( dcpl, 200) )
    expect_silent( H5Pset_bzip2(dcpl) )
    expect_silent( did <- H5Dcreate(fid, "bzip2", tid, sid, dcpl = dcpl) )
    expect_silent( H5Dwrite(buf = 1:2000, h5dataset = did) )
    expect_silent( H5Dclose(did) )
  })
  test_that("BLOSC filter works for writing", {
    expect_silent( dcpl <- H5Pcreate("H5P_DATASET_CREATE") )
    expect_silent( H5Pset_fill_time( dcpl, "H5D_FILL_TIME_ALLOC" ) )
    expect_silent( H5Pset_chunk( dcpl, 200) )
    expect_silent( H5Pset_blosc(dcpl, tid, method = 1L) )
    expect_silent( did <- H5Dcreate(fid, "blosc_lz", tid, sid, dcpl = dcpl) )
    expect_silent( H5Dwrite(buf = 1:2000, h5dataset = did) )
    expect_silent( H5Dclose(did) )
  })
  H5Sclose(sid)
  H5Fclose(fid)
  ############################################################
  context("Reading Using External Filters")
  ############################################################
  ## Re-open the file written above and verify the datasets round-trip.
  fid <- H5Fopen(h5File)
  test_that("BZIP2 filter works when reading", {
    expect_silent( did <- H5Dopen(fid, name = "bzip2") )
    expect_equivalent( H5Dread(did), 1:2000)
    ## if compression worked the dataset should be smaller than 8000 bytes
    ## (2000 raw 4-byte integers)
    expect_lt( H5Dget_storage_size(did), 4 * 2000 )
    expect_silent( H5Dclose(did) )
  })
  test_that("BLOSC filter works when reading", {
    expect_silent( did <- H5Dopen(fid, name = "blosc_lz") )
    expect_equivalent( H5Dread(did), 1:2000)
    ## if compression worked the dataset should be smaller than 8000 bytes
    expect_lt( H5Dget_storage_size(did), 4 * 2000 )
    expect_silent( H5Dclose(did) )
  })
  H5Fclose(fid)
}
h5closeAll() | /tests/testthat/test_external_filters.R | no_license | MatthieuRouland/rhdf5 | R | false | false | 2,515 | r | #only run these tests if the rhdf5filters package is present
if(requireNamespace("rhdf5filters", quietly = TRUE)) {
library(rhdf5)
h5File <- tempfile(pattern = "ex_save", fileext = ".h5")
############################################################
context("Writing Using External Filters")
############################################################
fid <- H5Fcreate(h5File)
sid <- H5Screate_simple(dims = 2000, maxdims = 2000)
tid <- rhdf5:::.setDataType(H5type = NULL, storage.mode = "integer")
test_that("BZIP2 filter works for writing", {
expect_silent( dcpl <- H5Pcreate("H5P_DATASET_CREATE") )
expect_silent( H5Pset_fill_time( dcpl, "H5D_FILL_TIME_ALLOC" ) )
expect_silent( H5Pset_chunk( dcpl, 200) )
expect_silent( H5Pset_bzip2(dcpl) )
expect_silent( did <- H5Dcreate(fid, "bzip2", tid, sid, dcpl = dcpl) )
expect_silent( H5Dwrite(buf = 1:2000, h5dataset = did) )
expect_silent( H5Dclose(did) )
})
test_that("BLOSC filter works for writing", {
expect_silent( dcpl <- H5Pcreate("H5P_DATASET_CREATE") )
expect_silent( H5Pset_fill_time( dcpl, "H5D_FILL_TIME_ALLOC" ) )
expect_silent( H5Pset_chunk( dcpl, 200) )
expect_silent( H5Pset_blosc(dcpl, tid, method = 1L) )
expect_silent( did <- H5Dcreate(fid, "blosc_lz", tid, sid, dcpl = dcpl) )
expect_silent( H5Dwrite(buf = 1:2000, h5dataset = did) )
expect_silent( H5Dclose(did) )
})
H5Sclose(sid)
H5Fclose(fid)
############################################################
context("Reading Using External Filters")
############################################################
fid <- H5Fopen(h5File)
test_that("BZIP2 filter works when reading", {
expect_silent( did <- H5Dopen(fid, name = "bzip2") )
expect_equivalent( H5Dread(did), 1:2000)
## if compression worked the dataset should be smaller than 8000 bytes
expect_lt( H5Dget_storage_size(did), 4 * 2000 )
expect_silent( H5Dclose(did) )
})
test_that("BLOSC filter works when reading", {
expect_silent( did <- H5Dopen(fid, name = "blosc_lz") )
expect_equivalent( H5Dread(did), 1:2000)
## if compression worked the dataset should be smaller than 8000 bytes
expect_lt( H5Dget_storage_size(did), 4 * 2000 )
expect_silent( H5Dclose(did) )
})
H5Fclose(fid)
}
h5closeAll() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disease_progression.R
\name{create_progression_process}
\alias{create_progression_process}
\title{Modelling the progression of the human disease}
\usage{
create_progression_process(
human,
from_state,
to_state,
rate,
infectivity,
new_infectivity
)
}
\arguments{
\item{human}{the handle for the human individuals}
\item{from_state}{the source disease state}
\item{to_state}{the destination disease state}
\item{rate}{the rate at which to move humans}
\item{infectivity}{the handle for the infectivity variable}
\item{new_infectivity}{the new infectivity of the progressed individuals}
}
\description{
Randomly moves individuals towards the later stages of disease
and updates their infectivity
}
| /man/create_progression_process.Rd | permissive | EllieSherrardSmith/malariasimulation | R | false | true | 790 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disease_progression.R
\name{create_progression_process}
\alias{create_progression_process}
\title{Modelling the progression of the human disease}
\usage{
create_progression_process(
human,
from_state,
to_state,
rate,
infectivity,
new_infectivity
)
}
\arguments{
\item{human}{the handle for the human individuals}
\item{from_state}{the source disease state}
\item{to_state}{the destination disease state}
\item{rate}{the rate at which to move humans}
\item{infectivity}{the handle for the infectivity variable}
\item{new_infectivity}{the new infectivity of the progressed individuals}
}
\description{
Randomly moves individuals towards the later stages of disease
and updates their infectivity
}
|
## makeCacheMatrix() and cacheSolve() together implement a matrix wrapper
## whose inverse is computed once via solve() and then cached for reuse.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse. Replacing the matrix via
## set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL   # memoised inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL   # new matrix, so any old inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) cached <<- inverse,
    getinverse = function() cached
  )
}
## Return the inverse of the matrix held in the cache object 'x' (as built
## by makeCacheMatrix). On the first call the inverse is computed with
## solve() and stored; later calls return the cached value and announce the
## cache hit. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute, store, and return the fresh inverse.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
## ==========================
## Testing Step
## ==========================
## ---------- Construct matrix ----------
# > a<-matrix(c(1,1,0,0,1,0,0,0,1),3,3)
# > a
# [,1] [,2] [,3]
# [1,] 1 0 0
# [2,] 1 1 0
# [3,] 0 0 1
# > z<-makeCacheMatrix(a)
## ---------- Calculate the inverse matrix ----------
# > cacheSolve(z)
# [,1] [,2] [,3]
# [1,] 1 0 0
# [2,] -1 1 0
# [3,] 0 0 1
## ---------- Assign inverse matrix ----------
# > inv<-matrix(c(1,0,0,-1,1,0,0,0,1),3,3)
# > z$setinverse(inv)
## ---------- Get cached inverse matrix ----------
# > cacheSolve(z)
## getting cached data
# [,1] [,2] [,3]
# [1,] 1 -1 0
# [2,] 0 1 0
# [3,] 0 0 1
| /project/PA2/cachematrix.R | no_license | shirleyrz/R-Programming | R | false | false | 2,097 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
## initialize inverse matrix
inv <- NULL
## set the matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
## get the matrix
get <- function() x
## set the inverse matrix
setinverse <- function(inverse) inv <<- inverse
## get the inverse matrix
getinverse <- function() inv
## return value
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## try to get the cached inverse matrix
inv <- x$getinverse()
## if the inverse matrix exists, just return it
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
## otherwise calculate the inverse matrix via solve() funxtion
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
## return value
inv
}
## ==========================
## Testing Step
## ==========================
## ---------- Construct matrix ----------
# > a<-matrix(c(1,1,0,0,1,0,0,0,1),3,3)
# > a
# [,1] [,2] [,3]
# [1,] 1 0 0
# [2,] 1 1 0
# [3,] 0 0 1
# > z<-makeCacheMatrix(a)
## ---------- Calculate the inverse matrix ----------
# > cacheSolve(z)
# [,1] [,2] [,3]
# [1,] 1 0 0
# [2,] -1 1 0
# [3,] 0 0 1
## ---------- Assign inverse matrix ----------
# > inv<-matrix(c(1,0,0,-1,1,0,0,0,1),3,3)
# > z$setinverse(inv)
## ---------- Get cached inverse matrix ----------
# > cacheSolve(z)
## getting cached data
# [,1] [,2] [,3]
# [1,] 1 -1 0
# [2,] 0 1 0
# [3,] 0 0 1
|
## Return the name of the hospital in 'state' holding rank 'num' for 30-day
## mortality from the given outcome ("heart attack", "heart failure" or
## "pneumonia"). 'num' may be "best", "worst" or a numeric rank; ties are
## broken alphabetically by hospital name.
rankhospital <- function(state,outcome,num = "best" )
{
  ## Read Data -- directory[2] is assumed to be the outcome-of-care CSV in
  ## the Data folder; TODO confirm the file ordering is stable.
  directory <- list.files(path = "Data",full.names = TRUE,pattern = ".csv")
  outcome_data <- read.csv(directory[2],stringsAsFactors = FALSE,na.strings = "Not Available")
  outcome_v <- c("heart attack","heart failure", "pneumonia")
  ## Keep only this state's rows and the columns of interest: presumably
  ## State, hospital name, and the three mortality columns (11/17/23 of the
  ## raw file) -- verify against the data dictionary.
  useful_data <- outcome_data[outcome_data$State == state,c(7,2,11,17,23)]
  ## Validate state and outcome.
  ## NOTE(review): validation happens after the subsetting above; harmless,
  ## but the checks could be performed first.
  if(!(state %in% outcome_data$State))
  {
    stop("invalid state")
  }
  else if(!(outcome %in% outcome_v))
  {
    stop("invalid outcome")
  }
  ## One branch per outcome; only the mortality column index (3, 4 or 5)
  ## differs between the three branches.
  if(outcome == outcome_v[1])
  {
    ## Order by mortality rate (ties broken by hospital name), drop NAs.
    ordered_data <- useful_data[order(as.numeric(useful_data[,3]),useful_data[,2]),]
    final_data <- ordered_data[!is.na(ordered_data[,3]),]
    if(num == "best")
    {
      return(final_data[1,2])
    }
    else if(num == "worst")
    {
      ## length(final_data[,3]) equals the number of remaining rows.
      return(final_data[length(final_data[,3]),2])
    }
    else
    {
      ## Numeric rank; yields NA when num exceeds the number of hospitals.
      return(final_data[num,2])
    }
  }
  else if(outcome == outcome_v[2])
  {
    ordered_data <- useful_data[order(as.numeric(useful_data[,4]),useful_data[,2]),]
    final_data <- ordered_data[!is.na(ordered_data[,4]),]
    if(num == "best")
    {
      return(final_data[1,2])
    }
    else if(num == "worst")
    {
      return(final_data[length(final_data[,4]),2])
    }
    else
    {
      return(final_data[num,2])
    }
  }
  else if(outcome == outcome_v[3])
  {
    ordered_data <- useful_data[order(as.numeric(useful_data[,5]),useful_data[,2]),]
    final_data <- ordered_data[!is.na(ordered_data[,5]),]
    if(num == "best")
    {
      return(final_data[1,2])
    }
    else if(num == "worst")
    {
      return(final_data[length(final_data[,5]),2])
    }
    else
    {
      return(final_data[num,2])
    }
  }
} | /rankhospital.R | no_license | migue28-git/Best-Hospital-in-USA | R | false | false | 1,915 | r | rankhospital <- function(state,outcome,num = "best" )
{
##Read Data
directory <- list.files(path = "Data",full.names = TRUE,pattern = ".csv")
outcome_data <- read.csv(directory[2],stringsAsFactors = FALSE,na.strings = "Not Available")
outcome_v <- c("heart attack","heart failure", "pneumonia")
##Making a compact and useful data frame
useful_data <- outcome_data[outcome_data$State == state,c(7,2,11,17,23)]
##Check that state and coutcome are valid
if(!(state %in% outcome_data$State))
{
stop("invalid state")
}
else if(!(outcome %in% outcome_v))
{
stop("invalid outcome")
}
##Which hospital is the best
if(outcome == outcome_v[1])
{
ordered_data <- useful_data[order(as.numeric(useful_data[,3]),useful_data[,2]),]
final_data <- ordered_data[!is.na(ordered_data[,3]),]
if(num == "best")
{
return(final_data[1,2])
}
else if(num == "worst")
{
return(final_data[length(final_data[,3]),2])
}
else
{
return(final_data[num,2])
}
}
else if(outcome == outcome_v[2])
{
ordered_data <- useful_data[order(as.numeric(useful_data[,4]),useful_data[,2]),]
final_data <- ordered_data[!is.na(ordered_data[,4]),]
if(num == "best")
{
return(final_data[1,2])
}
else if(num == "worst")
{
return(final_data[length(final_data[,4]),2])
}
else
{
return(final_data[num,2])
}
}
else if(outcome == outcome_v[3])
{
ordered_data <- useful_data[order(as.numeric(useful_data[,5]),useful_data[,2]),]
final_data <- ordered_data[!is.na(ordered_data[,5]),]
if(num == "best")
{
return(final_data[1,2])
}
else if(num == "worst")
{
return(final_data[length(final_data[,5]),2])
}
else
{
return(final_data[num,2])
}
}
} |
## Waveform dataset: 40 numeric features per row, class label in column 41.
data<- read.csv("waveform_with_noise.csv",header=FALSE)
## Min-max rescale a numeric vector to the [0, 1] range.
## Note: a constant vector (max == min) yields NaN, as in the original
## paren-bodied version this replaces; the body now uses idiomatic braces
## without a redundant return().
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
## Rescale every feature column to [0, 1], then split the data:
## first 4499 rows for training, rows 4500-5000 as the test set.
data_feature<-data[,1:40]
data_n <- as.data.frame(lapply(data_feature[,1:40],normalize))
train_data <- data_n[1:4499,]
test_data <-data_n[4500:5000,]
train_data_class <- data[1:4499,41]
test_data_class <- data[4500:5000,41]
## NOTE(review): require() followed by library() of the same package is
## redundant -- one library(class) call suffices.
require(class)
library(class)
error<-NULL
## k-nearest-neighbour classification with k = 71 neighbours.
m2 <- knn(train=train_data,test=test_data,cl=train_data_class,k=71)
## Confusion matrix; its diagonal counts correct classifications.
x<-table(test_data_class,m2)
sum_diag <- sum(diag(x))
## NOTE(review): this assignment shadows base::sum from here on.
sum<-sum(x)
## Misclassification rate = 1 - accuracy.
error <- c(error,1 - (sum_diag/sum))
cat("Error : ",error*100,"%") | /waveform_with_noise_script.R | no_license | mehtaaman2/R_scripts | R | false | false | 613 | r | data<- read.csv("waveform_with_noise.csv",header=FALSE)
normalize <- function(x)
(
return((x-min(x))/(max(x)-min(x)))
)
data_feature<-data[,1:40]
data_n <- as.data.frame(lapply(data_feature[,1:40],normalize))
train_data <- data_n[1:4499,]
test_data <-data_n[4500:5000,]
train_data_class <- data[1:4499,41]
test_data_class <- data[4500:5000,41]
require(class)
library(class)
error<-NULL
m2 <- knn(train=train_data,test=test_data,cl=train_data_class,k=71)
x<-table(test_data_class,m2)
sum_diag <- sum(diag(x))
sum<-sum(x)
error <- c(error,1 - (sum_diag/sum))
cat("Error : ",error*100,"%") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.predict}
\alias{catboost.predict}
\title{Apply the model}
\usage{
catboost.predict(model, pool, verbose = FALSE,
prediction_type = "RawFormulaVal", ntree_start = 0, ntree_end = 0,
thread_count = 1)
}
\arguments{
\item{model}{The model obtained as the result of training.
Default value: Required argument}
\item{pool}{The input dataset.
Default value: Required argument}
\item{verbose}{Verbose output to stdout.
Default value: FALSE (not used)}
\item{prediction_type}{The format for displaying approximated values in output data
(see \url{https://tech.yandex.com/catboost/doc/dg/concepts/output-data-docpage/#output-data}).
Possible values:
\itemize{
\item 'Probability'
\item 'Class'
\item 'RawFormulaVal'
}
Default value: 'RawFormulaVal'}
\item{ntree_start}{The model is applied on the interval [ntree_start, ntree_end) (zero-based indexing).
Default value: 0}
\item{ntree_end}{The model is applied on the interval [ntree_start, ntree_end) (zero-based indexing).
Default value: 0 (if the value equals 0, this parameter is ignored and ntree_end is set to tree_count)}
\item{thread_count}{The number of threads to use when applying the model.
Allows you to optimize the speed of execution. This parameter doesn't affect results.
Default value: 1}
}
\description{
Apply the model to the given dataset.
}
\details{
Peculiarities: In case of multiclassification the prediction is returned in the form of a matrix.
Each line of this matrix contains the predictions for one object of the input dataset.
}
\seealso{
\url{https://tech.yandex.com/catboost/doc/dg/concepts/r-reference_catboost-predict-docpage/}
}
| /catboost/R-package/man/catboost.predict.Rd | permissive | exprmntr/test | R | false | true | 1,722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.predict}
\alias{catboost.predict}
\title{Apply the model}
\usage{
catboost.predict(model, pool, verbose = FALSE,
prediction_type = "RawFormulaVal", ntree_start = 0, ntree_end = 0,
thread_count = 1)
}
\arguments{
\item{model}{The model obtained as the result of training.
Default value: Required argument}
\item{pool}{The input dataset.
Default value: Required argument}
\item{verbose}{Verbose output to stdout.
Default value: FALSE (not used)}
\item{prediction_type}{The format for displaying approximated values in output data
(see \url{https://tech.yandex.com/catboost/doc/dg/concepts/output-data-docpage/#output-data}).
Possible values:
\itemize{
\item 'Probability'
\item 'Class'
\item 'RawFormulaVal'
}
Default value: 'RawFormulaVal'}
\item{ntree_start}{Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).
Default value: 0}
\item{ntree_end}{Model is applyed on the interval [ntree_start, ntree_end) (zero-based indexing).
Default value: 0 (if value equals to 0 this parameter is ignored and ntree_end equal to tree_count)}
\item{thread_count}{The number of threads to use when applying the model.
Allows you to optimize the speed of execution. This parameter doesn't affect results.
Default value: 1}
}
\description{
Apply the model to the given dataset.
}
\details{
Peculiarities: In case of multiclassification the prediction is returned in the form of a matrix.
Each line of this matrix contains the predictions for one object of the input dataset.
}
\seealso{
\url{https://tech.yandex.com/catboost/doc/dg/concepts/r-reference_catboost-predict-docpage/}
}
|
## Pandu (JNE) invoice checking batch job: loads the carrier invoice files,
## joins them against OMS package data, recomputes the expected carrying /
## insurance / COD fees, flags discrepancies and duplicate billing, and
## writes one "_checked.csv" per input invoice file.
##
## BUG FIX in this revision: the DuplicationSource lookup referenced the
## non-existent column 'InvoiceFile'; paidInvoiceList only carries
## tracking_number and rawFile, so the source file name is in 'rawFile'.
source("3_Script/1_Code/00_init.R")
tryCatch({
  flog.info("Initial Setup", name = reportName)
  source("3_Script/1_Code/01_Loading/Load_Invoice_Data.R")
  load("1_Input/RData/packageBaseData.RData")
  invoiceData <- LoadInvoiceData("1_Input/Pandu/01_Invoice")
  mergedOMSData <- left_join(invoiceData,
                             packageBaseData,
                             by = "tracking_number")
  rm(packageBaseData)
  gc()
  temp <- mergedOMSData  # NOTE(review): apparently unused -- confirm before removing
  ## Collapse the duplicated package_number columns produced by the join.
  mergedOMSData %<>%
    mutate(package_number = ifelse(is.na(package_number.y), package_number.x,
                                   package_number.y)) %>%
    select(-c(package_number.x, package_number.y))
  ## A tracking number without an RTS date was not matched in OMS data.
  mergedOMSData %<>%
    mutate(existence_flag = ifelse(!is.na(RTS_Date), "OKAY", "NOT_OKAY"))
  # Map Rate Card
  source("3_Script/1_Code/03_Processing/Pandu/Pandu_MapRateCard.R")
  mergedOMSData_rate <- MapRateCard(mergedOMSData, "1_Input/Pandu/02_Ratecards/PANDU_ratecard.xls")
  # Rate Calculation: recompute what Lazada expects each fee to be.
  mergedOMSData_rate %<>%
    replace_na(list(paidPrice = 0,
                    shippingFee = 0,
                    shippingSurcharge = 0))
  mergedOMSData_rate %<>%
    mutate(carrying_fee_laz = package_chargeable_weight * Price) %>%
    mutate(insurance_fee_laz = ifelse((paidPrice + shippingFee + shippingSurcharge) < 1000000, 2500,
                                      (paidPrice + shippingFee + shippingSurcharge) * 0.0025)) %>%
    mutate(cod_fee_laz = ifelse(payment_method == "CashOnDelivery",
                                (paidPrice + shippingFee + shippingSurcharge) * 0.0185,
                                0))
  ## Flag each invoiced fee that exceeds the recomputed expectation.
  mergedOMSData_rate %<>%
    mutate(carrying_fee_flag = ifelse(carrying_fee_laz >= carrying_fee, "OKAY", "NOT_OKAY")) %>%
    mutate(insurance_fee_flag = ifelse(insurance_fee_laz >= insurance_fee, "OKAY", "NOT_OKAY")) %>%
    mutate(cod_fee_flag = ifelse(round(cod_fee_laz) + 1 >= round(cod_fee), "OKAY", "NOT_OKAY"))
  # Duplicated Invoice Check: against already-paid invoices and within-file.
  paidInvoiceData <- LoadInvoiceData("1_Input/Pandu/03_Paid_Invoice/")
  paidInvoice <- NULL
  paidInvoiceList <- NULL
  if (!is.null(paidInvoiceData)) {
    paidInvoice <- paidInvoiceData$tracking_number
    paidInvoiceList <- select(paidInvoiceData, tracking_number, rawFile)
    paidInvoiceList <- paidInvoiceList %>%
      filter(!duplicated(tracking_number))
    ## Row names enable the by-tracking-number lookup used below.
    row.names(paidInvoiceList) <- paidInvoiceList$tracking_number
  }
  mergedOMSData_rate %<>%
    mutate(Duplication_Flag=ifelse(duplicated(tracking_number),"Duplicated",
                                   ifelse(tracking_number %in% paidInvoice,
                                          "Duplicated","Not_Duplicated"))) %>%
    ## Fixed: 'rawFile' instead of the non-existent 'InvoiceFile' column.
    mutate(DuplicationSource=ifelse(duplicated(tracking_number),"Self_Duplicated",
                                    ifelse(tracking_number %in% paidInvoice,
                                           paidInvoiceList[tracking_number,]$rawFile,"")))
  ## NOTE(review): mergedOMSData_final is computed but never written out
  ## below (mergedOMSData_rate is written instead) -- confirm the intent.
  mergedOMSData_final <- mergedOMSData_rate %>%
    select(-c(level_4_code, level_4_customer_address_region_type, level_4_fk_customer_address_region,
              level_3_code, level_3_customer_address_region_type, level_3_fk_customer_address_region,
              level_2_code, level_2_customer_address_region_type, level_2_fk_customer_address_region))
  flog.info("Writing Result to csv format!!!", name = reportName)
  invoiceFiles <- unique(mergedOMSData_rate$rawFile)
  for (iFile in invoiceFiles) {
    ## NOTE(review): the unescaped dot in ".xls.*$" matches any character.
    fileName <- gsub(".xls.*$", "_checked.csv", iFile)
    fileData <- as.data.frame(mergedOMSData_rate %>% filter(rawFile == iFile))
    write.csv2(fileData, file.path("2_Output/Pandu", fileName),
               row.names = FALSE)
  }
  flog.info("Done", name = reportName)
},error = function(err){
  flog.error(err, name = reportName)
  flog.error("PLease send 3_Script/Log folder to Regional OPS BI for additional support",
             name = reportName)
})
| /3_Script/1_Code/Pandu_InvoiceCheck_Batch.R | no_license | datvuong/ID_JNE_Invoice_Checking | R | false | false | 3,911 | r | source("3_Script/1_Code/00_init.R")
tryCatch({
flog.info("Initial Setup", name = reportName)
source("3_Script/1_Code/01_Loading/Load_Invoice_Data.R")
load("1_Input/RData/packageBaseData.RData")
invoiceData <- LoadInvoiceData("1_Input/Pandu/01_Invoice")
mergedOMSData <- left_join(invoiceData,
packageBaseData,
by = "tracking_number")
rm(packageBaseData)
gc()
temp <- mergedOMSData
mergedOMSData %<>%
mutate(package_number = ifelse(is.na(package_number.y), package_number.x,
package_number.y)) %>%
select(-c(package_number.x, package_number.y))
mergedOMSData %<>%
mutate(existence_flag = ifelse(!is.na(RTS_Date), "OKAY", "NOT_OKAY"))
# Map Rate Card
source("3_Script/1_Code/03_Processing/Pandu/Pandu_MapRateCard.R")
mergedOMSData_rate <- MapRateCard(mergedOMSData, "1_Input/Pandu/02_Ratecards/PANDU_ratecard.xls")
# Rate Calculation
mergedOMSData_rate %<>%
replace_na(list(paidPrice = 0,
shippingFee = 0,
shippingSurcharge = 0))
mergedOMSData_rate %<>%
mutate(carrying_fee_laz = package_chargeable_weight * Price) %>%
mutate(insurance_fee_laz = ifelse((paidPrice + shippingFee + shippingSurcharge) < 1000000, 2500,
(paidPrice + shippingFee + shippingSurcharge) * 0.0025)) %>%
mutate(cod_fee_laz = ifelse(payment_method == "CashOnDelivery",
(paidPrice + shippingFee + shippingSurcharge) * 0.0185,
0))
mergedOMSData_rate %<>%
mutate(carrying_fee_flag = ifelse(carrying_fee_laz >= carrying_fee, "OKAY", "NOT_OKAY")) %>%
mutate(insurance_fee_flag = ifelse(insurance_fee_laz >= insurance_fee, "OKAY", "NOT_OKAY")) %>%
mutate(cod_fee_flag = ifelse(round(cod_fee_laz) + 1 >= round(cod_fee), "OKAY", "NOT_OKAY"))
# Duplicated Invoice Check
paidInvoiceData <- LoadInvoiceData("1_Input/Pandu/03_Paid_Invoice/")
paidInvoice <- NULL
paidInvoiceList <- NULL
if (!is.null(paidInvoiceData)) {
paidInvoice <- paidInvoiceData$tracking_number
paidInvoiceList <- select(paidInvoiceData, tracking_number, rawFile)
paidInvoiceList <- paidInvoiceList %>%
filter(!duplicated(tracking_number))
row.names(paidInvoiceList) <- paidInvoiceList$tracking_number
}
mergedOMSData_rate %<>%
mutate(Duplication_Flag=ifelse(duplicated(tracking_number),"Duplicated",
ifelse(tracking_number %in% paidInvoice,
"Duplicated","Not_Duplicated"))) %>%
mutate(DuplicationSource=ifelse(duplicated(tracking_number),"Self_Duplicated",
ifelse(tracking_number %in% paidInvoice,
paidInvoiceList[tracking_number,]$InvoiceFile,"")))
mergedOMSData_final <- mergedOMSData_rate %>%
select(-c(level_4_code, level_4_customer_address_region_type, level_4_fk_customer_address_region,
level_3_code, level_3_customer_address_region_type, level_3_fk_customer_address_region,
level_2_code, level_2_customer_address_region_type, level_2_fk_customer_address_region))
flog.info("Writing Result to csv format!!!", name = reportName)
invoiceFiles <- unique(mergedOMSData_rate$rawFile)
for (iFile in invoiceFiles) {
fileName <- gsub(".xls.*$", "_checked.csv", iFile)
fileData <- as.data.frame(mergedOMSData_rate %>% filter(rawFile == iFile))
write.csv2(fileData, file.path("2_Output/Pandu", fileName),
row.names = FALSE)
}
flog.info("Done", name = reportName)
},error = function(err){
flog.error(err, name = reportName)
flog.error("PLease send 3_Script/Log folder to Regional OPS BI for additional support",
name = reportName)
})
|
## Authors
## Martin Schlather, schlather@math.uni-mannheim.de
##
##
## Copyright (C) 2015 Martin Schlather
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 3
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
## Normalise a user-supplied model specification (formula, list, or fitted
## object) into the internal list representation used by the C code.
##
## model: model spec; for fitted objects the "ml" sub-model is extracted.
## ...:   forwarded to parseModel().
## x:     optional coordinates forwarded to parseModel().
## Returns a list with class "RM_model".
PrepareModel2 <- function(model, ..., x=NULL) {
  if (missing(model) || is.null(model)) stop("'model' must be given.")
  method <- "ml"
  ## inherits() instead of `class(model) == ...`: class() can return a
  ## character vector of length > 1, which is not a valid scalar condition
  ## for if() (and errors in recent R versions).
  if (inherits(model, "RF_fit")) model <- model[[method]]$model
  else if (inherits(model, "RFfit")) model <- model[method]
  m <- parseModel(model, ..., x=x)
  ## Temporarily wrap non-sum models in a '+' so the loop below can treat
  ## both shapes uniformly; unwrapped again before returning.
  if (notplus <- !(m[[1]] %in% ZF_PLUS)) m <- list(ZF_SYMBOLS_PLUS, m)
  for (i in 2:length(m)) {
    ## Replace a mixed-effect component whose design matrix X is the scalar
    ## 1 (and which carries a fixed effect b) by an equivalent trend
    ## component with mean b.
    if ((m[[i]][[1]] %in% ZF_MIXED) && length(m[[i]]$X)==1 &&
        is.numeric(m[[i]]$X) && m[[i]]$X==1 && !is.null(m[[i]]$b)) {
      m[[i]] <- list(ZF_TREND[2], mean=m[[i]]$b)
      if (RFoptions()$general$printlevel > PL_IMPORTANT)
        message(paste("The '1' in the mixed model definition has been replaced by '", ZF_TREND[1], "(mean=", m[[i]]$mean, ")'.", sep=""))
    }
  }
  if (notplus) m <- m[[2]]
  class(m) <- "RM_model"
  return(m)
}
## Convert any of the user's (obsolete-style) model definitions -- standard,
## nested, or sequential list -- for the covariance function into the
## standard internal list format used especially in the C programs.
##
## Overwrites in some situations the simulation method for nugget.
## Allows trend to be NA (or any other non-finite value -- is not checked!)
## trend has not been implemented yet!
PrepareModel <- function(model, param, trend=NULL,
                         nugget.remove=TRUE, method=NULL) {
  if (is(model, ZF_MODEL))
    stop("models of class ZF_MODEL cannot be combined with obsolete RandomFields functions")
  if (!is.null(method)) stop("to give method in PrepareModel is obsolete")
  if (!is.null(trend))
    if (!is.numeric(trend) || length(trend)!=1)
      stop("in the obsolete setting, only constant mean can used")
  ## Already in the extended (new-style) list notation: first element is a
  ## character model name and unnamed -> only attach the trend, if any.
  if (is.list(model) && is.character(model[[1]]) &&
      (is.null(names(model)) || names(model)[[1]]=="")) {
    if (!missing(param) && !is.null(param))
      stop("param cannot be given in the extended definition")
    if (is.null(trend)) return(model)
    trend <- list(ZF_TREND[2], mean=trend)
    if (model[[1]] %in% ZF_PLUS) return(c(model, list(trend)))
    else return(list(ZF_SYMBOLS_PLUS, model, trend))
  }
  printlevel <- RFoptions()$general$printlevel
  ## Local error helper: optionally dumps model/param/trend before stopping.
  STOP <- function(txt) {
    if (printlevel>=PL_ERRORS) {
      cat("model: ")
      if (!missing.model) Print(model) else cat(" missing.\n")
      cat("param: ")
      if (!missing.param) Print(param) else cat(" missing.\n")
      cat("trend: ")
      Print(trend)
    }
    stop("(in PrepareModel) ", txt, call.=FALSE)
  }
  ## Turn one old-style component (model name + var, scale/aniso, kappas)
  ## into the internal '$'-wrapped list form.
  transform <- function(model) {
    if (!is.list(model)) {
      STOP("some elements of the model definition are not lists")
    }
    m <- list(DOLLAR[1], var=model$v)
    lm <- length(model) - 3 # var, scale/aniso, name
    if (!is.null(model$a)) m$aniso <- model$a else m$scale <- model$scale
    ## model <- c(model, if (!is.null(model$a))
    ##               list(aniso=model$a) else list(scale=model$s)) ## ???
    if (!is.na(p <- pmatch("meth", names(model), duplicates.ok=TRUE))) {
      if (printlevel>=PL_ERRORS) Print(p, model)
      stop("method cannot be given with the model anymore. It must be given as a parameter to the function. See 'RFoptions' and 'RFsimulate'")
    }
    if (!is.null(model$me))
      stop("'mean' seems to be given within the inner model definitions");
    if (!is.character(model$m)) {
      stop("'model' was not given extacly once each odd number of list entries or additional unused list elements are given.")
    }
    m1 <- list(model$m)
    if (!is.null(model$k)) {
      lm <- lm - 1
      if (length(model$k) != 0)
        for (i in 1:length(model$k)) {
          ## Builds named entries k1, k2, ... from the kappa vector.
          eval(parse(text=paste("m1$k", i, " <- model$k[", i, "]", sep="")))
        }
    }
    if (lm != 0) {
      if (printlevel>=PL_ERRORS) Print(lm, model)
      stop("some parameters do not fit")
    }
    m <- c(m, list(m1))
    return(m)
  } # end transform
  op.list <- c(ZF_SYMBOLS_PLUS, ZF_SYMBOLS_MULT) ## if others use complex list definition !
  missing.model <- missing(model)
  missing.param <- missing(param) || is.null(param)
  if (missing.param && is.null(model$param)) { ## full model
    if (RFoptions()$internal$warn_oldstyle)
      warning("the sequential list format is depreciated.")
    if (missing.model || (length(model)==0)) model <- list()
    else if (!is.list(model))
      STOP("if param is missing, model must be a list of lists (or a list in the extended notation)")
    ## Sum of logicals < 2 means at least two of trend/mean/model$trend are
    ## set at the same time.
    if (is.null(trend) + is.null(model$mean) + is.null(model$trend)<2)
      STOP("trend/mean is given twice")
    if (!is.null(model$mean)) trend <- model$mean else
    if (!is.null(model$trend)) trend <- model$trend else trend <- NULL
    model$trend <- model$mean <- NULL
    ## the definition might be given at a deeper level as element
    ## $model of the list:
    if (is.list(model$model)) {
      if (!is.list(model$model[[1]]))
        STOP("if param is missing, the model$model must be a list of lists")
      model <- model$model
    }
    if (length(model)==0) { ## deterministic
      return(if (is.null(trend)) NULL else list(ZF_TREND[2], mean=trend))
    }
    if (length(model) %% 2 !=1) STOP("list for model definition should be odd")
    if (length(model)==1)
      return(if (is.null(trend) ||
                 is.numeric(trend) && length(trend)==1 && !is.na(trend)&&trend==0)
             transform(model[[1]])
             else list(ZF_SYMBOLS_PLUS, transform(model[[1]]),
                       list(ZF_TREND[2], mean=trend)));
    ## Even positions hold the operators ("+" or "*") between components.
    op <- pmatch(c(model[seq(2, length(model), 2)], recursive=TRUE),
                 op.list, duplicates.ok=TRUE) - 1
    if (!all(is.finite(op))) STOP("operators are not all allowed; see the extended list definition for extensions")
    model <- model[seq(1, length(model), 2)]
    plus <- which(op==0)
    if (length(plus) == 0) {
      m <- list("*", lapply(model, transform))
    } else {
      ## Group the components between '+' operators into products.
      plus <- c(0, plus, length(op)+1)
      m <- list(ZF_SYMBOLS_PLUS)
      for (i in 1:(length(plus) - 1)) {
        m[[i+1]] <-
          if (plus[i] + 1 == plus[i+1]) transform(model[[plus[i] + 1]])
          else list(ZF_SYMBOLS_MULT,
                    lapply(model[(plus[i] + 1) : plus[i+1]], transform))
      }
    }
    model <- m
  } else { ## standard definition or nested model
    if (missing.param) { ## a simple list of the model and the
      ## parameters is also possible
      if (is.null(param <- model$p)) STOP("is.null(model$param)")
      stopifnot(is.null(trend) || is.null(model$trend))
      if (is.null(trend)) trend <- model$trend
      if (!is.null(model$mean)) {
        if (!is.null(trend)) STOP("mean and trend given twice")
        trend <- model$mean
      }
      model <- model$model
    }
    stopifnot(is.character(model), length(model)==1)
    if (is.matrix(param)) { ## nested
      if (nrow(param) == 1)
        return(PrepareModel(model=model, param=c(param[1], 0, param[-1]),
                            trend=trend))
      name <- model
      model <- list(ZF_SYMBOLS_PLUS)#, method=method)
      ## One summand per row: var/scale pairs plus optional extra kappas;
      ## a zero scale turns the row into a nugget component.
      for (i in 1:nrow(param)) {
        model <- c(model,
                   if (is.na(param[i, 2]) || param[i, 2] != 0)
                   list(list(DOLLAR[1], var=param[i, 1], scale=param[i, 2],
                             if (ncol(param) >2) list(name, k=param[i,-1:-2])
                             else list(name)))
                   else list(list(DOLLAR[1], var=param[i,1],
                                  list(ZF_NUGGET[2]))))
      }
    } else if (is.vector(param)) { ## standard, simple way
      ## if trend is given, then param is shortened by one component
      if (is.null(trend)) {
        trend <- param[1]
        param <- param[-1]
      } else message("It is assumed that no mean is given so that the first component of param is the variance")
      if (model == ZF_NUGGET[2]) {
        model <- transform(list(model=model, var=sum(param[1:2]), scale=1))
      } else {
        if (length(param) > 3)
          model <- transform(list(model=model, var=param[1], scale=param[3],
                                  k=param[-1:-3]))
        else
          model <- transform(list(model=model, var=param[1], scale=param[3]))
        if (is.na(param[2]) || param[2] != 0 || !nugget.remove) {# nugget
          model <- list(ZF_SYMBOLS_PLUS,
                        model,
                        transform(list(model=ZF_NUGGET[2], var=param[2], scale=1)))
        }
        ## if (!is.null(method)) model <- c(model, method=method) ## duplicate
      }
    } else stop("unknown format") # end nested/standard definition
  }
  ## Finally attach the trend as an additional '+' component, unless the
  ## trend is NULL or a plain zero mean.
  return(if (is.null(trend) ||
             is.numeric(trend) && length(trend)==1 && !is.na(trend) &&trend==0)
         return(model)
         else if (model[[1]] %in% ZF_PLUS)
         c(model, list(list(ZF_TREND[2], mean=trend)))
         else list(ZF_SYMBOLS_PLUS, model, list(ZF_TREND[2], mean=trend)))
}
## Encode a list of coordinate vectors as a 3 x d grid matrix, one column
## per dimension holding (start, step, number of points).
##
## x:             list of numeric coordinate vectors, one per dimension.
## name:          label of the coordinate set (kept for interface
##                compatibility; not used inside this function).
## grid:          if missing/empty, gridded-ness is inferred: non-equidistant
##                coordinates make the function return FALSE instead of
##                stopping.
## warn_ambiguous: whether to warn once about a degenerated-grid
##                interpretation when 'grid' was not given explicitly.
## gridtolerance: maximal allowed relative deviation between step widths.
##
## Returns the 3 x d matrix, or FALSE when 'grid' was not given and the
## coordinates do not form a grid. Dead debug branches (`if (FALSE && ...)`,
## `if (!TRUE) Print(...)`) from the original were removed; behavior is
## unchanged.
seq2grid <- function(x, name, grid, warn_ambiguous, gridtolerance) {
  xx <- matrix(nrow=3, ncol=length(x))
  step0 <- rep(FALSE, length(x))   # marks degenerate (constant) axes
  gridnotgiven <- missing(grid) || length(grid) == 0
  for (i in seq_along(x)) {
    if (length(x[[i]]) == 1) {
      ## Single point: start = value, step 0, one location.
      xx[,i] <- c(x[[i]], 0, 1)
      next
    }
    step <- diff(x[[i]])
    if (step[1] == 0.0) {
      ## A zero first step is only acceptable when the whole axis is
      ## constant (degenerate grid direction).
      ok <- step0[i] <- all(step == 0.0)
    } else {
      ok <- max(abs(step / step[1] - 1.0)) <= gridtolerance
    }
    if (!ok) {
      if (gridnotgiven) return(FALSE)
      stop("Different grid distances detected, but the grid must ",
           "have equal distances in each direction -- if sure that ",
           "it is a grid, increase the value of 'gridtolerance' which equals ",
           gridtolerance,".\n")
    }
    xx[,i] <- c(x[[i]][1], step[1], if (step0[i]) 1 else length(x[[i]]))
  }
  if (any(step0)) {
    if (all(step0)) {
      ## All axes constant: the locations are indistinguishable.
      if (gridnotgiven) return(FALSE)
      else stop("Within a grid, the coordinates must be distinguishable")
    } else {
      if (gridnotgiven && warn_ambiguous) {
        RFoptions(internal.warn_ambiguous = FALSE)
        warning("Interpretation as degenerated grid. Better give 'grid' explicitely. (This warning appears only once per session.)")
      }
    }
  }
  return(xx)
}
CheckXT <- function(x, y=NULL, z=NULL, T=NULL, grid, distances=NULL,
dim=NULL, # == spatialdim!
length.data,
y.ok = FALSE,
printlevel = RFoptions()$general$printlevel){
## do not pass anything on "..." ! --- only used for internal calls
## when lists are re-passed
## converts the given coordinates into standard formats
## (one for arbitrarily given locations and one for grid points)
#print("CheckXT in convert.R")#Berreth
## Returns a list of class "CheckXT" with components
##   x, y, T, grid, spatialdim, Zeit, dist.given, restotal (number of
##   locations), l (physical length/rows of input), coordunits,
##   new_coordunits
## or, for several coordinate sets, a "CheckXT"-classed list of such
## lists. Coordinates may be given as locations (x/y/z/T), as a grid
## (gridtriple notation or equidistant sequences), as a
## GridTopology/RFsp/sp object, or as distances.
## already standardized input is passed through unchanged
if (!missing(x)) {
if (is(x, "CheckXT")) return(x)
if (is.list(x)) {
if (!is.list(x[[1]])) return(do.call("CheckXT", x))
## a list of coordinate sets: standardize each set and check that
## the sets are mutually compatible
L <- list()
for (i in 1:length(x)) {
L[[i]] <-
if (is(x[[i]], "CheckXT")) x[[i]] else do.call("CheckXT", x[[i]])
}
if (length(x) > 1) {
if (!all(diff(sapply(L, function(x) x$Zeit)) == 0) ||
!all(diff(sapply(L, function(x) x$spatialdim)) == 0))
stop("all sets must have the same dimension")
if (!all(diff(sapply(L, function(x) x$dist.given)) == 0))
stop("either all the sets must be based on distances or none")
}
class(L) <- "CheckXT"
return(L)
}
}
RFopt <- RFoptions()
curunits <- RFopt$coords$coordunits
newunits <- RFopt$coords$new_coordunits
coord_system <- RFopt$coords$coord_system
new_coord_system <- RFopt$coords$new_coord_system
ex.red <- RFopt$internal$examples_reduced
## --- branch 1: coordinates given as distances -----------------------
if (!missing(distances) && !is.null(distances)) { ## length==0 OK!
stopifnot(is.matrix(distances) || (!missing(dim) && !is.null(dim)),
(missing(grid) || length(grid) == 0),
missing(x) || is.null(x),
length(y)==0,
length(z)==0,
length(T)==0)
if (coord_system != new_coord_system && new_coord_system != "keep")
stop("coordinate systems differ")
if (is.list(distances)) {
L <- list()
for (i in 1:length(distances))
L[[i]] <- do.call("CheckXT", list(distances=distances[[i]], dim=dim))
class(L) <- "CheckXT"
return(L)
}
## inherits() instead of class(.) == "dist": objects may carry more
## than one class, and 'if' requires a length-one condition (R >= 4.2)
if (inherits(distances, "dist")) {
x <- as.vector(distances)
len <- length(distances)
} else if (is.matrix(distances) || is.vector(distances)) {
if (is.matrix(distances)) {
len <- nrow(distances)
if (is.null(dim)) dim <- ncol(distances)
else if (dim != ncol(distances))
stop("matrix of distances does not fit the given dimension")
} else {
len <- length(distances)
if (is.null(dim))
stop("dim is not given although 'distances' are used")
}
x <- distances
} else {
stop("'distances' not of required format.")
}
## in 'reduced examples' mode cut down the number of distances;
## otherwise recover the number of locations LEN from the k(k-1)/2
## pairwise distances
if (ex.red && len > ex.red^2 / 2) {
LEN <- as.integer(ex.red)
len <- as.integer(LEN * (LEN - 1) / 2)
x <- if (is.matrix(x)) x[1:len ,] else x[1:len]
} else {
LEN <- as.integer(1e-9 + 0.5 * (1 + sqrt(1 + 8 * len)))
if (LEN * (LEN-1) / 2 != len) LEN <- NaN
}
## keep exactly the sequence up to 'distances'
if (storage.mode(x) != "double") storage.mode(x) <- "double"
L <- list(x = as.matrix(x), #0
y = double(0), #1
T= double(0), #2
grid = FALSE, #3
spatialdim=as.integer(dim),#4
Zeit=FALSE, #5
dist.given = TRUE, #6
restotal = LEN, ## number of points
l = LEN, ## ?? physical length??
coordunits = curunits,
new_coordunits = newunits
)
class(L) <- "CheckXT"
return(L)
}
## --- branch 2: coordinates given as locations or grid ---------------
stopifnot(!missing(x))
if (is(x, "RFsp") || isSpObj(x)) {
return(CheckXT(x=coordinates(x), y=y, z=z, T=T, grid=grid,
distances=distances, dim=dim, length.data=length.data,
y.ok=y.ok, printlevel=printlevel))
}
if (is.raster(x)) x <- as(x, 'GridTopology')
## if 'grid' is not given but the data length is known, try the grid
## interpretation first and check consistency with length.data
if ((missing(grid) || length(grid) == 0) && !missing(length.data)) {
new <- try(CheckXT(x=x, y=y, z=z, T=T, grid=TRUE, distances=distances,
dim=if (!missing(dim)) dim,
length.data = length.data, y.ok =y.ok,
printlevel = printlevel
), silent=TRUE)
## inherits() instead of class(.) != "try-error" (see note above)
if (grid <- !inherits(new, "try-error")) {
ratio <- length.data / new$restotal
if (grid <- ratio == as.integer(ratio)) {
if (printlevel>=PL_IMPORTANT && new$spatialdim > 1)
message("Grid detected. If it is not a grid, set grid=FALSE.\n")
}
}
return(if (grid) new else {
CheckXT(x, y, z, T, grid=FALSE, distances,
if (!missing(distances) && length(distances) > 0) dim=1,
length.data = length.data,
printlevel = printlevel) }
)
} # if (missing(grid) && !missing(length.data))
gridtriple <- FALSE
## sp's GridTopology already encodes a gridtriple c(start, step, length)
if (is.GridTopology <- is(x, "GridTopology")){
x <- rbind(x@cellcentre.offset,
x@cellsize,
x@cells.dim)
if ((missing(grid) || length(grid) == 0)) grid <- TRUE else stopifnot(grid)
gridtriple <- TRUE
}
##else {
## is.GridTopology <- FALSE
##}
if (is.data.frame(x)) {
if (ncol(x)==1) x <- as.vector(x) else x <- as.matrix(x)
}
stopifnot(length(x) != 0)
# stopifnot(all(unlist(lapply(as.list(x), FUN=function(li) is.numeric(li))))) ## wann benoetigt???
stopifnot(is.numeric(x))# um RFsimulte(model, data) statt data=data abzufangen
# stopifnot(all(is.finite(x)), all(is.finite(y)), all(is.finite(z))) ; s.u. unlist
if (is.matrix(x)) {
## x given as a matrix: one row per location or gridtriple rows
if (!is.numeric(x)) stop("x is not numeric.")
if (length(z)!=0) stop("If x is a matrix, then z may not be given")
if (length(y)!=0) {
if (!y.ok) stop("If x is a matrix, then y may not be given")
if (length(T)!=0)
stop("If x is a matrix and y is given, then T may not be given")
if (!is.matrix(y) || ncol(y) != ncol(x) ||
nrow(x)==3 && nrow(y)!=3 && ((missing(grid) || length(grid) == 0) ||
grid))
stop("y does not match x (it must be a matrix)")
}
## auto-detect earth coordinates from the column names and switch
## the coordinate system (the message appears once per session)
if (coord_system == COORD_SYS_NAMES[coord_auto + 1] && ncol(x) >= 2
&& ncol(x) <= 3 && !is.null(n <- dimnames(x)[[2]])) {
if (any(idx <- earth_coordinate_names(n))) {
if (length(idx) == 2 && !all(idx == 1:2))
stop("earth coordinates not in order longitude/latitude")
cur <- curunits[1]
newunits <- RFopt$coords$new_coordunits
curunits <- RFopt$coords$coordunits
curunits[1:2] <- ZF_EARTHCOORD_NAMES[1:2]
if (newunits[1] == "") newunits[1] <- UNITS_NAMES[units_km + 1]
newunits[2:3] <- newunits[1]
if (RFopt$internal$warn_coordinates)
message("\n\nNOTE: current units are ",
if (cur=="") "not given and" else paste("'", cur, "', but"),
" earth coordinates detected:\n",
"earth coordinates will be transformed into units of '",
newunits[1],
"'.\nIn particular, the values of all scale parameters of ",
"any model defined\nin R^3 (currently all models!) are ",
"understood in units of '", newunits[1],
"'.\nChange options 'coord_system' and/or 'units' if ",
"necessary.\n(This message appears only once per session.)\n")
coord_system <- COORD_SYS_NAMES[earth + 1]
RFoptions(coords.coord_system = coord_system,
coords.coordunits = curunits,
coords.new_coordunits = newunits,
internal.warn_coordinates=FALSE)
} else {
RFoptions(coords.coord_system = COORD_SYS_NAMES[cartesian + 1])
}
}
spatialdim <- ncol(x)
len <- nrow(x)
## heuristics: decide whether the matrix describes a grid
if (spatialdim==1 && len != 3 && (missing(grid) || length(grid) == 0)) {
if (length(x) <= 2) grid <- TRUE
else {
dx <- diff(x)
grid <- max(abs(diff(dx))) < dx[1] * RFopt$general$gridtolerance
}
} # else {
if ((missing(grid) || length(grid) == 0) &&
any(apply(x, 2, function(z) (length(z) <= 2) || max(abs(diff(diff(z))))
> RFopt$general$gridtolerance))) {
grid <- FALSE
}
if ((missing(grid) || length(grid) == 0) || !is.logical(grid)) {
grid <- TRUE
if (spatialdim > 1 && RFopt$internal$warn_ambiguous) {
RFoptions(internal.warn_ambiguous = FALSE)
warning("Ambiguous interpretation of the coordinates. Better give the logical parameter 'grid=TRUE' explicitely. (This warning appears only once per session.)")
}
}
if (grid && !is.GridTopology) {
if (gridtriple <- len==3) {
if (printlevel >= PL_SUBIMPORTANT && RFopt$internal$warn_oldstyle) {
message("x was interpreted as a gridtriple; the new gridtriple notation is:\n 1st row of x is interpreted as starting values of sequences,\n 2nd row as step,\n 3rd row as number of points (i.e. length),\n in each of the ", ncol(x), " directions.")
}
} else len <- rep(len, times=spatialdim) # Alex 8.10.2011
}
if (grid && !gridtriple) {
## list with columns as list elements -- easier way to
## do it??
x <- lapply(apply(x, 2, list), function(r) r[[1]])
if (length(y) != 0) y <- lapply(apply(y, 2, list), function(r) r[[1]])
}
} else { ## x, y, z given separately
if (length(y)==0 && length(z)!=0) stop("y is not given, but z")
xyzT <- list(x=if (!missing(x)) x, y=y, z=z, T=T)
for (i in 1:4) {
if (!is.null(xyzT[[i]]) && !is.numeric(xyzT[[i]])) {
if (printlevel>PL_IMPORTANT)
message(names(xyzT)[i],
" not being numeric it is converted to numeric")
assign(names(xyzT)[i], as.numeric(xyzT[[i]]))
}
}
remove(xyzT)
spatialdim <- 1 + (length(y)!=0) + (length(z)!=0)
if (spatialdim==1 && ((missing(grid) || length(grid) == 0) || !grid)) {
## ueberschreibt Einstellung des Nutzers im Falle d=1
## [overrides the user's setting in case d=1]
if (length(x) <= 2) newgrid <- TRUE
else {
dx <- diff(x)
newgrid <- max(abs(diff(dx))) < dx[1] * RFopt$general$gridtolerance
}
if ((missing(grid) || length(grid) == 0)) grid <- newgrid
else if (xor(newgrid, grid) && RFopt$internal$warn_on_grid) {
RFoptions(internal.warn_on_grid = FALSE)
message("coordinates", if (grid) " do not",
" seem to be on a grid, but grid = ", grid)
}
}
len <- c(length(x), length(y), length(z))[1:spatialdim]
if (!(missing(grid) || length(grid) == 0) && !grid) { ## sicher nicht grid, ansonsten ausprobieren
if (any(diff(len) != 0)) stop("some of x, y, z differ in length")
x <- cbind(x, y, z)
## make a matrix out of the list
len <- len[1]
} else {
if ((missing(grid) || length(grid) == 0) && any(len != len[1]))
grid <- TRUE
x <- list(x, y, z)[1:spatialdim]
}
y <- z <- NULL ## wichtig dass y = NULL ist, da unten die Abfrage
} ## end of x, y, z given separately
if (!all(is.finite(unlist(x)))) {
stop("coordinates are not all finite")
}
## bring grids into the standard 3-row representation c(start, step, len)
if ((missing(grid) || length(grid) == 0) || grid) {
if (gridtriple) {
if (len != 3)
stop("In case of simulating a grid with option gridtriple, exactly 3 numbers are needed for each direction")
lr <- x[3,] # apply(x, 2, function(r) length(seq(r[1], r[2], r[3])))
##x[2,] <- x[1,] + (lr - 0.999) * x[3,] ## since own algorithm recalculates
## the sequence, this makes sure that
## I will certainly get the result of seq
## altough numerical errors may occurs
restotal <- prod(x[3, ])
if (length(y)!=0 && !all(y[3,] == x[3,]))
stop("the grids of x and y do not match ")
} else {
xx <- seq2grid(x, "x", grid,
RFopt$internal$warn_ambiguous, RFopt$general$gridtolerance)
if (length(y)!=0) {
yy <- seq2grid(y, "y", grid,
RFopt$internal$warn_ambiguous,
RFopt$general$gridtolerance)
if (xor(is.logical(xx), is.logical(yy)) ||
(!is.logical(xx) && !all(yy[3,] == xx[3,])))
stop("the grids for x and y do not match")
}
if (missing(grid) || length(grid) == 0) grid <- !is.logical(xx)
if (grid) {
x <- xx
if (length(y) != 0) y <- yy
restotal <- prod(len)
len <- 3
} else {
x <- sapply(x, function(z) z)
if (length(y) != 0) y <- sapply(y, function(z) z)
}
}
if (grid && any(x[3, ] <= 0))
stop(paste("step must be postive. Got as steps",
paste(x[3,], collapse=",")))
##if (len == 1) stop("Use grid=FALSE if only a single point is simulated")
}
if (!grid) {
restotal <- nrow(x)
if (length(y)==0) {
## for small point sets, warn about duplicated locations
if (restotal < 200 && any(as.double(dist(x)) == 0)) {
d <- as.matrix(dist(x))
diag(d) <- 1
idx <- which(as.matrix(d) ==0)
if (printlevel>PL_ERRORS)
Print(x, dim(d), idx , cbind( 1 + ((idx-1)%% nrow(d)), #
1 + as.integer((idx - 1) / nrow(d))) )
warning("locations are not distinguishable")
}
## fuer hoehere Werte con total ist ueberpruefung nicht mehr praktikabel
}
}
## transform earth coordinates into the requested cartesian system
if (coord_system == "earth") {
# if (ncol(x) > 4) stop("earth coordinates have maximal 3 components")
opt <- RFoptions()$coords ## muss nochmals neu sein
global.units <- opt$new_coordunits[1]
if (global.units[1] == "") global.units <- "km"
Raumdim <- ncol(x) #if (grid) ncol(x) else
new_is_cartesian <- new_coord_system %in% CARTESIAN_SYSTEMS
if (new_is_cartesian) {
if (sum(idx <- is.na(opt$zenit))) {
zenit <- (if (grid) x[1, 1:2] + x[2, 1:2] * (x[3, 1:2] - 1) else
if (opt$zenit[!idx] == 1) colMeans(x[, 1:2]) else
if (opt$zenit[!idx] == Inf) colMeans(apply(x[, 1:2], 2, range)) else
stop("unknown value of zenit"))
RFoptions(zenit = zenit)
}
code <- switch(new_coord_system,
"cartesian" = CARTESIAN_COORD,
"gnomonic" = GNOMONIC_PROJ,
"orthographic" = ORTHOGRAPHIC_PROJ,
stop("unknown projection method")
)
x <- RFfctn(RMtrafo(new=code), x, grid=grid,
coords.new_coordunits=global.units,
coords.new_coord_system = "keep")
if (length(y) != 0)
y <- RFfctn(RMtrafo(new=code), y, grid=grid,
coords.new_coordunits=global.units,
coords.new_coord_system = "keep")
if (new_coord_system == "cartesian") {
Raumdim <- max(3, Raumdim)
spatialdim <- Raumdim
}
dim(x) <- c(length(x) /Raumdim, Raumdim)
#x <- t(x)
## never try to set the following lines outside the 'if (new_coord_system'
## as in case of ..="keep" none of the following lines should be set
RFoptions(coords.coord_system =
if (new_is_cartesian) "cartesian" else new_coord_system)
grid <- FALSE
} else if (!(new_coord_system %in% c("keep", "sphere", "earth"))) {
warning("unknown new coordinate system")
}
}
## time component: accepted as triple c(start, step, length) or sequence
if (Zeit <- length(T)!=0) {
Ttriple <- length(T) == 3;
if (length(T) <= 2) Tgrid <- TRUE
else {
dT <- diff(T)
Tgrid <- max(abs(diff(dT))) < dT[1] * RFopt$general$gridtolerance
}
if (is.na(RFopt$general$Ttriple)) {
if (Ttriple && Tgrid)
stop("ambiguous definition of 'T'. Set RFoptions(Ttriple=TRUE) or ",
"RFoptions(Ttriple=FALSE)")
if (!Ttriple && !Tgrid) stop("'T' does not have a valid format")
} else if (RFopt$general$Ttriple) {
if (!Ttriple)
stop("'T' is not given in triple format 'c(start, step, length)'")
Tgrid <- FALSE
} else {
if (!Tgrid) stop("'T' does not define a grid")
Ttriple <- FALSE
}
if (Tgrid)
T <- as.vector(seq2grid(list(T), "T", Tgrid,
RFopt$internal$warn_ambiguous,
RFopt$general$gridtolerance))
restotal <- restotal * T[3]
}
if (!missing(dim) && !is.null(dim) && spatialdim != dim) {
stop("'dim' should be given only when 'distances' are given. Here, 'dim' contradicts the given coordinates.")
}
## in 'reduced examples' mode cut down the number of locations
if (ex.red) {
if (grid) {
x[3, ] <- pmin(x[3, ], ex.red)
if (length(y) > 0) y[3, ] <- pmin(y[3, ], ex.red)
restotal <- as.integer(prod(x[3, ]))
} else {
len <- restotal <- as.integer(min(nrow(x), ex.red^spatialdim))
x <- x[1:len, , drop=FALSE]
if (length(y) > 0) y <- y[1:len, , drop=FALSE]
}
if (Zeit) {
T[3] <- min(T[3], 3)
restotal <- as.integer(restotal * T[3])
}
}
## keep exactly the sequence up to 'grid'
if (length(x) > 0) {
if (storage.mode(x) != "double") storage.mode(x) <- "double"
} else x <- double(0)
if (length(y) > 0) {
if (storage.mode(y) != "double") storage.mode(y) <- "double"
} else y <- double(0)
L <- list(x=x, #0
y=y, #1
T=as.double(T), #2
grid=as.logical(grid), #3
spatialdim=as.integer(spatialdim), #4
Zeit=Zeit, #5
dist.given=FALSE, #6
restotal=as.integer(restotal), ## 7, nr of locations
l=as.integer(len), ## 8, physical "length/rows" of input
coordunits = curunits, #9
new_coordunits = newunits) #10
class(L) <- "CheckXT"
return(L)
}
## Transpose the coordinate matrices of a "CheckXT" object (or a list of
## such objects) from row-per-location to column-per-location layout, as
## expected by the C interface. Grid representations and empty matrices
## are left untouched.
##
## Args:
##   new: a single "CheckXT"-style list (components $x, $y, $grid, ...)
##        or a list of such lists (several coordinate sets).
## Returns: the same structure with non-grid $x / $y matrices transposed.
trafo.to.C_CheckXT <- function(new) {
  if (is.list(new[[1]])) {
    ## several coordinate sets: transpose each set separately
    for (i in seq_along(new)) {
      if (length(new[[i]]$x) > 0 && !new[[i]]$grid) new[[i]]$x <- t(new[[i]]$x)
      if (length(new[[i]]$y) > 0 && !new[[i]]$grid) new[[i]]$y <- t(new[[i]]$y)
    }
  } else {
    if (length(new$x) > 0 && !new$grid) new$x <- t(new$x)
    if (length(new$y) > 0 && !new$grid) new$y <- t(new$y)
  }
  new
}
## Convenience wrapper: standardize the given coordinate specification
## via CheckXT() and convert the result into the transposed layout
## required by the C level (see trafo.to.C_CheckXT).
## Arguments are identical to CheckXT().
C_CheckXT <- function(x, y=NULL, z=NULL, T=NULL, grid, distances=NULL,
                      dim=NULL, # == spatialdim!
                      length.data,
                      y.ok = FALSE,
                      printlevel = RFoptions()$general$printlevel) {
  checked <- CheckXT(x=x, y=y, z=z, T=T, grid=grid, distances=distances,
                     dim=dim, length.data=length.data, y.ok=y.ok,
                     printlevel=printlevel)
  trafo.to.C_CheckXT(checked)
}
## Transform earth coordinates (longitude/latitude [, height]) into a
## cartesian coordinate system or one of the supported projections.
## 'system' may be given by name (partially matched against ISONAMES)
## or directly as the internal integer code. If 'units' is NULL, the
## globally configured target units are used, falling back to "km".
## Returns a matrix with columns named "X", "Y", "Z", "T" as applicable.
RFearth2cartesian <- function(coord, units=NULL, system = "cartesian",
                              grid=FALSE) {
  if (is.character(system)) {
    system <- pmatch(system, ISONAMES) - 1
  }
  stopifnot(system %in% c(CARTESIAN_COORD, GNOMONIC_PROJ, ORTHOGRAPHIC_PROJ))
  if (is.null(units)) {
    ## fall back to the globally configured target units, default "km"
    global.units <- RFoptions()$coords$new_coordunits[1]
    units <- if (global.units[1] == "") "km" else global.units
  }
  ## a single point given as a vector becomes a one-row matrix
  if (!is.matrix(coord)) coord <- t(coord)
  res <- RFfctn(RMtrafo(new=system), coord, grid=grid,
                coords.new_coord_system = "keep",
                coords.new_coordunits = units,
                coords.coord_system = "earth")
  colnames(res) <- c("X", "Y", "Z", "T")[seq_len(ncol(res))]
  res
}
## Compute pairwise distances between locations given in earth
## coordinates by first transforming them into a cartesian system (or a
## projection; see RFearth2cartesian) and then applying stats::dist().
## '...' is forwarded to dist(); 'units' defaults to the globally
## configured target units, falling back to "km".
RFearth2dist <- function(coord, units=NULL, system="cartesian",
                         grid=FALSE, ...) {
  if (is.character(system)) {
    system <- pmatch(system, ISONAMES) - 1
  }
  stopifnot(system %in% c(CARTESIAN_COORD, GNOMONIC_PROJ, ORTHOGRAPHIC_PROJ))
  if (is.null(units)) {
    ## fall back to the globally configured target units, default "km"
    global.units <- RFoptions()$coords$new_coordunits[1]
    units <- if (global.units[1] == "") "km" else global.units
  }
  ## a single point given as a vector becomes a one-row matrix
  if (!is.matrix(coord)) coord <- t(coord)
  cart <- RFfctn(RMtrafo(new=system), coord, grid=grid,
                 coords.new_coord_system = "keep",
                 coords.new_coordunits = units,
                 coords.coord_system = "earth")
  dist(cart, ...)
}
## used by RFratiotest, fitgauss, Crossvalidation, likelihood-ratio, RFempir
## Bring model, coordinates and data into a common standardized form.
## Used by RFratiotest, fitgauss, Crossvalidation, likelihood-ratio and
## RFempir. Accepts coordinates as x/y/z/T, as distances, inside an
## RFsp/sp data object, or embedded as columns of 'data'.
## Returns a list with (among others) the prepared model, the data sets,
## the standardized coordinates ('coord', a CheckXT object), dimensions,
## coordinate range, minimal distance, vdim and repetition count.
StandardizeData <- function(model,
x, y=NULL, z=NULL, T=NULL,
grid, data, distances=NULL,
RFopt, mindist_pts=2,
dim=NULL, allowFirstCols=TRUE, vdim = NULL, ...) {
#if (missing(x)) Print(data, T) else Print(data, T, x)
RFoptions(internal.examples_reduced=FALSE)
#Print(data); if (!missing(x)) print(x); Print(missing(x), y, z, T, missing(dim), missing(grid), missing(distances))
if (missing(dim)) dim <- NULL
if (missing(grid)) grid <- NULL
dist.given <- !missing(distances) && length(distances)>0
matrix.indep.of.x.assumed <- FALSE
rangex <- neu <- gridlist <- RFsp.coord <- gridTopology <- data.RFparams <-
mindist <- data.col <- NULL
if (missing(data)) stop("missing data")
missing.x <- missing(x)
if (isSpObj(data)) data <- sp2RF(data)
## --- case 1: data given as RFsp object(s); coordinates are contained
## in the object, so none may be given separately
if (isRFsp <- is(data, "RFsp") || (is.list(data) && is(data[[1]], "RFsp"))){
## ||(is.list(data) && is(data[[1]], "RFsp")))
if ( (!missing.x && length(x)!=0) || length(y)!=0 || length(z) != 0 ||
length(T) != 0 || dist.given || length(dim)!=0 || length(grid) != 0)
stop("data object already contains information about the locations. So, none of 'x' 'y', 'z', 'T', 'distance', 'dim', 'grid' should be given.")
if (!is.list(data)) data <- list(data)
sets <- length(data)
x <- RFsp.coord <- gridTopology <- data.RFparams <- vector("list", sets)
if (!is.null(data[[1]]@.RFparams)) {
if (length(vdim) > 0) stopifnot( vdim == data[[1]]@.RFparams$vdim)
else vdim <- data[[1]]@.RFparams$vdim
}
dimdata <- NULL
dimensions <- (if (isGridded(data[[1]])) data[[1]]@grid@cells.dim
else nrow(data[[1]]@data))
dimensions <- c(dimensions, data[[1]]@.RFparams$vdim)
## extract coordinates and data matrices from every set
for (i in 1:length(data)) {
xi <- list()
xi$grid <- isGridded(data[[i]])
compareGridBooleans(grid, xi$grid)
data[[i]] <- selectDataAccordingFormula(data[[i]], model=model)
data.RFparams[[i]] <- data[[i]]@.RFparams
gridTopology[[i]] <- if (xi$grid) data[[i]]@grid else NULL
RFsp.coord[[i]] <- if (!xi$grid) data[[i]]@coords else NULL
dimensions <- if (xi$grid) data[[i]]@grid@cells.dim else nrow(data[[i]]@data)
dimensions <- c(dimensions, data[[i]]@.RFparams$vdim)
if (RFopt$general$vdim_close_together) dimensions <- rev(dimensions)
dimdata <- rbind(dimdata, c(dimensions, data[[i]]@.RFparams$n))
tmp <- RFspDataFrame2conventional(data[[i]])
xi$x <- tmp$x
if (!is.null(tmp$T)) xi$T <- tmp$T
data[[i]] <- as.matrix(tmp$data)
x[[i]] <- xi
}
## drop trivial vdim / repetition columns from the dimension table
idx <- if (RFopt$general$vdim_close_together) 1 else length(dimensions)
if (all(dimdata[, idx] == 1))
dimdata <- dimdata[, -idx, drop=FALSE]
if (all(dimdata[, ncol(dimdata)] == 1)) # repet
dimdata <- dimdata[, -ncol(dimdata), drop=FALSE]
} else { # !isRFsp
## dimdata wird spaeter bestimmt
## --- case 2: coordinates given as distances
if (dist.given) {
stopifnot(missing(x) || length(x)==0, length(y)==0, length(z)==0)
if (!is.list(distances)) {
distances <- list(distances)
if (is.list(data))
stop("if list of distances is given then also for data ")
data <- list(as.matrix(data))
} else {
if (!is.list(data))
stop("if list of distances is given then also for data ")
## BUG FIX: this length check used to stand after an unconditional
## stop() and was unreachable
if (length(data) != length(distances))
stop("length of distances does not match length of data")
}
for (i in 1:length(distances)) {
if (any(is.na(data)))
stop("missing data are not allowed if distances are used.")
}
stopifnot(missing(T) || length(T)==0)
if (is.matrix(distances[[1]])) {
## distance vectors given column-wise: rows = spatial dimension
dimensions <- sapply(distances, nrow)
spatialdim <- tsdim <- xdimOZ <- dimensions[1]
if (length(dim) > 0 && dim != spatialdim)
stop("unclear specification of the distances: either the distances is given as a vector or distance vectors should given, where the number of rows matches the spatial dimension")
lcc <- sapply(distances, function(x) 0.5 * (1 + sqrt(1 + 8 * ncol(x))) )
if (!all(diff(dimensions) == 0))
stop("sets of distances show different dimensions")
range_distSq <- function(M) range(apply(M, 2, function(z) sum(z^2)))
rangex <- sqrt(range(sapply(distances, range_distSq)))
} else {
## scalar distances given as vector(s); 'dim' must be supplied
xdimOZ <- 1L
spatialdim <- tsdim <- as.integer(dim)
lcc <- sapply(distances, function(x) if (is.matrix(x)) -1
else 0.5 * (1 + sqrt(1 + 8* length(x))))
rangex <- range(sapply(distances, range))
}
# Print(mindist, rangex, RFopt$nugget$tol)
mindist <- min(rangex)
if (is.na(mindist)) mindist <- 1 ## nur 1 pkt gegeben, arbitraerer Wert
## zero distances: replace by a tiny positive value if allowed
if (mindist <= RFopt$nugget$tol) {
if (!RFopt$general$allowdistanceZero)
stop("distance with value 0 identified -- use allowdistanceZero=T?")
mindist <- 1e-15 * (RFopt$nugget$tol == 0) + 2 * RFopt$nugget$tol
for (i in 1:length(distances))
if (is.vector(distances[[i]]))
distances[[i]][distances[[i]] == 0] <- mindist
## BUG FIX: select the columns whose squared norm is zero
## ('== 0' was missing), analogously to the vector case above
else distances[[i]][1, apply(distances[[i]], 2,
function(z) sum(z^2)) == 0] <- mindist
}
len <- as.integer(lcc)
if (any(len != lcc)) stop("number of distances not of form k(k-1)/2")
neu <- CheckXT(distances=distances, dim = spatialdim)
coordunits <- RFopt$coords$coordunits
Zeit <- FALSE
} else { ## distances not given
## --- case 3: plain coordinates/data; possibly the coordinates are
## embedded as columns of 'data'
if (is.data.frame(data) || !is.list(data)) {
# Print(missing(x), x, data, is.data.frame(data), !is.list(data))
if (!missing(x) && is.list(x) && !is.data.frame(x) &&
(length(x$grid)==0 || length(x$restot)==0))
stop("either both coordinates and data must be lists or none")
data <- list(data)
}
sets <- length(data)
for (i in 1:sets) {
if (is.data.frame(data[[i]]) || is.vector(data[[i]]))
data[[i]] <- as.matrix(data[[i]])
}
sel <- try(selectAccordingFormula(data[[1]], model), silent=TRUE)
if (is(sel, "try-error")) sel <- NULL
if (missing(x)) { ## dec 2012: matrix.indep.of.x.assumed
## try to recognize coordinate columns by their names
if (!is.null(dnames <- colnames(data[[1]]))) {
if ((!any(is.na(xi <- RFopt$coord$coordnames))) ||
(length(xi <- earth_coordinate_names(dnames)) == 2) ||
(length(xi <- cartesian_coordinate_names(dnames)) > 0) ||
(length(xi <- general_coordinate_names(dnames)) > 0) ) {
x <- list()
for (i in 1:sets) {
xx <- data[[i]][ , xi, drop=FALSE]
storage.mode(xx) <- "numeric"
x[[i]] <- list(x=xx, grid = FALSE)
if (length(sel) == 0) sel <- -xi
}
}
}
if (missing(x)) { ## if still missing
## fall back: take the first columns as coordinates, or assume
## the data matrix is independent of any coordinates
data.col <- try(data.columns(data[[1]], xdim=dim,
force=allowFirstCols,
halt=!allowFirstCols))
x <- list()
if (is(data.col, "try-error")) {
if (length(sel) > 0){
for (i in 1:sets) {
x[[i]] <- data[[i]][ , !sel, drop=FALSE]
storage.mode(x[[i]]) <- "numeric"
}
if (length(dim) == 0) {
warning("better give 'dim' explicitely.")
}
if (length(dim) > 0 && ncol(x[[i]]) != dim)
stop("'dim' does not match the recognized coordindates")
} else {
sel <- TRUE
data.col <- NULL
matrix.indep.of.x.assumed <- TRUE
for (i in 1:sets) {
x[[i]] <- 1:nrow(as.matrix(data[[i]]))
storage.mode(x[[i]]) <- "numeric"
}
}
### x[1] <- 0 ## so no grid ! ## why forbidding ?? 15.5.2015
} else {
for (i in 1:sets) {
xx <- data[[i]][, data.col$x, drop=FALSE]
storage.mode(xx) <- "numeric"
x[[i]] <- list(x=xx, grid=FALSE)
if (length(sel) == 0) sel <- data.col$data
}
}
}
## keep only the data columns in 'data'
for (i in 1:sets) {
data[[i]] <- data[[i]][ , sel, drop=FALSE]
storage.mode(data[[i]]) <- "numeric"
}
} ## xgiven; KEIN ELSE, auch wenn nachfolgend z.T. gedoppelt wird
## normalize 'x' into a list of coordinate sets
if (is.data.frame(x)) x <- as.matrix(x)
if (is.list(x)) {
if (length(y)!=0 || length(z)!=0 || length(T)!=0)
stop("if x is alist 'y', 'z', 'T' may not be given")
if (!is.list(x[[1]])) {
if (length(data) == 1) x <- list(x)
else stop("number of sets of 'x' and 'data' differ")
}
} else {
x <- list(x=x)
if (length(y)!=0) {
stopifnot(!is.list(y))
x$y <- y
}
if (length(z)!=0) {
stopifnot(!is.list(z))
x$z <- z
}
if (length(T)!=0) {
stopifnot(!is.list(T))
x$T <- T
}
if (!is.null(grid))
x$grid <- grid
if (!is.list(data)) data <- list(as.matrix(data))
x <- list(x)
}
##}
} # ! distance
sets <- length(data)
dimdata <- matrix(nrow=sets, ncol=length(base::dim(data[[1]])))
for (i in 1:sets) dimdata[i, ] <- base::dim(data[[i]])
} # !isRFsp
## --- standardize the coordinates and derive summary quantities
if (!dist.given) { ## x coordinates, not distances
neu <- CheckXT(x=x) #, y=y, z=z, T=T, grid=grid, distances=distances,
# dim=dim, length) # , length.data=length(data[[i]]), printlevel = 0)
if (!is.list(neu[[1]])) neu <- list(neu)
coordunits<- neu[[1]]$coordunits
spatialdim <- as.integer(neu[[1]]$spatialdim)
Zeit <- neu[[1]]$Zeit
tsdim <- as.integer(spatialdim + Zeit)
len <- sapply(neu, function(x)
(if (x$grid) prod(x$x[3, ]) else nrow(x$x)) *
(if (Zeit) x$T[3] else 1))
getrange <- function(x)
if (x$grid) rbind(x$x[1, ], x$x[1, ] + x$x[2, ] * (x$x[3, ] - 1))
else apply(x$x, 2, range)
rangex <- sapply(neu, getrange)
## falls mehrere datasets:
if (ncol(x[[1]]$x) > 1 || is.null(x[[1]]$dist.given) || !x[[1]]$dist.given){
rangex <- t(rangex)
base::dim(rangex) <- c(length(rangex) / spatialdim, spatialdim)
}
rangex <- apply(rangex, 2, range)
## minimal distance between locations, estimated from a sample for
## larger point sets
getmindistSq <- function(x) {
if (x$grid) sum(x$x[2,]^2)
else if (nrow(x$x) < 2) NA
else if (nrow(x$x) <= mindist_pts) min(dist(x$x))
else min(dist(x$x[sample(nrow(x$x), mindist_pts), ]))
}
if (Zeit && any(sapply(neu, function(x) x$T[2]) <= RFopt$nugget$tol))
stop("step of time component smaller than nugget tolerance 'tol'")
if (any(sapply(neu, function(x) x$grid && any(x$x[2, ]<=RFopt$nugget$tol))))
stop("step of some spatial component smaller than nugget tolerance 'tol'")
## jitter coinciding points (if allowed) until all are distinguishable
zaehler <- 0
repeat {
mindist <- sqrt(min(sapply(neu, getmindistSq)))
if (is.na(mindist)) mindist <- 1 ## nur 1 pkt gegeben, arbitraerer Wert
if (mindist <= RFopt$nugget$tol) {
if (!RFopt$general$allowdistanceZero)
stop("Distance with value 0 identified -- use allowdistanceZero=T?")
if ((zaehler <- zaehler + 1) > 10)
stop("unable to scatter point pattern")
for (i in 1:length(neu)) if (!neu[[i]]$grid)
neu[[i]]$x <- neu[[i]]$x + rnorm(length(neu[[i]]$x), 0,
10 * RFopt$nugget$tol)
} else break;
}
xdimOZ <- ncol(neu[[1]]$x)
}
if (length(dim) > 0) stopifnot(dim == tsdim)
varnames <- try(colnames(data[[1]]))
## geht x[[1]]$x immer gut ??
# Print(missing(x), neu)
names <- GetDataNames(model=model,
coords=if (missing(x)) NULL else x[[1]]$x,
locinfo=neu[[1]]) #ohne data!
if (is.null(names$varnames))
names$varnames <-
## inherits() instead of class(.) == "try-error": try() errors may
## carry more than one class and 'if' needs a length-one condition
if (inherits(varnames, "try-error")) NULL else varnames
restotal <- sapply(neu, function(x) x$restotal)
ldata <- sapply(data, length)
## determine the multivariate dimension vdim, asking the model if needed
if (length(vdim) == 0) {
if (all(sapply(data, function(x) is.vector(x) || ncol(x) == 1)))
vdim <- 1
else if (!missing(model)) {
vdim <- rfInit(list("Cov", PrepareModel2(model=model, ...,
x=trafo.to.C_CheckXT(neu))),
x=x, y=y, z=z, T=T, grid=grid, distances=distances,
dim=dim, reg=MODEL_AUX, dosimulate=FALSE)[1]
} else vdim <- NA
}
repetitions <- as.integer(ldata / (restotal * vdim))
# Print(data, ldata, repetitions, restotal, vdim, neu, dist.given)
if (!is.na(vdim) && any(ldata != repetitions * restotal * vdim))
stop("mismatch of data dimensions")
RFoptions(internal.examples_reduced=RFopt$internal$examples_red)
return(list(
## coord = expandiertes neu # #
model = if (missing(model)) NULL else
PrepareModel2(model, ..., x=trafo.to.C_CheckXT(neu)),
orig.model = if (missing(model)) NULL else model,
data=data, dimdata=dimdata, isRFsp = isRFsp,
RFsp.coord = RFsp.coord,
coord = neu,
dist.given=dist.given,
gridTopology = gridTopology,
data.RFparams = data.RFparams,
spatialdim=spatialdim,
tsdim=tsdim,
rangex = as.matrix(rangex),
coordunits=coordunits,
Zeit = Zeit,
matrix.indep.of.x.assumed = matrix.indep.of.x.assumed,
len = len,
mindist = mindist,
xdimOZ = xdimOZ,
vdim = vdim,
coordnames=names$coordnames,
varnames=if (length(names$varnames)==0) ""
else names$varnames,
data.col = data.col,
repetitions = repetitions
))
}
| /RandomFields/R/convert.R | no_license | ingted/R-Examples | R | false | false | 46,092 | r |
## Authors
## Martin Schlather, schlather@math.uni-mannheim.de
##
##
## Copyright (C) 2015 Martin Schlather
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 3
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
## Normalize any user-supplied model specification (RMmodel, formula,
## list, or fitted "RFfit"/"RF_fit" object) into the internal list
## representation and stamp it with class "RM_model".
##
## Args:
##   model: the model specification; fitted objects are first reduced
##          to their maximum-likelihood ("ml") model
##   ...  : further arguments passed on to parseModel()
##   x    : optional coordinate information passed on to parseModel()
## Returns: a list of class "RM_model".
PrepareModel2 <- function(model, ..., x=NULL) {
  if (missing(model) || is.null(model)) stop("'model' must be given.")
  method <- "ml"
  ## inherits() instead of class(.) == ...: objects may carry more than
  ## one class, and 'if' requires a length-one condition (R >= 4.2)
  if (inherits(model, "RF_fit")) model <- model[[method]]$model
  else if (inherits(model, "RFfit")) model <- model[method]
  m <- parseModel(model, ..., x=x)
  ## temporarily wrap a non-'+' model into a '+' so that all submodels
  ## can be traversed uniformly below
  if (notplus <- !(m[[1]] %in% ZF_PLUS)) m <- list(ZF_SYMBOLS_PLUS, m)
  ## replace mixed models that degenerate to a pure mean by a trend model;
  ## guard against the degenerate case of a '+' without submodels
  ## (2:length(m) would otherwise count downwards)
  if (length(m) > 1) for (i in 2:length(m)) {
    if ((m[[i]][[1]] %in% ZF_MIXED) && length(m[[i]]$X)==1 &&
        is.numeric(m[[i]]$X) && m[[i]]$X==1 && !is.null(m[[i]]$b)) {
      m[[i]] <- list(ZF_TREND[2], mean=m[[i]]$b)
      if (RFoptions()$general$printlevel > PL_IMPORTANT)
        message(paste("The '1' in the mixed model definition has been replaced by '", ZF_TREND[1], "(mean=", m[[i]]$mean, ")'.", sep=""))
    }
  }
  if (notplus) m <- m[[2]]
  class(m) <- "RM_model"
  return(m)
  # if (class(model) != "formula") {
  #  if (is.list(model)) return(model)
  #  else stop("model of unknown form -- maybe you have used an obsolete definition. See ?RMmodel for the model definition")
  # }
  # return(listmodel)
}
PrepareModel <- function(model, param, trend=NULL,
nugget.remove=TRUE, method=NULL) {
## any of the users model definition (standard, nested, list) for the
## covariance function is transformed into a standard format, used
## especially in the c programs
##
## overwrites in some situation the simulation method for nugget.
## allows trend to be NA (or any other non finite value -- is not checked!)
## trend has not been implemented yet!
if (is(model, ZF_MODEL))
stop("models of class ZF_MODEL cannot be combined with obsolete RandomFields functions")
if (!is.null(method)) stop("to give method in PrepareModel is obsolete")
if (!is.null(trend))
if (!is.numeric(trend) || length(trend)!=1)
stop("in the obsolete setting, only constant mean can used")
if (is.list(model) && is.character(model[[1]]) &&
(is.null(names(model)) || names(model)[[1]]=="")) {
if (!missing(param) && !is.null(param))
stop("param cannot be given in the extended definition")
if (is.null(trend)) return(model)
trend <- list(ZF_TREND[2], mean=trend)
if (model[[1]] %in% ZF_PLUS) return(c(model, list(trend)))
else return(list(ZF_SYMBOLS_PLUS, model, trend))
}
printlevel <- RFoptions()$general$printlevel
STOP <- function(txt) {
if (printlevel>=PL_ERRORS) {
cat("model: ")
if (!missing.model) Print(model) else cat(" missing.\n") #
cat("param: ")
if (!missing.param) Print(param) else cat(" missing.\n") #
cat("trend: ")
Print(trend) #
}
stop("(in PrepareModel) ", txt, call.=FALSE)
}
## Local helper (closure over 'printlevel'): convert a single
## old-style model element -- a list with components $model (name),
## $var, $scale or $aniso, and optionally $kappas -- into the new
## nested representation list(DOLLAR, var=..., scale/aniso=...,
## list(name, k1=..., k2=..., ...)).
## Stops with an informative error for malformed or obsolete input.
transform <- function(model) {
if (!is.list(model)) {
STOP("some elements of the model definition are not lists")
}
m <- list(DOLLAR[1], var=model$v)
lm <- length(model) - 3 # var, scale/aniso, name
if (!is.null(model$a)) m$aniso <- model$a else m$scale <- model$scale
## model <- c(model, if (!is.null(model$a))
## list(aniso=model$a) else list(scale=model$s)) ## ???
## 'method' inside a model element is an obsolete notation
if (!is.na(p <- pmatch("meth", names(model), duplicates.ok=TRUE))) {
if (printlevel>=PL_ERRORS) Print(p, model) #
stop("method cannot be given with the model anymore. It must be given as a parameter to the function. See 'RFoptions' and 'RFsimulate'")
}
if (!is.null(model$me))
stop("'mean' seems to be given within the inner model definitions");
if (!is.character(model$m)) {
stop("'model' was not given extacly once each odd number of list entries or additional unused list elements are given.")
}
m1 <- list(model$m)
## copy the kappa parameters as named components k1, k2, ...
if (!is.null(model$k)) {
lm <- lm - 1
if (length(model$k) != 0)
for (i in 1:length(model$k)) {
eval(parse(text=paste("m1$k", i, " <- model$k[", i, "]", sep="")))
}
}
## all list elements must now be accounted for
if (lm != 0) {
if (printlevel>=PL_ERRORS) Print(lm, model) #
stop("some parameters do not fit")
}
m <- c(m, list(m1))
return(m)
} # end transform
op.list <- c(ZF_SYMBOLS_PLUS, ZF_SYMBOLS_MULT) ## if others use complex list definition !
missing.model <- missing(model)
missing.param <- missing(param) || is.null(param)
if (missing.param && is.null(model$param)) { ## full model
if (RFoptions()$internal$warn_oldstyle)
warning("the sequential list format is depreciated.")
if (missing.model || (length(model)==0)) model <- list()
else if (!is.list(model))
STOP("if param is missing, model must be a list of lists (or a list in the extended notation)")
if (is.null(trend) + is.null(model$mean) + is.null(model$trend)<2)
STOP("trend/mean is given twice")
if (!is.null(model$mean)) trend <- model$mean else
if (!is.null(model$trend)) trend <- model$trend else trend <- NULL
model$trend <- model$mean <- NULL
## the definition might be given at a deeper level as element
## $model of the list:
if (is.list(model$model)) {
if (!is.list(model$model[[1]]))
STOP("if param is missing, the model$model must be a list of lists")
model <- model$model
}
if (length(model)==0) { ## deterministic
return(if (is.null(trend)) NULL else list(ZF_TREND[2], mean=trend))
}
if (length(model) %% 2 !=1) STOP("list for model definition should be odd")
if (length(model)==1)
return(if (is.null(trend) ||
is.numeric(trend) && length(trend)==1 && !is.na(trend)&&trend==0)
transform(model[[1]])
else list(ZF_SYMBOLS_PLUS, transform(model[[1]]),
list(ZF_TREND[2], mean=trend)));
op <- pmatch(c(model[seq(2, length(model), 2)], recursive=TRUE),
op.list, duplicates.ok=TRUE) - 1
if (!all(is.finite(op))) STOP("operators are not all allowed; see the extended list definition for extensions")
model <- model[seq(1, length(model), 2)]
plus <- which(op==0)
if (length(plus) == 0) {
m <- list("*", lapply(model, transform))
} else {
plus <- c(0, plus, length(op)+1)
m <- list(ZF_SYMBOLS_PLUS)
for (i in 1:(length(plus) - 1)) {
m[[i+1]] <-
if (plus[i] + 1 == plus[i+1]) transform(model[[plus[i] + 1]])
else list(ZF_SYMBOLS_MULT,
lapply(model[(plus[i] + 1) : plus[i+1]], transform))
}
}
model <- m
} else { ## standard definition or nested model
if (missing.param) { ## a simple list of the model and the
## parameters is also possible
if (is.null(param <- model$p)) STOP("is.null(model$param)")
stopifnot(is.null(trend) || is.null(model$trend))
if (is.null(trend)) trend <- model$trend
if (!is.null(model$mean)) {
if (!is.null(trend)) STOP("mean and trend given twice")
trend <- model$mean
}
model <- model$model
}
stopifnot(is.character(model), length(model)==1)
if (is.matrix(param)) { ## nested
if (nrow(param) == 1)
return(PrepareModel(model=model, param=c(param[1], 0, param[-1]),
trend=trend))
name <- model
model <- list(ZF_SYMBOLS_PLUS)#, method=method)
for (i in 1:nrow(param)) {
model <- c(model,
if (is.na(param[i, 2]) || param[i, 2] != 0)
list(list(DOLLAR[1], var=param[i, 1], scale=param[i, 2],
if (ncol(param) >2) list(name, k=param[i,-1:-2])
else list(name)))
else list(list(DOLLAR[1], var=param[i,1],
list(ZF_NUGGET[2]))))
}
} else if (is.vector(param)) { ## standard, simple way
## falls trend gegeben, dann ist param um 1 Komponente gekuerzt
if (is.null(trend)) {
trend <- param[1]
param <- param[-1]
} else message("It is assumed that no mean is given so that the first component of param is the variance")
if (model == ZF_NUGGET[2]) {
model <- transform(list(model=model, var=sum(param[1:2]), scale=1))
} else {
if (length(param) > 3)
model <- transform(list(model=model, var=param[1], scale=param[3],
k=param[-1:-3]))
else
model <- transform(list(model=model, var=param[1], scale=param[3]))
if (is.na(param[2]) || param[2] != 0 || !nugget.remove) {# nugget
model <- list(ZF_SYMBOLS_PLUS,
model,
transform(list(model=ZF_NUGGET[2], var=param[2], scale=1)))
}
## if (!is.null(method)) model <- c(model, method=method) ## doppelt
}
} else stop("unknown format") # end nested/standard definition
}
return(if (is.null(trend) ||
is.numeric(trend) && length(trend)==1 && !is.na(trend) &&trend==0)
return(model)
else if (model[[1]] %in% ZF_PLUS)
c(model, list(list(ZF_TREND[2], mean=trend)))
else list(ZF_SYMBOLS_PLUS, model, list(ZF_TREND[2], mean=trend)))
}
## Try to interpret each coordinate vector x[[i]] as an equidistant grid.
## Returns a 3 x length(x) matrix with rows (start, step, number of points)
## per dimension, or FALSE when 'grid' was not explicitly given and the
## coordinates cannot be interpreted as a grid.
## 'name' is currently unused (kept for interface compatibility).
## Dead debug branches (`if (!TRUE) ...`, `if (FALSE && ...) ...`) of the
## original have been removed.
seq2grid <- function(x, name, grid, warn_ambiguous, gridtolerance) {
  xx <- matrix(nrow=3, ncol=length(x))
  step0 <- rep(FALSE, length(x)) # TRUE for degenerate (constant) dimensions
  gridnotgiven <- missing(grid) || length(grid) == 0
  for (i in seq_along(x)) {
    if (length(x[[i]]) == 1) {
      ## single point: trivially a grid of one location
      xx[,i] <- c(x[[i]], 0, 1)
      next
    }
    step <- diff(x[[i]])
    if (step[1] == 0.0) {
      ## degenerate direction: all coordinates identical
      ok <- step0[i] <- all(step == 0.0)
    } else {
      ## relative deviation of every step from the first one
      ok <- max(abs(step / step[1] - 1.0)) <= gridtolerance
    }
    if (!ok) {
      if (gridnotgiven) return(FALSE)
      stop("Different grid distances detected, but the grid must ",
           "have equal distances in each direction -- if sure that ",
           "it is a grid, increase the value of 'gridtolerance' which equals ",
           gridtolerance,".\n")
    }
    xx[,i] <- c(x[[i]][1], step[1], if (step0[i]) 1 else length(x[[i]]))
  }
  if (any(step0)) {
    if (all(step0)) {
      ## every direction degenerate: not usable as a grid
      if (gridnotgiven) return(FALSE)
      else stop("Within a grid, the coordinates must be distinguishable")
    } else {
      if (gridnotgiven && warn_ambiguous) {
        RFoptions(internal.warn_ambiguous = FALSE)
        warning("Interpretation as degenerated grid. Better give 'grid' explicitely. (This warning appears only once per session.)")
      }
    }
  }
  return(xx)
}
## Normalise user-supplied locations into the canonical "CheckXT" format.
## Input may be coordinates (x/y/z/T, possibly a grid or a gridtriple),
## a GridTopology / RFsp / sp object, or a set of distances; lists of such
## specifications are processed recursively (one "CheckXT" entry per set).
## Returns a list of class "CheckXT" with elements
##   x, y, T, grid, spatialdim, Zeit, dist.given, restotal, l,
##   coordunits, new_coordunits
## (or a list of such lists for multiple sets).
## Fixes relative to the original: class comparisons via inherits(), and the
## syntactically broken 'if (...) dim=1' argument repaired (see NOTE below).
CheckXT <- function(x, y=NULL, z=NULL, T=NULL, grid, distances=NULL,
                    dim=NULL, # == spatialdim!
                    length.data,
                    y.ok = FALSE,
                    printlevel = RFoptions()$general$printlevel){
  ## do not pass anything on "..." ! --- only used for internal calls
  ## when lists are re-passed
  ## converts the given coordinates into standard formats
  ## (one for arbitrarily given locations and one for grid points)
  if (!missing(x)) {
    if (is(x, "CheckXT")) return(x) # already normalised
    if (is.list(x)) {
      ## a plain list is re-dispatched as arguments; a list of lists is
      ## treated as several coordinate sets, each normalised recursively
      if (!is.list(x[[1]])) return(do.call("CheckXT", x))
      L <- list()
      for (i in 1:length(x)) {
        L[[i]] <-
          if (is(x[[i]], "CheckXT")) x[[i]] else do.call("CheckXT", x[[i]])
      }
      if (length(x) > 1) {
        ## all sets must agree on time component, dimension and on the
        ## distance/coordinate representation
        if (!all(diff(sapply(L, function(x) x$Zeit)) == 0) ||
            !all(diff(sapply(L, function(x) x$spatialdim)) == 0))
          stop("all sets must have the same dimension")
        if (!all(diff(sapply(L, function(x) x$dist.given)) == 0))
          stop("either all the sets must be based on distances or none")
      }
      class(L) <- "CheckXT"
      return(L)
    }
  }
  RFopt <- RFoptions()
  curunits <- RFopt$coords$coordunits
  newunits <- RFopt$coords$new_coordunits
  coord_system <- RFopt$coords$coord_system
  new_coord_system <- RFopt$coords$new_coord_system
  ex.red <- RFopt$internal$examples_reduced
  if (!missing(distances) && !is.null(distances)) { ## length==0 OK!
    ## distance-based specification: no coordinates may be given
    stopifnot(is.matrix(distances) || (!missing(dim) && !is.null(dim)),
              (missing(grid) || length(grid) == 0),
              missing(x) || is.null(x),
              length(y)==0,
              length(z)==0,
              length(T)==0)
    if (coord_system != new_coord_system && new_coord_system != "keep")
      stop("coordinate systems differ")
    if (is.list(distances)) {
      ## several sets of distances: normalise each one
      L <- list()
      for (i in 1:length(distances))
        L[[i]] <- do.call("CheckXT", list(distances=distances[[i]], dim=dim))
      class(L) <- "CheckXT"
      return(L)
    }
    if (inherits(distances, "dist")) { # BUGFIX: was class(distances) == "dist"
      x <- as.vector(distances)
      len <- length(distances)
    } else if (is.matrix(distances) || is.vector(distances)) {
      if (is.matrix(distances)) {
        ## matrix: one distance *vector* per column, nrow = dimension
        len <- nrow(distances)
        if (is.null(dim)) dim <- ncol(distances)
        else if (dim != ncol(distances))
          stop("matrix of distances does not fit the given dimension")
      } else {
        len <- length(distances)
        if (is.null(dim))
          stop("dim is not given although 'distances' are used")
      }
      x <- distances
    } else {
      stop("'distances' not of required format.")
    }
    if (ex.red && len > ex.red^2 / 2) {
      ## reduced-examples mode: truncate to at most ex.red points
      LEN <- as.integer(ex.red)
      len <- as.integer(LEN * (LEN - 1) / 2)
      x <- if (is.matrix(x)) x[1:len ,] else x[1:len]
    } else {
      ## invert len = LEN * (LEN - 1) / 2 to recover the number of points
      LEN <- as.integer(1e-9 + 0.5 * (1 + sqrt(1 + 8 * len)))
      if (LEN * (LEN-1) / 2 != len) LEN <- NaN
    }
    ## keep exactly the sequence up to 'distances'
    if (storage.mode(x) != "double") storage.mode(x) <- "double"
    L <- list(x = as.matrix(x), #0
              y = double(0), #1
              T= double(0), #2
              grid = FALSE, #3
              spatialdim=as.integer(dim),#4
              Zeit=FALSE, #5
              dist.given = TRUE, #6
              restotal = LEN, ## number of points
              l = LEN, ## ?? physical length??
              coordunits = curunits,
              new_coordunits = newunits
              )
    class(L) <- "CheckXT"
    return(L)
  }
  stopifnot(!missing(x))
  if (is(x, "RFsp") || isSpObj(x)) {
    ## sp/RFsp objects: restart from their coordinate matrix
    return(CheckXT(x=coordinates(x), y=y, z=z, T=T, grid=grid,
                   distances=distances, dim=dim, length.data=length.data,
                   y.ok=y.ok, printlevel=printlevel))
  }
  if (is.raster(x)) x <- as(x, 'GridTopology')
  if ((missing(grid) || length(grid) == 0) && !missing(length.data)) {
    ## 'grid' unknown: first try to interpret x as a grid; accept the grid
    ## interpretation only if the data length is a multiple of the grid size
    new <- try(CheckXT(x=x, y=y, z=z, T=T, grid=TRUE, distances=distances,
                       dim=if (!missing(dim)) dim,
                       length.data = length.data, y.ok =y.ok,
                       printlevel = printlevel
                       ), silent=TRUE)
    if (grid <- !inherits(new, "try-error")) { # BUGFIX: was class(new) != ...
      ratio <- length.data / new$restotal
      if (grid <- ratio == as.integer(ratio)) {
        if (printlevel>=PL_IMPORTANT && new$spatialdim > 1)
          message("Grid detected. If it is not a grid, set grid=FALSE.\n")
      }
    }
    return(if (grid) new else {
      CheckXT(x, y, z, T, grid=FALSE, distances,
              ## NOTE(review): original read 'if (...) dim=1', which does not
              ## parse as a named argument; intent presumed to be:
              dim = if (!missing(distances) && length(distances) > 0) 1,
              length.data = length.data,
              printlevel = printlevel) }
           )
  } # if (missing(grid) && !missing(length.data))
  gridtriple <- FALSE
  if (is.GridTopology <- is(x, "GridTopology")){
    ## GridTopology already is a (start, step, length) triple per column
    x <- rbind(x@cellcentre.offset,
               x@cellsize,
               x@cells.dim)
    if ((missing(grid) || length(grid) == 0)) grid <- TRUE else stopifnot(grid)
    gridtriple <- TRUE
  }
  if (is.data.frame(x)) {
    if (ncol(x)==1) x <- as.vector(x) else x <- as.matrix(x)
  }
  stopifnot(length(x) != 0)
  ## catches RFsimulate(model, data) given instead of data=data
  stopifnot(is.numeric(x))
  if (is.matrix(x)) {
    if (!is.numeric(x)) stop("x is not numeric.")
    if (length(z)!=0) stop("If x is a matrix, then z may not be given")
    if (length(y)!=0) {
      if (!y.ok) stop("If x is a matrix, then y may not be given")
      if (length(T)!=0)
        stop("If x is a matrix and y is given, then T may not be given")
      if (!is.matrix(y) || ncol(y) != ncol(x) ||
          nrow(x)==3 && nrow(y)!=3 && ((missing(grid) || length(grid) == 0) ||
                                       grid))
        stop("y does not match x (it must be a matrix)")
    }
    ## auto-detect earth coordinates from the column names of x
    if (coord_system == COORD_SYS_NAMES[coord_auto + 1] && ncol(x) >= 2
        && ncol(x) <= 3 && !is.null(n <- dimnames(x)[[2]])) {
      if (any(idx <- earth_coordinate_names(n))) {
        if (length(idx) == 2 && !all(idx == 1:2))
          stop("earth coordinates not in order longitude/latitude")
        cur <- curunits[1]
        newunits <- RFopt$coords$new_coordunits
        curunits <- RFopt$coords$coordunits
        curunits[1:2] <- ZF_EARTHCOORD_NAMES[1:2]
        if (newunits[1] == "") newunits[1] <- UNITS_NAMES[units_km + 1]
        newunits[2:3] <- newunits[1]
        if (RFopt$internal$warn_coordinates)
          message("\n\nNOTE: current units are ",
                  if (cur=="") "not given and" else paste("'", cur, "', but"),
                  " earth coordinates detected:\n",
                  "earth coordinates will be transformed into units of '",
                  newunits[1],
                  "'.\nIn particular, the values of all scale parameters of ",
                  "any model defined\nin R^3 (currently all models!) are ",
                  "understood in units of '", newunits[1],
                  "'.\nChange options 'coord_system' and/or 'units' if ",
                  "necessary.\n(This message appears only once per session.)\n")
        coord_system <- COORD_SYS_NAMES[earth + 1]
        RFoptions(coords.coord_system = coord_system,
                  coords.coordunits = curunits,
                  coords.new_coordunits = newunits,
                  internal.warn_coordinates=FALSE)
      } else {
        RFoptions(coords.coord_system = COORD_SYS_NAMES[cartesian + 1])
      }
    }
    spatialdim <- ncol(x)
    len <- nrow(x)
    ## heuristics for 'grid' when it was not given explicitly
    if (spatialdim==1 && len != 3 && (missing(grid) || length(grid) == 0)) {
      if (length(x) <= 2) grid <- TRUE
      else {
        dx <- diff(x)
        grid <- max(abs(diff(dx))) < dx[1] * RFopt$general$gridtolerance
      }
    }
    if ((missing(grid) || length(grid) == 0) &&
        any(apply(x, 2, function(z) (length(z) <= 2) || max(abs(diff(diff(z))))
                  > RFopt$general$gridtolerance))) {
      grid <- FALSE
    }
    if ((missing(grid) || length(grid) == 0) || !is.logical(grid)) {
      grid <- TRUE
      if (spatialdim > 1 && RFopt$internal$warn_ambiguous) {
        RFoptions(internal.warn_ambiguous = FALSE)
        warning("Ambiguous interpretation of the coordinates. Better give the logical parameter 'grid=TRUE' explicitely. (This warning appears only once per session.)")
      }
    }
    if (grid && !is.GridTopology) {
      if (gridtriple <- len==3) {
        if (printlevel >= PL_SUBIMPORTANT && RFopt$internal$warn_oldstyle) {
          message("x was interpreted as a gridtriple; the new gridtriple notation is:\n 1st row of x is interpreted as starting values of sequences,\n 2nd row as step,\n 3rd row as number of points (i.e. length),\n in each of the ", ncol(x), " directions.")
        }
      } else len <- rep(len, times=spatialdim) # Alex 8.10.2011
    }
    if (grid && !gridtriple) {
      ## turn the matrix columns into a list of coordinate vectors
      x <- lapply(apply(x, 2, list), function(r) r[[1]])
      if (length(y) != 0) y <- lapply(apply(y, 2, list), function(r) r[[1]])
    }
  } else { ## x, y, z given separately
    if (length(y)==0 && length(z)!=0) stop("y is not given, but z")
    ## coerce non-numeric coordinate vectors to numeric, with a message
    xyzT <- list(x=if (!missing(x)) x, y=y, z=z, T=T)
    for (i in 1:4) {
      if (!is.null(xyzT[[i]]) && !is.numeric(xyzT[[i]])) {
        if (printlevel>PL_IMPORTANT)
          message(names(xyzT)[i],
                  " not being numeric it is converted to numeric")
        assign(names(xyzT)[i], as.numeric(xyzT[[i]]))
      }
    }
    remove(xyzT)
    spatialdim <- 1 + (length(y)!=0) + (length(z)!=0)
    if (spatialdim==1 && ((missing(grid) || length(grid) == 0) || !grid)) {
      ## overrides the user's setting in the case d=1
      if (length(x) <= 2) newgrid <- TRUE
      else {
        dx <- diff(x)
        newgrid <- max(abs(diff(dx))) < dx[1] * RFopt$general$gridtolerance
      }
      if ((missing(grid) || length(grid) == 0)) grid <- newgrid
      else if (xor(newgrid, grid) && RFopt$internal$warn_on_grid) {
        RFoptions(internal.warn_on_grid = FALSE)
        message("coordinates", if (grid) " do not",
                " seem to be on a grid, but grid = ", grid)
      }
    }
    len <- c(length(x), length(y), length(z))[1:spatialdim]
    ## definitely not a grid; otherwise try the grid interpretation
    if (!(missing(grid) || length(grid) == 0) && !grid) {
      if (any(diff(len) != 0)) stop("some of x, y, z differ in length")
      x <- cbind(x, y, z)
      ## make a matrix out of the list
      len <- len[1]
    } else {
      if ((missing(grid) || length(grid) == 0) && any(len != len[1]))
        grid <- TRUE
      x <- list(x, y, z)[1:spatialdim]
    }
    y <- z <- NULL ## important that y is NULL, as it is tested below
  } ## end of x, y, z given separately
  if (!all(is.finite(unlist(x)))) {
    stop("coordinates are not all finite")
  }
  if ((missing(grid) || length(grid) == 0) || grid) {
    if (gridtriple) {
      if (len != 3)
        stop("In case of simulating a grid with option gridtriple, exactly 3 numbers are needed for each direction")
      lr <- x[3,] # apply(x, 2, function(r) length(seq(r[1], r[2], r[3])))
      restotal <- prod(x[3, ])
      if (length(y)!=0 && !all(y[3,] == x[3,]))
        stop("the grids of x and y do not match ")
    } else {
      ## try to convert the coordinate lists into (start, step, len) triples
      xx <- seq2grid(x, "x", grid,
                     RFopt$internal$warn_ambiguous, RFopt$general$gridtolerance)
      if (length(y)!=0) {
        yy <- seq2grid(y, "y", grid,
                       RFopt$internal$warn_ambiguous,
                       RFopt$general$gridtolerance)
        if (xor(is.logical(xx), is.logical(yy)) ||
            (!is.logical(xx) && !all(yy[3,] == xx[3,])))
          stop("the grids for x and y do not match")
      }
      if (missing(grid) || length(grid) == 0) grid <- !is.logical(xx)
      if (grid) {
        x <- xx
        if (length(y) != 0) y <- yy
        restotal <- prod(len)
        len <- 3
      } else {
        ## not a grid after all: back to a plain coordinate matrix
        x <- sapply(x, function(z) z)
        if (length(y) != 0) y <- sapply(y, function(z) z)
      }
    }
    if (grid && any(x[3, ] <= 0))
      stop(paste("step must be postive. Got as steps",
                 paste(x[3,], collapse=",")))
  }
  if (!grid) {
    restotal <- nrow(x)
    if (length(y)==0) {
      ## for small point sets, warn about duplicated locations;
      ## for larger values of restotal this check is not practicable
      if (restotal < 200 && any(as.double(dist(x)) == 0)) {
        d <- as.matrix(dist(x))
        diag(d) <- 1
        idx <- which(as.matrix(d) ==0)
        if (printlevel>PL_ERRORS)
          Print(x, dim(d), idx , cbind( 1 + ((idx-1)%% nrow(d)), #
                                        1 + as.integer((idx - 1) / nrow(d))) )
        warning("locations are not distinguishable")
      }
    }
  }
  if (coord_system == "earth") {
    opt <- RFoptions()$coords ## must be re-read; may have changed above
    global.units <- opt$new_coordunits[1]
    if (global.units[1] == "") global.units <- "km"
    Raumdim <- ncol(x) #if (grid) ncol(x) else
    new_is_cartesian <- new_coord_system %in% CARTESIAN_SYSTEMS
    if (new_is_cartesian) {
      ## determine the zenit if it was not given explicitly
      if (sum(idx <- is.na(opt$zenit))) {
        zenit <- (if (grid) x[1, 1:2] + x[2, 1:2] * (x[3, 1:2] - 1) else
                  if (opt$zenit[!idx] == 1) colMeans(x[, 1:2]) else
                  if (opt$zenit[!idx] == Inf) colMeans(apply(x[, 1:2], 2, range)) else
                  stop("unknown value of zenit"))
        RFoptions(zenit = zenit)
      }
      code <- switch(new_coord_system,
                     "cartesian" = CARTESIAN_COORD,
                     "gnomonic" = GNOMONIC_PROJ,
                     "orthographic" = ORTHOGRAPHIC_PROJ,
                     stop("unknown projection method")
                     )
      x <- RFfctn(RMtrafo(new=code), x, grid=grid,
                  coords.new_coordunits=global.units,
                  coords.new_coord_system = "keep")
      if (length(y) != 0)
        y <- RFfctn(RMtrafo(new=code), y, grid=grid,
                    coords.new_coordunits=global.units,
                    coords.new_coord_system = "keep")
      if (new_coord_system == "cartesian") {
        Raumdim <- max(3, Raumdim)
        spatialdim <- Raumdim
      }
      dim(x) <- c(length(x) /Raumdim, Raumdim)
      ## never try to set the following lines outside the 'if (new_coord_system'
      ## as in case of ..="keep" none of the following lines should be set
      RFoptions(coords.coord_system =
                if (new_is_cartesian) "cartesian" else new_coord_system)
      grid <- FALSE
    } else if (!(new_coord_system %in% c("keep", "sphere", "earth"))) {
      warning("unknown new coordinate system")
    }
  }
  if (Zeit <- length(T)!=0) {
    ## decide whether T is a triple c(start, step, length) or a full sequence
    Ttriple <- length(T) == 3
    if (length(T) <= 2) Tgrid <- TRUE
    else {
      dT <- diff(T)
      Tgrid <- max(abs(diff(dT))) < dT[1] * RFopt$general$gridtolerance
    }
    if (is.na(RFopt$general$Ttriple)) {
      if (Ttriple && Tgrid)
        stop("ambiguous definition of 'T'. Set RFoptions(Ttriple=TRUE) or ",
             "RFoptions(Ttriple=FALSE)")
      if (!Ttriple && !Tgrid) stop("'T' does not have a valid format")
    } else if (RFopt$general$Ttriple) {
      if (!Ttriple)
        stop("'T' is not given in triple format 'c(start, step, length)'")
      Tgrid <- FALSE
    } else {
      if (!Tgrid) stop("'T' does not define a grid")
      Ttriple <- FALSE
    }
    if (Tgrid)
      T <- as.vector(seq2grid(list(T), "T", Tgrid,
                              RFopt$internal$warn_ambiguous,
                              RFopt$general$gridtolerance))
    restotal <- restotal * T[3]
  }
  if (!missing(dim) && !is.null(dim) && spatialdim != dim) {
    stop("'dim' should be given only when 'distances' are given. Here, 'dim' contradicts the given coordinates.")
  }
  if (ex.red) {
    ## reduced-examples mode: shrink grids / point sets / time axis
    if (grid) {
      x[3, ] <- pmin(x[3, ], ex.red)
      if (length(y) > 0) y[3, ] <- pmin(y[3, ], ex.red)
      restotal <- as.integer(prod(x[3, ]))
    } else {
      len <- restotal <- as.integer(min(nrow(x), ex.red^spatialdim))
      x <- x[1:len, , drop=FALSE]
      if (length(y) > 0) y <- y[1:len, , drop=FALSE]
    }
    if (Zeit) {
      T[3] <- min(T[3], 3)
      restotal <- as.integer(restotal * T[3])
    }
  }
  ## keep exactly the sequence up to 'grid'
  if (length(x) > 0) {
    if (storage.mode(x) != "double") storage.mode(x) <- "double"
  } else x <- double(0)
  if (length(y) > 0) {
    if (storage.mode(y) != "double") storage.mode(y) <- "double"
  } else y <- double(0)
  L <- list(x=x, #0
            y=y, #1
            T=as.double(T), #2
            grid=as.logical(grid), #3
            spatialdim=as.integer(spatialdim), #4
            Zeit=Zeit, #5
            dist.given=FALSE, #6
            restotal=as.integer(restotal), ## 7, nr of locations
            l=as.integer(len), ## 8, physical "length/rows" of input
            coordunits = curunits, #9
            new_coordunits = newunits) #10
  class(L) <- "CheckXT"
  return(L)
}
## Transpose the non-grid coordinate matrices of a "CheckXT" result (or of a
## list of such results) into the row-per-dimension layout expected by the
## C code. Grid definitions are left untouched.
trafo.to.C_CheckXT <- function(new) {
  transpose_coords <- function(set) {
    if (length(set$x) > 0 && !set$grid) set$x <- t(set$x)
    if (length(set$y) > 0 && !set$grid) set$y <- t(set$y)
    set
  }
  if (is.list(new[[1]])) {
    ## several coordinate sets: handle each one
    for (i in seq_along(new)) new[[i]] <- transpose_coords(new[[i]])
    new
  } else {
    transpose_coords(new)
  }
}
## Convenience wrapper: normalise the locations via CheckXT and then
## transpose non-grid coordinate matrices into the layout used by the C code.
C_CheckXT <- function(x, y=NULL, z=NULL, T=NULL, grid, distances=NULL,
                      dim=NULL, # == spatialdim!
                      length.data,
                      y.ok = FALSE,
                      printlevel = RFoptions()$general$printlevel){
  checked <- CheckXT(x=x, y=y, z=z, T=T, grid=grid, distances=distances,
                     dim=dim, length.data=length.data, y.ok=y.ok,
                     printlevel = printlevel)
  trafo.to.C_CheckXT(checked)
}
## Transform earth coordinates (longitude/latitude[, height]) into a
## cartesian representation: true 3D cartesian or one of the two projections
## (gnomonic / orthographic). Columns of the result are labelled X, Y, Z, T.
RFearth2cartesian <- function(coord, units=NULL, system = "cartesian",
                              grid=FALSE) {
  if (is.character(system)) {
    system <- pmatch(system, ISONAMES) - 1
  }
  allowed <- c(CARTESIAN_COORD, GNOMONIC_PROJ, ORTHOGRAPHIC_PROJ)
  stopifnot(system %in% allowed)
  if (is.null(units)) {
    ## fall back to the globally configured target units, default "km"
    configured <- RFoptions()$coords$new_coordunits[1]
    units <- if (configured[1] == "") "km" else configured
  }
  if (!is.matrix(coord)) {
    coord <- t(coord)
  }
  res <- RFfctn(RMtrafo(new=system), coord, grid=grid,
                coords.new_coord_system = "keep",
                coords.new_coordunits=units,
                coords.coord_system="earth")
  axis_names <- c("X", "Y", "Z", "T")[seq_len(ncol(res))]
  dimnames(res) <- list(NULL, axis_names)
  return(res)
}
## Compute a distance matrix ('dist' object) from earth coordinates by first
## transforming them into a cartesian system; '...' is passed on to dist().
RFearth2dist <- function(coord, units=NULL, system="cartesian",
                         grid=FALSE, ...) {
  if (is.character(system)) {
    system <- pmatch(system, ISONAMES) - 1
  }
  valid_systems <- c(CARTESIAN_COORD, GNOMONIC_PROJ, ORTHOGRAPHIC_PROJ)
  stopifnot(system %in% valid_systems)
  if (is.null(units)) {
    ## fall back to the globally configured target units, default "km"
    configured <- RFoptions()$coords$new_coordunits[1]
    units <- if (configured[1] == "") "km" else configured
  }
  if (!is.matrix(coord)) {
    coord <- t(coord)
  }
  cartesian <- RFfctn(RMtrafo(new=system), coord, grid=grid,
                      coords.new_coord_system = "keep",
                      coords.new_coordunits=units,
                      coords.coord_system="earth")
  return(dist(cartesian, ...))
}
## used by RFratiotest, fitgauss, Crossvalidation, likelihood-ratio, RFempir
## Bring an arbitrary combination of model / coordinates / data (RFsp or sp
## objects, matrices, data frames, lists of sets, or distances) into one
## standardized list: normalised coordinates ('coord'), data matrices,
## dimensions, ranges, minimal distance, multivariate dimension vdim, etc.
## Fixes relative to the original: option restore now on.exit (runs on error
## paths too); the unreachable distances/data length check is reachable; the
## NA check inside the distances loop inspects data[[i]]; try-error tests use
## inherits(); partially matched names ($restot, $coord, $examples_red) are
## spelled out.
StandardizeData <- function(model,
                            x, y=NULL, z=NULL, T=NULL,
                            grid, data, distances=NULL,
                            RFopt, mindist_pts=2,
                            dim=NULL, allowFirstCols=TRUE, vdim = NULL, ...) {
  ## examples_reduced must be switched off while the data are analysed;
  ## restore the caller's value on every exit path
  RFoptions(internal.examples_reduced=FALSE)
  on.exit(RFoptions(internal.examples_reduced =
                      RFopt$internal$examples_reduced), add=TRUE)
  if (missing(dim)) dim <- NULL
  if (missing(grid)) grid <- NULL
  dist.given <- !missing(distances) && length(distances)>0
  matrix.indep.of.x.assumed <- FALSE
  rangex <- neu <- gridlist <- RFsp.coord <- gridTopology <- data.RFparams <-
    mindist <- data.col <- NULL
  if (missing(data)) stop("missing data")
  missing.x <- missing(x)
  if (isSpObj(data)) data <- sp2RF(data)
  if (isRFsp <- is(data, "RFsp") || (is.list(data) && is(data[[1]], "RFsp"))){
    ## RFsp objects carry their own locations: none may be given separately
    if ( (!missing.x && length(x)!=0) || length(y)!=0 || length(z) != 0 ||
        length(T) != 0 || dist.given || length(dim)!=0 || length(grid) != 0)
      stop("data object already contains information about the locations. So, none of 'x' 'y', 'z', 'T', 'distance', 'dim', 'grid' should be given.")
    if (!is.list(data)) data <- list(data)
    sets <- length(data)
    x <- RFsp.coord <- gridTopology <- data.RFparams <- vector("list", sets)
    if (!is.null(data[[1]]@.RFparams)) {
      if (length(vdim) > 0) stopifnot( vdim == data[[1]]@.RFparams$vdim)
      else vdim <- data[[1]]@.RFparams$vdim
    }
    dimdata <- NULL
    dimensions <- (if (isGridded(data[[1]])) data[[1]]@grid@cells.dim
                   else nrow(data[[1]]@data))
    dimensions <- c(dimensions, data[[1]]@.RFparams$vdim)
    for (i in 1:length(data)) {
      ## extract grid/coordinate information per set and reduce the data
      ## slot to a conventional matrix
      xi <- list()
      xi$grid <- isGridded(data[[i]])
      compareGridBooleans(grid, xi$grid)
      data[[i]] <- selectDataAccordingFormula(data[[i]], model=model)
      data.RFparams[[i]] <- data[[i]]@.RFparams
      gridTopology[[i]] <- if (xi$grid) data[[i]]@grid else NULL
      RFsp.coord[[i]] <- if (!xi$grid) data[[i]]@coords else NULL
      dimensions <- if (xi$grid) data[[i]]@grid@cells.dim else nrow(data[[i]]@data)
      dimensions <- c(dimensions, data[[i]]@.RFparams$vdim)
      if (RFopt$general$vdim_close_together) dimensions <- rev(dimensions)
      dimdata <- rbind(dimdata, c(dimensions, data[[i]]@.RFparams$n))
      tmp <- RFspDataFrame2conventional(data[[i]])
      xi$x <- tmp$x
      if (!is.null(tmp$T)) xi$T <- tmp$T
      data[[i]] <- as.matrix(tmp$data)
      x[[i]] <- xi
    }
    ## drop degenerate vdim / repetition columns from dimdata
    idx <- if (RFopt$general$vdim_close_together) 1 else length(dimensions)
    if (all(dimdata[, idx] == 1))
      dimdata <- dimdata[, -idx, drop=FALSE]
    if (all(dimdata[, ncol(dimdata)] == 1)) # repetitions
      dimdata <- dimdata[, -ncol(dimdata), drop=FALSE]
  } else { # !isRFsp
    ## dimdata is determined later
    if (dist.given) {
      stopifnot(missing(x) || length(x)==0, length(y)==0, length(z)==0)
      if (!is.list(distances)) {
        distances <- list(distances)
        if (is.list(data))
          stop("if list of data is given then also for distances ")
        data <- list(as.matrix(data))
      } else if (!is.list(data)) {
        stop("if list of distances is given then also for data ")
      } else if (length(data) != length(distances)) {
        ## BUGFIX: this check followed a stop() and was unreachable
        stop("length of distances does not match length of data")
      }
      for (i in 1:length(distances)) {
        ## BUGFIX: check each data set (is.na on the whole list never fires)
        if (any(is.na(data[[i]])))
          stop("missing data are not allowed if distances are used.")
      }
      stopifnot(missing(T) || length(T)==0)
      if (is.matrix(distances[[1]])) {
        ## matrix form: one distance vector per column, nrow = dimension
        dimensions <- sapply(distances, nrow)
        spatialdim <- tsdim <- xdimOZ <- dimensions[1]
        if (length(dim) > 0 && dim != spatialdim)
          stop("unclear specification of the distances: either the distances is given as a vector or distance vectors should given, where the number of rows matches the spatial dimension")
        lcc <- sapply(distances, function(x) 0.5 * (1 + sqrt(1 + 8 * ncol(x))) )
        if (!all(diff(dimensions) == 0))
          stop("sets of distances show different dimensions")
        range_distSq <- function(M) range(apply(M, 2, function(z) sum(z^2)))
        rangex <- sqrt(range(sapply(distances, range_distSq)))
      } else {
        xdimOZ <- 1L
        spatialdim <- tsdim <- as.integer(dim)
        lcc <- sapply(distances, function(x) if (is.matrix(x)) -1
                      else 0.5 * (1 + sqrt(1 + 8* length(x))))
        rangex <- range(sapply(distances, range))
      }
      mindist <- min(rangex)
      if (is.na(mindist)) mindist <- 1 ## only 1 point given; arbitrary value
      if (mindist <= RFopt$nugget$tol) {
        if (!RFopt$general$allowdistanceZero)
          stop("distance with value 0 identified -- use allowdistanceZero=T?")
        mindist <- 1e-15 * (RFopt$nugget$tol == 0) + 2 * RFopt$nugget$tol
        ## NOTE(review): in the matrix branch below, the column index is the
        ## squared norm itself rather than a '== 0' mask; looks wrong --
        ## confirm intent before changing
        for (i in 1:length(distances))
          if (is.vector(distances[[i]]))
            distances[[i]][distances[[i]] == 0] <- mindist
          else distances[[i]][1, apply(distances[[i]], 2,
                                       function(z) sum(z^2))] <- mindist
      }
      len <- as.integer(lcc)
      if (any(len != lcc)) stop("number of distances not of form k(k-1)/2")
      neu <- CheckXT(distances=distances, dim = spatialdim)
      coordunits <- RFopt$coords$coordunits
      Zeit <- FALSE
    } else { ## distances not given
      if (is.data.frame(data) || !is.list(data)) {
        if (!missing(x) && is.list(x) && !is.data.frame(x) &&
            (length(x$grid)==0 || length(x$restotal)==0))
          stop("either both coordinates and data must be lists or none")
        data <- list(data)
      }
      sets <- length(data)
      for (i in 1:sets) {
        if (is.data.frame(data[[i]]) || is.vector(data[[i]]))
          data[[i]] <- as.matrix(data[[i]])
      }
      sel <- try(selectAccordingFormula(data[[1]], model), silent=TRUE)
      if (is(sel, "try-error")) sel <- NULL
      if (missing(x)) { ## dec 2012: matrix.indep.of.x.assumed
        ## try to recognise coordinate columns of 'data' by their names
        if (!is.null(dnames <- colnames(data[[1]]))) {
          if ((!any(is.na(xi <- RFopt$coords$coordnames))) ||
              (length(xi <- earth_coordinate_names(dnames)) == 2) ||
              (length(xi <- cartesian_coordinate_names(dnames)) > 0) ||
              (length(xi <- general_coordinate_names(dnames)) > 0) ) {
            x <- list()
            for (i in 1:sets) {
              xx <- data[[i]][ , xi, drop=FALSE]
              storage.mode(xx) <- "numeric"
              x[[i]] <- list(x=xx, grid = FALSE)
              if (length(sel) == 0) sel <- -xi
            }
          }
        }
        if (missing(x)) { ## if still missing
          data.col <- try(data.columns(data[[1]], xdim=dim,
                                       force=allowFirstCols,
                                       halt=!allowFirstCols))
          x <- list()
          if (is(data.col, "try-error")) {
            if (length(sel) > 0){
              ## coordinates = everything the formula did not select
              for (i in 1:sets) {
                x[[i]] <- data[[i]][ , !sel, drop=FALSE]
                storage.mode(x[[i]]) <- "numeric"
              }
              if (length(dim) == 0) {
                warning("better give 'dim' explicitely.")
              }
              if (length(dim) > 0 && ncol(x[[i]]) != dim)
                stop("'dim' does not match the recognized coordindates")
            } else {
              ## no coordinates at all: use the row index as dummy coordinate
              sel <- TRUE
              data.col <- NULL
              matrix.indep.of.x.assumed <- TRUE
              for (i in 1:sets) {
                x[[i]] <- 1:nrow(as.matrix(data[[i]]))
                storage.mode(x[[i]]) <- "numeric"
              }
            }
          } else {
            for (i in 1:sets) {
              xx <- data[[i]][, data.col$x, drop=FALSE]
              storage.mode(xx) <- "numeric"
              x[[i]] <- list(x=xx, grid=FALSE)
              if (length(sel) == 0) sel <- data.col$data
            }
          }
        }
        for (i in 1:sets) {
          data[[i]] <- data[[i]][ , sel, drop=FALSE]
          storage.mode(data[[i]]) <- "numeric"
        }
      } ## x given; NO ELSE, even though parts below are partly duplicated
      if (is.data.frame(x)) x <- as.matrix(x)
      if (is.list(x)) {
        if (length(y)!=0 || length(z)!=0 || length(T)!=0)
          stop("if x is a list 'y', 'z', 'T' may not be given")
        if (!is.list(x[[1]])) {
          if (length(data) == 1) x <- list(x)
          else stop("number of sets of 'x' and 'data' differ")
        }
      } else {
        ## single coordinate specification: pack into a one-set list
        x <- list(x=x)
        if (length(y)!=0) {
          stopifnot(!is.list(y))
          x$y <- y
        }
        if (length(z)!=0) {
          stopifnot(!is.list(z))
          x$z <- z
        }
        if (length(T)!=0) {
          stopifnot(!is.list(T))
          x$T <- T
        }
        if (!is.null(grid))
          x$grid <- grid
        if (!is.list(data)) data <- list(as.matrix(data))
        x <- list(x)
      }
    } # ! distance
    sets <- length(data)
    dimdata <- matrix(nrow=sets, ncol=length(base::dim(data[[1]])))
    for (i in 1:sets) dimdata[i, ] <- base::dim(data[[i]])
  } # !isRFsp
  if (!dist.given) { ## x coordinates, not distances
    neu <- CheckXT(x=x)
    if (!is.list(neu[[1]])) neu <- list(neu)
    coordunits<- neu[[1]]$coordunits
    spatialdim <- as.integer(neu[[1]]$spatialdim)
    Zeit <- neu[[1]]$Zeit
    tsdim <- as.integer(spatialdim + Zeit)
    len <- sapply(neu, function(x)
      (if (x$grid) prod(x$x[3, ]) else nrow(x$x)) *
      (if (Zeit) x$T[3] else 1))
    getrange <- function(x) {
      if (x$grid) rbind(x$x[1, ], x$x[1, ] + x$x[2, ] * (x$x[3, ] - 1))
      else apply(x$x, 2, range)
    }
    rangex <- sapply(neu, getrange)
    ## if there are several data sets:
    if (ncol(x[[1]]$x) > 1 || is.null(x[[1]]$dist.given) || !x[[1]]$dist.given){
      rangex <- t(rangex)
      base::dim(rangex) <- c(length(rangex) / spatialdim, spatialdim)
    }
    rangex <- apply(rangex, 2, range)
    ## squared minimal distance, estimated from at most mindist_pts points
    getmindistSq <- function(x) {
      if (x$grid) sum(x$x[2,]^2)
      else if (nrow(x$x) < 2) NA
      else if (nrow(x$x) <= mindist_pts) min(dist(x$x))
      else min(dist(x$x[sample(nrow(x$x), mindist_pts), ]))
    }
    if (Zeit && any(sapply(neu, function(x) x$T[2]) <= RFopt$nugget$tol))
      stop("step of time component smaller than nugget tolerance 'tol'")
    if (any(sapply(neu, function(x) x$grid && any(x$x[2, ]<=RFopt$nugget$tol))))
      stop("step of some spatial component smaller than nugget tolerance 'tol'")
    zaehler <- 0
    repeat {
      mindist <- sqrt(min(sapply(neu, getmindistSq)))
      if (is.na(mindist)) mindist <- 1 ## only 1 point given; arbitrary value
      if (mindist <= RFopt$nugget$tol) {
        if (!RFopt$general$allowdistanceZero)
          stop("Distance with value 0 identified -- use allowdistanceZero=T?")
        if ((zaehler <- zaehler + 1) > 10)
          stop("unable to scatter point pattern")
        ## jitter non-grid coordinates slightly and try again
        for (i in 1:length(neu)) if (!neu[[i]]$grid)
          neu[[i]]$x <- neu[[i]]$x + rnorm(length(neu[[i]]$x), 0,
                                           10 * RFopt$nugget$tol)
      } else break
    }
    xdimOZ <- ncol(neu[[1]]$x)
  }
  if (length(dim) > 0) stopifnot(dim == tsdim)
  varnames <- try(colnames(data[[1]]))
  ## does x[[1]]$x always work ??
  names <- GetDataNames(model=model,
                        coords=if (missing(x)) NULL else x[[1]]$x,
                        locinfo=neu[[1]]) # without data!
  if (is.null(names$varnames))
    names$varnames <-
      if (inherits(varnames, "try-error")) NULL else varnames
  restotal <- sapply(neu, function(x) x$restotal)
  ldata <- sapply(data, length)
  if (length(vdim) == 0) {
    ## determine the multivariate dimension from the data, or failing that,
    ## from the model
    if (all(sapply(data, function(x) is.vector(x) || ncol(x) == 1)))
      vdim <- 1
    else if (!missing(model)) {
      vdim <- rfInit(list("Cov", PrepareModel2(model=model, ...,
                                               x=trafo.to.C_CheckXT(neu))),
                     x=x, y=y, z=z, T=T, grid=grid, distances=distances,
                     dim=dim, reg=MODEL_AUX, dosimulate=FALSE)[1]
    } else vdim <- NA
  }
  repetitions <- as.integer(ldata / (restotal * vdim))
  if (!is.na(vdim) && any(ldata != repetitions * restotal * vdim))
    stop("mismatch of data dimensions")
  return(list(
    model = if (missing(model)) NULL else
            PrepareModel2(model, ..., x=trafo.to.C_CheckXT(neu)),
    orig.model = if (missing(model)) NULL else model,
    data=data, dimdata=dimdata, isRFsp = isRFsp,
    RFsp.coord = RFsp.coord,
    coord = neu,
    dist.given=dist.given,
    gridTopology = gridTopology,
    data.RFparams = data.RFparams,
    spatialdim=spatialdim,
    tsdim=tsdim,
    rangex = as.matrix(rangex),
    coordunits=coordunits,
    Zeit = Zeit,
    matrix.indep.of.x.assumed = matrix.indep.of.x.assumed,
    len = len,
    mindist = mindist,
    xdimOZ = xdimOZ,
    vdim = vdim,
    coordnames=names$coordnames,
    varnames=if (length(names$varnames)==0) ""
             else names$varnames,
    data.col = data.col,
    repetitions = repetitions
  ))
}
|
#dependencias
# Bootstrap pacman, then install/attach the script's dependencies.
# BUG FIX: the original only ran install.packages("pacman") when it was
# missing but never loaded it afterwards, so the bare p_load() calls
# failed on a fresh machine. Load it explicitly after installing.
if (!require("pacman")) {
  install.packages("pacman")
  library(pacman)
}
# p_load() installs (if needed) and attaches each package in one call.
p_load(dplyr, tidyr, jsonlite, purrr)
| /generador-de-mapas.R | no_license | RayanroBryan/rastreador_covid_19_costa_rica | R | false | false | 126 | r | #dependencias
# Bootstrap pacman, then install/attach the script's dependencies.
# BUG FIX: the original only ran install.packages("pacman") when it was
# missing but never loaded it afterwards, so the bare p_load() calls
# failed on a fresh machine. Load it explicitly after installing.
if (!require("pacman")) {
  install.packages("pacman")
  library(pacman)
}
# p_load() installs (if needed) and attaches each package in one call.
p_load(dplyr, tidyr, jsonlite, purrr)
|
#####################################################################################################
#### Forest soils dataviz script ###################
#### mark.farrell@csiro.au +61 8 8303 8664 31/05/2021 ################################
#####################################################################################################
#### Set working directory ####
# NOTE(review): machine-specific absolute path; consider an RStudio
# project or here::here() so the script is portable.
setwd("/Users/markfarrell/OneDrive - CSIRO/Data/ForestSoils")
#### Packages ####
# Install only the packages that are actually missing -- the original
# re-ran install.packages() unconditionally on every execution.
for (pkg in c("ggtern", "ggdist", "ggridges", "scales")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
library(tidyverse)
library(janitor)
library(PerformanceAnalytics)
library(corrplot)
library(RColorBrewer)
library(plotrix)
library(ggpmisc)
#library(ggtern)
library(ggbluebadge)
library(ggdist)
library(magrittr)
library(lubridate)
library(vegan)
library(ape)
library(RVAideMemoire)
library(BiodiversityR)
library(patchwork)
# ggridges is attached after ggdist deliberately (it masks several
# ggdist functions); keep this load order.
library(ggridges) #masks a lot of ggdist
library(scales)
#### Colours ####
# No margin
par(mar=c(0,0,1,0))
# Build Spectral-interpolated palettes of 17, 11 and 8 colours and
# preview each as a pie chart. (The original re-fetched the same
# Spectral base palette three times; once is enough.)
coul <- brewer.pal(11, "Spectral")
coul17 <- colorRampPalette(coul)(17)
pie(rep(1, length(coul17)), col = coul17 , main="")
coul11 <- colorRampPalette(coul)(11)
pie(rep(1, length(coul11)), col = coul11 , main="")
coul8 <- colorRampPalette(coul)(8)
pie(rep(1, length(coul8)), col = coul8 , main="")
# 125-step brown-blue-green ramp for inflow mapping.
coul_inflow <- brewer.pal(11, "BrBG")
coul125 <- colorRampPalette(coul_inflow)(125)
# Output the palettes for reference.
# BUG FIX: the original built this list BEFORE coul125 was defined
# (coul125 was created after write_csv), which errored at run time;
# coul125 is now created above, before it is used.
x <- list(coul8, coul11, coul17, coul125)
y <- tibble(column1 = map_chr(x, str_flatten, " "))
write_csv(y, "colours.csv")
#### data in ####
# Load the plot-level summary table and the full repeated-measures
# chemistry table, then coerce the ID/grouping columns to factors.
# NB: `sum` and `all` shadow the base R functions of the same names.
sum <- read_csv("data/processed/summary.csv")
all <- read_csv("data/processed/ChemAll_adm_OLremPLFA.csv")
sum %<>% mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun), as.factor))
str(sum)
# Parse dates, rank plots within each transect by relative height
# (PlotPos = 1 is the highest plot), and bin each sample date into one
# of the five named field campaigns.
all %<>% mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
str(all)
#### Ternary plot ####
# This is best run standalone as {ggtern} masks a lot of ggplot
# Sand/Clay/Silt texture triangle, one point per sample, coloured by
# transect. ggtern() is only available once library(ggtern) is attached
# (the library call is commented out in the package block above).
ggtern(data=sum, aes(Sand,Clay,Silt, color = Transect)) +
geom_point(size = 4) +
theme_rgbw() +
theme_hidetitles() +
theme(text = element_text(size=20)) +
theme(legend.key=element_blank())
#### MIR ####
# MIR import
# Read the raw mid-infrared spectra sheet; spectral absorbance columns
# are later taken from positions 27:1997 (see `spec` below).
mir <- read_csv("data/working/MasterFieldDataFC_NSW - MIR_raw.csv")
cols_condense(mir)
dim(mir)
# Add the within-transect plot rank and the sampling-campaign labels,
# using the same recoding as for `all` above.
mir <- mir %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date)
mir$`Sampling Period` <- as.factor(mir$`Sampling Period`)
str(mir)
levels(mir$`Sampling Period`)
# Put the campaigns in chronological order for plotting/modelling.
mir <- mir %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
# initial check plot
# Keep UniqueID (column 2 of `mir`) plus the 1971 raw spectral columns.
spec <- mir %>%
select(2, 27:1997)
# Nominal wavenumber axis of the raw instrument output (high to low).
waves <- seq(7999.27979, 401.121063, by = -3.8569)
# BUG FIX: the original wrote `colnames(spec[, 2:1972]) <- waves`, which
# renamed a temporary subset copy and changed nothing. Index into the
# names of `spec` itself so the spectral columns really carry their
# wavenumbers (the hyperSpec step below greps digits out of them).
colnames(spec)[2:1972] <- waves
matplot(x = waves,
y = t(spec[2:1972]),
ylim = c(0, 3.5),
type = "l",
lty = 1,
main = "Raw spectra",
xlab = "Wavenumber (cm-1)",
ylab = "Absorbance",
col = rep(palette(), each = 3)
)
# Interpolation
# Resample the spectra onto a regular 6000-600 cm-1 grid (4 cm-1 step)
# using a loess smoother on a hyperSpec object.
# NOTE(review): this needs the {hyperSpec} package, which is not in the
# library() block at the top -- confirm it is attached elsewhere.
mirinterp <- spec
mirinterp1 <- new("hyperSpec", # makes the hyperspec object
spc = mirinterp[, grep('[[:digit:]]', colnames(mirinterp))],
wavelength = as.numeric(colnames(mirinterp)[grep ('[[:digit:]]', colnames(mirinterp))]),
label = list(.wavelength = "Wavenumber",
spc = "Intensity"))
mirinterp3 <- hyperSpec::spc.loess(mirinterp1, c(seq(6000, 600, -4)))
# plot(mirinterp3, "spc", wl.reverse = T, col = rep(palette(), each = 3))
output <- mirinterp3[[]]
waves_l <- seq(6000, 600, by = -4)
colnames(output) <- waves_l
ID <- as.data.frame(mir$UniqueID)
final <- cbind(ID, output) #This is now the re-sampled df. Still needs baselining.
matplot(x = waves_l, y = t(final[,2:1352]), ylim=c(0,3), type = "l", lty = 1,
main = "Absorbance - 600 to 6000 & reample with resolution of 4", xlab = "Wavelength (nm)",
ylab = "Absorbance", col = rep(palette(), each = 3))
# baseline offset
#' Baseline-offset correction: subtract each spectrum's (row's) minimum
#' so every spectrum touches zero.
#'
#' @param spectra Matrix or data frame, one spectrum per row. If the
#'   wavenumber axis runs low-to-high (first cell of row 1 < last cell
#'   of row 1) the columns are reversed so output is always high-to-low.
#' @return Numeric matrix of offset-corrected spectra, same dimensions.
spoffs2 <- function(spectra) {
  if (missing(spectra)) {
    stop("No spectral data provided")
  }
  # Coerce once so data-frame input behaves like the matrix path.
  spectra <- as.matrix(spectra)
  if (spectra[1, 1] < spectra[1, ncol(spectra)]) {
    spectra <- spectra[, rev(seq_len(ncol(spectra))), drop = FALSE]
  }
  # Vectorised row-minimum subtraction. Replaces the original per-row
  # for-loop and its redundant rbind()/strip-first-row step, and returns
  # the result visibly (the original ended on an assignment, so the
  # value came back invisibly).
  sweep(spectra, 1, apply(spectra, 1, min), "-")
}
# Apply the baseline-offset correction to the 1351 resampled columns.
spec_a_bc_d <- spoffs2(final[,2:1352])
dim(spec_a_bc_d)
head(spec_a_bc_d)
waves_ss <- seq(600, 6000, by=4)
# Visual check of the corrected spectra (x axis reversed: high to low).
matplot(x = waves_ss, y = t(spec_a_bc_d), ylim=c(0,2), xlim=rev(c(600, 6000)), type = "l", lty = 1,
main = "Absorbance - baseline corrected", xlab = expression("Wavenumber" ~ (cm^{-1})),
ylab = "Absorbance", col = rep(palette(), each = 3))
finalb <- cbind(ID, spec_a_bc_d) %>% #This is now the baselined and re-sampled df.
rename(UniqueID = "mir$UniqueID")
# combine data
# Re-attach the sample metadata (from `all`) to the processed spectra
# by UniqueID.
mir_meta <- all %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun, Moisture)
mir_proc <- left_join(mir_meta, finalb, by = "UniqueID")
## Multivariate Exploration and Analysis
## MIR
# Prep
# Fourth-root transform (after a +10 shift) of the spectral columns to
# tame skew before standardisation.
tmir <- mir_proc %>%
mutate(across(c(14:1364), ~((.+10)^(1/4))))
# Standardise a numeric vector to z-scores: centre on the mean and
# scale by the standard deviation.
z.fn <- function(x) {
  centred <- x - mean(x)
  centred / sd(x)
}
# Standardise each transformed spectral column to mean 0, sd 1.
stmir <- tmir %>%
mutate(across(c(14:1364), ~z.fn(.)))
# Split metadata (cols 1:13) from the spectral matrix.
fmir <- stmir %>%
select(1:13)
# NOTE(review): the transform/standardise steps used cols 14:1364 but
# this keeps 14:1363 -- confirm whether dropping the last column is
# intentional.
dmir <- stmir %>%
select(14:1363)
# Manhattan distances between standardised spectra, then classical
# principal coordinates analysis (PCoA).
distmir <- vegdist(dmir, method = "manhattan", na.rm = TRUE)
pmir <- pcoa(distmir)
pmir$values$Relative_eig[1:10]
barplot(pmir$values$Relative_eig[1:10])
# Ordination scores joined back onto the sample metadata for plotting.
mir_points <- bind_cols(fmir, (as.data.frame(pmir$vectors)))
# Plot
# PCoA ordination of the MIR spectra, coloured by transect, shaped by
# plot position.
ggplot(mir_points) +
geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "PCoA Axis 1; 81.0%",
y = "PCoA Axis 2; 7.9%")
# Permanova
# Fixed seed so the permutation p-values are reproducible.
set.seed(1983)
perm_mir <- adonis2(distmir~Transect*`Sampling Period`, data = stmir, permutations = 9999, method = "manhattan")
perm_mir #strong impact of transect, weak of sampling time
# Pairwise PERMANOVAs with FDR p-adjustment: first between transects,
# then between sampling periods.
permpt_mir <- pairwise.perm.manova(distmir, stmir$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_mir
permpd_mir <- pairwise.perm.manova(distmir, stmir$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpd_mir #sniff of significance for last sampling vs 1st three samplings
# Same model with continuous relative height instead of period.
perm_mirh <- adonis2(distmir~Transect*RTHeight, data = stmir, permutations = 9999, method = "manhattan")
perm_mirh #strong height interaction
# CAP by transect
# Canonical analysis of principal coordinates constrained by Transect;
# m = 0 lets CAPdiscrim pick the number of PCoA axes itself.
stmir <- as.data.frame(stmir)
cap_mirt <- CAPdiscrim(distmir~Transect, data = stmir, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
# Cache the (slow) CAP result to disk.
saveRDS(cap_mirt, file = "outputs/MIRCAP.rds")
readRDS("outputs/MIRCAP.rds")
# Proportion of among-group variation per canonical axis.
round(cap_mirt$F/sum(cap_mirt$F), digits=3)
barplot(cap_mirt$F/sum(cap_mirt$F))
cap_mirt_points <- bind_cols((as.data.frame(cap_mirt$x)), fmir)
glimpse(cap_mirt_points)
# CAP ordination plot.
ggplot(cap_mirt_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "CAP Axis 1; 41.2%",
y = "CAP Axis 2; 35.3%")
# CAP + spider
# Group centroids plus a segment from each point to its transect
# centroid ("spider" overlay).
mir_cent <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_mirt_points, FUN = mean)
mir_segs <- merge(cap_mirt_points, setNames(mir_cent, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_mirt_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .7) +
geom_segment(data = mir_segs, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .5, size = .25) +
geom_point(data = mir_cent, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5, alpha = 1.0) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "CAP Axis 1; 41.2%",
y = "CAP Axis 2; 35.3%")
#### Metals PCA ####
# Total-element concentrations: drop the trace elements with many
# below-detection values (As, Cd, Mo, Sb, Se), add the within-transect
# plot rank, and factor the ID columns.
metals <- sum %>%
select(c(1:11, 45:65)) %>%
select(-c(As, Cd, Mo, Sb, Se)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, PlotPos), as.factor))
# log1p-transform the strongly right-skewed elements before PCA.
metals %<>%
mutate(P = log1p(P),
Na = log1p(Na),
Mg = log1p(Mg),
K = log1p(K),
Co = log1p(Co),
Ca = log1p(Ca))
chart.Correlation(metals[13:28])
# PCA on the correlation matrix (cor = TRUE standardises the elements).
pca_metals <- princomp(metals[13:28], cor = TRUE, scores = TRUE)
biplot(pca_metals, choices = c(1,2))
summary(pca_metals) #PC1 = 58.3%, PC2 = 13.9%
# Keep the first two component scores and join them to the data.
scores_metals <- as.data.frame(pca_metals[["scores"]]) %>%
select(1:2)
metals_plot <- bind_cols(metals, scores_metals)
# Centroid + spider overlay, as for the CAP plot above.
metals_cent <- aggregate(cbind(Comp.1, Comp.2) ~ Transect, data = metals_plot, FUN = mean)
metals_segs <- merge(metals_plot, setNames(metals_cent, c('Transect', 'PC1', 'PC2')), by = 'Transect', sort = FALSE)
ggplot(metals_plot) +
geom_point(aes(x=Comp.1, y=Comp.2, colour = Transect, shape = PlotPos), size = 3, alpha = .7) +
geom_segment(data = metals_segs, mapping = aes(x = Comp.1, y = Comp.2, xend = PC1, yend = PC2, colour = Transect), alpha = .5, size = .25) +
geom_point(data = metals_cent, mapping = aes(x = Comp.1, y = Comp.2, colour = Transect), size = 5, alpha = 1.0) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "PCA Axis 1; 58.3%",
y = "PCA Axis 2; 13.9%")
#### BW ####
# Landscape data plots
# The eleven panels below were ~30 lines of identical ggplot code each,
# differing only in data source, variable, fill colour and y label.
# Factor the template into one helper: half-eye density + jittered
# points (coloured by Transect) + transparent boxplot.
#
# df       data frame holding the variable and a Transect column
# yvar     unquoted column to plot (tidy-eval, embraced with {{ }})
# fill_hex fill colour for the half-eye density
# y_lab    y-axis label (string or plotmath expression)
plot_dist_panel <- function(df, yvar, fill_hex, y_lab) {
  ggplot(df) +
    stat_halfeye(aes(y = {{ yvar }}),
                 adjust = .5,
                 width = .6,
                 .width = 0,
                 justification = -.3,
                 point_colour = NA,
                 fill = fill_hex) +
    geom_point(aes(x = 0, y = {{ yvar }}, colour = Transect),
               shape = 21,
               stroke = 1,
               size = 3,
               # fixed seed so the jitter is reproducible across runs
               position = position_jitter(seed = 1, width = 0.1)) +
    geom_boxplot(aes(y = {{ yvar }}),
                 alpha = 0,
                 width = .25,
                 outlier.shape = NA) +
    scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
    theme_classic() +
    labs(y = y_lab, colour = "Toposequence") +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
# Terrain / physical panels (fills walk through the Spectral palette).
RTHeight <- plot_dist_panel(sum, RTHeight, "#9E0142", "Relative height in toposequence (m)")
TWI      <- plot_dist_panel(sum, TWI, "#D53E4F", "Topographic wetness index")
TPI      <- plot_dist_panel(sum, TPI, "#F46D43", "Topographic position index")
Slope    <- plot_dist_panel(sum, Slope, "#FDAE61", "Slope")
planCurv <- plot_dist_panel(sum, planCurv, "#FEE08B", "Plan curvature")
proCurv  <- plot_dist_panel(sum, proCurv, "#FFFFBF", "Profile curvature")
# NDVI, radar moisture and gravimetric moisture come from the
# repeated-measures table `all`, not the summary table.
NDVI     <- plot_dist_panel(all, NDVI, "#E6F598", "Normalised difference vegetation index (NDVI)")
Wet      <- plot_dist_panel(all, Wet, "#ABDDA4", "Soil moisture by synthetic aperture radar (Sentinel)")
Moisture <- plot_dist_panel(all, Moisture, "#66C2A5", expression ("Soil moisture (g"~g^-1~" dry weight)"))
WHC      <- plot_dist_panel(sum, WHC, "#3288BD", expression ("Water holding capacity (g"~g^-1~")"))
BD0_30   <- plot_dist_panel(sum, BD0_30, "#5E4FA2", expression ("Bulk density (g"~cm^-3~")"))
# Composite figure with a shared legend (patchwork).
RTHeight + TWI + TPI + Slope + planCurv + proCurv + NDVI +
  Wet + Moisture + WHC + BD0_30 + guide_area() +
  plot_layout(ncol = 6, guides = 'collect') +
  plot_annotation(tag_levels = 'a') &
  theme(plot.tag.position = c(1, 1),
        plot.tag = element_text(size = 16, hjust = 4, vjust = 2))
#y = expression ("Bulk density g"~cm^-3)
# Chem data
# The seventeen chemistry panels were near-identical ggplot stanzas;
# use one shared template: half-eye density + jittered points
# (coloured by Transect) + transparent boxplot. Variables with NAs are
# filtered per-variable BEFORE plotting, matching the original
# drop_na(<var>) calls.
#
# df       data frame holding the variable and a Transect column
# yvar     unquoted column to plot (tidy-eval, embraced with {{ }})
# fill_hex fill colour for the half-eye density
# y_lab    y-axis label (string or plotmath expression)
plot_dist_panel <- function(df, yvar, fill_hex, y_lab) {
  ggplot(df) +
    stat_halfeye(aes(y = {{ yvar }}),
                 adjust = .5,
                 width = .6,
                 .width = 0,
                 justification = -.3,
                 point_colour = NA,
                 fill = fill_hex) +
    geom_point(aes(x = 0, y = {{ yvar }}, colour = Transect),
               shape = 21,
               stroke = 1,
               size = 3,
               # fixed seed so the jitter is reproducible across runs
               position = position_jitter(seed = 1, width = 0.1)) +
    geom_boxplot(aes(y = {{ yvar }}),
                 alpha = 0,
                 width = .25,
                 outlier.shape = NA) +
    scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
    theme_classic() +
    labs(y = y_lab, colour = "Toposequence") +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
# Static chemistry panels.
pHc <- plot_dist_panel(all, pHc, "#9E0142", expression (~pH[CaCl[2]]))
EC  <- plot_dist_panel(all, EC, "#C0247A", expression ("Electrical conductivity (dS "~m^-1~")"))
CEC <- plot_dist_panel(sum, CEC, "#DC494C", expression ("Cation exchange capacity ("~cmol^+~" "~kg^-1~")"))
# First two PCA axes of the total-element data (see Metals PCA above).
PC1 <- plot_dist_panel(metals_plot, Comp.1, "#F06744", expression ("Total elements principal component 1, 58.3% of variance"))
PC2 <- plot_dist_panel(metals_plot, Comp.2, "#F88D51", expression ("Total elements principal component 2, 13.9% of variance"))
P   <- plot_dist_panel(sum, P, "#FDB466", expression ("Total phosphorus (mg "~kg^-1~")"))
K   <- plot_dist_panel(sum, K, "#FDD380", expression ("Total potassium (mg "~kg^-1~")"))
S   <- plot_dist_panel(sum, S, "#FEEB9E", expression ("Total sulphur (mg "~kg^-1~")"))
# C/N fraction means contain NAs; drop them per-variable.
TotOC <- plot_dist_panel(drop_na(sum, TotOC_mean), TotOC_mean, "#FFFFBF", expression ("Total organic carbon (g "~kg^-1~")"))
TotN  <- plot_dist_panel(drop_na(sum, TotN_mean), TotN_mean, "#EFF8A6", expression ("Total nitrogen (g "~kg^-1~")"))
CN    <- plot_dist_panel(drop_na(sum, CN_mean), CN_mean, "#D7EF9B", expression ("C:N ratio"))
d13C  <- plot_dist_panel(drop_na(sum, d13C_mean), d13C_mean, "#B2E0A2", expression (paste(delta^{13}, "C (\u2030)")))
d15N  <- plot_dist_panel(drop_na(sum, d15N_mean), d15N_mean, "#88CFA4", expression (paste(delta^{15}, "N (\u2030)")))
POC   <- plot_dist_panel(drop_na(sum, POC_mean), POC_mean, "#5FBAA8", expression ("Particulate organic carbon (g "~kg^-1~")"))
HOC   <- plot_dist_panel(drop_na(sum, HOC_mean), HOC_mean, "#3F96B7", expression ("Humus organic carbon (g "~kg^-1~")"))
ROC   <- plot_dist_panel(drop_na(sum, ROC_mean), ROC_mean, "#4272B2", expression ("Resistant organic carbon (g "~kg^-1~")"))
Vuln  <- plot_dist_panel(drop_na(sum, Vuln_mean), Vuln_mean, "#5E4FA2", expression ("Organic carbon vulnerability"))
# Composite figure with a shared legend (patchwork).
pHc + EC + CEC + PC1 + PC2 + P + K + S + TotOC +
  TotN + CN + d13C + d15N + POC + HOC + ROC + Vuln + guide_area() +
  plot_layout(ncol = 6, guides = 'collect') +
  plot_annotation(tag_levels = 'a') &
  theme(plot.tag.position = c(1, 1),
        plot.tag = element_text(size = 16, hjust = 2, vjust = 2))
### Dynamic
# Dynamic (per-sampling) N pools, using the same raincloud panel
# template as the other figure blocks: half-eye density + jittered
# points (coloured by Transect) + transparent boxplot.
#
# df       data frame holding the variable and a Transect column
# yvar     unquoted column to plot (tidy-eval, embraced with {{ }})
# fill_hex fill colour for the half-eye density
# y_lab    y-axis label (string or plotmath expression)
plot_dist_panel <- function(df, yvar, fill_hex, y_lab) {
  ggplot(df) +
    stat_halfeye(aes(y = {{ yvar }}),
                 adjust = .5,
                 width = .6,
                 .width = 0,
                 justification = -.3,
                 point_colour = NA,
                 fill = fill_hex) +
    geom_point(aes(x = 0, y = {{ yvar }}, colour = Transect),
               shape = 21,
               stroke = 1,
               size = 3,
               # fixed seed so the jitter is reproducible across runs
               position = position_jitter(seed = 1, width = 0.1)) +
    geom_boxplot(aes(y = {{ yvar }}),
                 alpha = 0,
                 width = .25,
                 outlier.shape = NA) +
    scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
    theme_classic() +
    labs(y = y_lab, colour = "Toposequence") +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
NO3 <- plot_dist_panel(drop_na(all, NO3), NO3, "#9E0142", expression ("Extractable "~NO[3]^{"-"}~"-N (mg "~kg^-1~")"))
NH4 <- plot_dist_panel(drop_na(all, NH4), NH4, "#E25249", expression ("Extractable "~NH[4]^{"+"}~"-N (mg "~kg^-1~")"))
FAA <- plot_dist_panel(drop_na(all, FAA), FAA, "#FBA45C", expression ("Extractable free amino acid-N (mg "~kg^-1~")"))
# NB: the original used drop_na() with NO columns here, which removes
# rows with an NA in ANY column -- that behaviour is preserved.
DON <- plot_dist_panel(drop_na(all), DON, "#FEE899", expression ("Dissolved organic N (mg "~kg^-1~")"))
DOC <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = DOC),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#EDF7A3") +
geom_point(aes(x = 0, y = DOC, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = DOC),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Dissolved organic C (mg "~kg^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
MBC <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = MBC),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#A1D9A4") +
geom_point(aes(x = 0, y = MBC, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = MBC),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Microbial biomass C (mg "~kg^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
MBN <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = MBN),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#48A0B2") +
geom_point(aes(x = 0, y = MBN, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = MBN),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Microbial biomass N (mg "~kg^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
AvailP <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = AvailP),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#5E4FA2") +
geom_point(aes(x = 0, y = AvailP, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = AvailP),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Olsen-extractable P (mg "~kg^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
# Assemble the dynamic soil-property panels into a single tagged figure
# with one collected legend.
NO3 + NH4 + FAA + DON + DOC + MBC + MBN + AvailP +
  plot_layout(ncol = 4, guides = "collect") +
  plot_annotation(tag_levels = "a") &
  theme(plot.tag.position = c(0, 1),
        plot.tag = element_text(size = 16, hjust = -12, vjust = 2))
### Microbial
# Raincloud panel shared by every microbial property below: a half-eye
# density, jittered points coloured by toposequence, and a transparent
# boxplot, with the Spectral palette and a blank x axis.
#
# `data`     data frame, pre-filtered (all microbial panels use the original
#            bare drop_na(), i.e. rows with NA in ANY column are removed).
# `var`      unquoted column to plot (tidy evaluation via {{ }}).
# `fill_col` fill colour for the half-eye density.
# `y_lab`    y-axis label (plotmath expression).
raincloud_panel <- function(data, var, fill_col, y_lab) {
  ggplot(data) +
    stat_halfeye(aes(y = {{ var }}),
                 adjust = .5,
                 width = .6,
                 .width = 0,
                 justification = -.3,
                 point_colour = NA,
                 fill = fill_col) +
    geom_point(aes(x = 0, y = {{ var }}, colour = Transect),
               shape = 21,
               stroke = 1,
               size = 3,
               # fixed jitter seed so panels are reproducible
               position = position_jitter(seed = 1, width = 0.1)) +
    geom_boxplot(aes(y = {{ var }}),
                 alpha = 0,
                 width = .25,
                 outlier.shape = NA) +
    scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
    theme_classic() +
    labs(y = y_lab, colour = "Toposequence") +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
Proteolysis <- raincloud_panel(drop_na(all), Proteolysis, "#9E0142",
                               expression("Proteolysis rate (mg AA-N"~kg^-1~h^-1~")"))
AAMin_k1 <- raincloud_panel(drop_na(all), AAMin_k1, "#D53E4F",
                            expression("Rate of initial AA mineralisation ("~h^-1~")"))
MicY <- raincloud_panel(drop_na(all), MicY, "#F46D43",
                        expression("Microbial yield"))
TotalPLFA <- raincloud_panel(drop_na(all), TotalPLFA, "#FDAE61",
                             expression("Total PLFA (nmol "~g^-1~")"))
Bac <- raincloud_panel(drop_na(all), Bac, "#FEE08B",
                       expression("Bacterial PLFA (nmol "~g^-1~")"))
Fun <- raincloud_panel(drop_na(all), Fun, "#FFFFBF",
                       expression("Fungal PLFA (nmol "~g^-1~")"))
Gpos <- raincloud_panel(drop_na(all), Gpos, "#E6F598",
                        expression("G+ bacterial PLFA (nmol "~g^-1~")"))
Gneg <- raincloud_panel(drop_na(all), Gneg, "#ABDDA4",
                        expression("G- bacterial PLFA (nmol "~g^-1~")"))
Act <- raincloud_panel(drop_na(all), Act, "#66C2A5",
                       expression("Actinomycete PLFA (nmol "~g^-1~")"))
F_B <- raincloud_panel(drop_na(all), F_B, "#3288BD",
                       expression("Fungal:Bacterial ratio"))
Gp_Gn <- raincloud_panel(drop_na(all), Gp_Gn, "#5E4FA2",
                         expression("Gram+:Gram- ratio"))
# Assemble the microbial panels into a single tagged figure with one
# collected legend.
Proteolysis + AAMin_k1 + MicY + TotalPLFA + Bac + Fun +
  Gpos + Gneg + Act + F_B + Gp_Gn + guide_area() +
  plot_layout(ncol = 6, guides = "collect") +
  plot_annotation(tag_levels = "a") &
  theme(plot.tag.position = c(1, 1),
        plot.tag = element_text(size = 16, hjust = 4, vjust = 2))
#### xy plots ####
# Add plot position
# Rank plots within each transect from highest to lowest relative elevation
# (PlotPos = 1 is the highest plot) and coerce the ID columns to factors.
sum <- sum %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos),
                as.factor))
str(sum)
# isotopes
#CN
# Scatter of two summary variables coloured by plot position; shared style
# for all the xy panels below.
#
# `data`         data frame of plot-level summaries.
# `xvar`/`yvar`  unquoted columns for the axes (tidy evaluation via {{ }}).
# `x_lab`/`y_lab` axis labels (string or plotmath expression).
pp_scatter <- function(data, xvar, yvar, x_lab, y_lab) {
  ggplot(data) +
    geom_point(aes(x = {{ xvar }}, y = {{ yvar }}, colour = PlotPos),
               size = 3) +
    scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
    theme_classic() +
    theme(strip.background = element_blank()) +
    labs(x = x_lab, y = y_lab, colour = "Plot position")
}
# C:N against each isotope
cn_c <- pp_scatter(sum, CN_mean, d13C_mean, "C:N ratio",
                   expression(paste(delta^{13}, "C (\u2030)")))
cn_n <- pp_scatter(sum, CN_mean, d15N_mean, "C:N ratio",
                   expression(paste(delta^{15}, "N (\u2030)")))
# SOC vulnerability against each isotope
vuln_c <- pp_scatter(sum, Vuln_mean, d13C_mean, "SOC vulnerability",
                     expression(paste(delta^{13}, "C (\u2030)")))
vuln_n <- pp_scatter(sum, Vuln_mean, d15N_mean, "SOC vulnerability",
                     expression(paste(delta^{15}, "N (\u2030)")))
# Isotopes only
iso <- pp_scatter(sum, d13C_mean, d15N_mean,
                  expression(paste(delta^{13}, "C (\u2030)")),
                  expression(paste(delta^{15}, "N (\u2030)")))
# BUG FIX: the original line lacked a trailing `+` after guide_area(), so
# the patchwork printed with its default layout and the following
# plot_layout()/plot_annotation() expression was evaluated on its own and
# never applied. Join the whole figure into one expression.
cn_c + cn_n + iso + vuln_c + vuln_n + guide_area() +
  plot_layout(ncol = 3, guides = "collect") +
  plot_annotation(tag_levels = "a") &
  theme(plot.tag.position = c(0, 1),
        plot.tag = element_text(size = 16, hjust = -5, vjust = 1))
#### local scale ####
#### biogeochem ####
# Transect-level summary data: rank plots by elevation within each
# toposequence and factorise the ID columns.
t1_summary <- read_csv("data/processed/summary.csv") %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos),
                as.factor))
str(t1_summary)
# Move character columns to the front
t1_summary <- relocate(t1_summary, where(is.character))
# Keep the mean biogeochemical variables plus the site descriptors
bgc_mean <- t1_summary %>%
  select(UniqueID, Transect, Plot, PlotPos, Easting, Northing, Height,
         RHeight, RTHeight, Inun, Clay, CEC, WHC, BD0_30, NDVI_mean,
         Wet_mean, Moisture_mean, pHc_mean, EC_mean, AvailP_mean, CN_mean,
         Vuln_mean, d13C_mean, d15N_mean, DOC_mean, NO3_mean, NH4_mean,
         FAA_mean, Proteolysis_mean, AAMin_k1_mean, DON_mean, MBC_mean,
         MBN_mean, MicY_mean)
# Pre-prep: PCA of total elements to reduce dimensions.
# NOTE(review): columns are selected by position (47:66) — fragile if the
# summary file layout changes; confirm against the CSV header.
tot_elms <- t1_summary %>%
  select(47:66) %>%
  select(!c(As, B, Cd, Mo, Sb, Se))
chart.Correlation(tot_elms)
# log1p-transform the right-skewed elements before PCA
ttot_elms <- tot_elms %>%
  mutate(across(c(P, Na, Mg, K, Co, Ca), log1p))
chart.Correlation(ttot_elms)
# PCA on the correlation matrix; keep the first two components
pca_elms <- princomp(ttot_elms, cor = TRUE, scores = TRUE)
biplot(pca_elms, choices = c(1, 2))
summary(pca_elms) # PC1 = 59.2%, PC2 = 11.7%
scores_elms <- pca_elms[["scores"]] %>%
  as.data.frame() %>%
  select(1:2)
# Prep: attach the element PCs, log1p-transform the skewed variables, and
# z-standardise, then split ID columns from data columns.
bgc_mean <- cbind(bgc_mean, scores_elms)
bgc_cor <- select(bgc_mean, 11:36)
chart.Correlation(bgc_cor, histogram = TRUE, pch = 19)
tbgc_mean <- bgc_mean %>%
  mutate(across(c(MBN_mean, NH4_mean, AvailP_mean, EC_mean, pHc_mean,
                  BD0_30),
                log1p))
stbgc_mean <- tbgc_mean %>%
  mutate(across(c(11:36), ~z.fn(.)))
fbgc <- select(stbgc_mean, 1:10)   # ID / site descriptor columns
dbgc <- select(stbgc_mean, 11:36)  # standardised data columns
# PCoA on Euclidean distances of the standardised biogeochemical variables
distbgc <- vegdist(dbgc, method = "euclidean", na.rm = TRUE)
pbgc <- pcoa(distbgc)
# Inspect how much variation the leading axes capture
pbgc$values$Relative_eig[1:10]
barplot(pbgc$values$Relative_eig[1:10])
# Site scores joined back to the ID columns
bgc_points <- bind_cols(fbgc, as.data.frame(pbgc$vectors))
# Compute descriptor arrows for a PCoA biplot.
# Standardises the PCoA site scores, takes the covariance of each original
# variable with every standardised axis, then rescales by the corresponding
# eigenvalues so arrow lengths reflect variable-axis association.
#
# `given_pcoa` list as returned by ape::pcoa() ($vectors matrix, $values
#              data frame with an Eigenvalues column).
# `orig_df`    the data used to build the distance matrix (rows = sites).
# Returns `given_pcoa` with the arrow matrix attached as $U.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of variables with all axes
  S <- cov(orig_df, points.stand)
  # One eigenvalue per retained axis; pcoa() lists positive eigenvalues
  # first, so the leading ncol(S) entries are the +ve ones
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # Standardise the covariances into arrow coordinates
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
pbgc <- compute.arrows(pbgc, dbgc)
# Arbitrary x10 scaling so the arrows are legible on the ordination
pbgc_arrows_df <- as.data.frame(pbgc$U * 10) %>%
  rownames_to_column("variable")
# PCoA ordination coloured by plot position, with variable arrows
ggplot(bgc_points) +
  geom_point(aes(x = Axis.1, y = Axis.2, colour = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pbgc_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pbgc_arrows_df,
                           aes(x = Axis.1, y = Axis.2, label = variable),
                           size = 4) +
  labs(x = "PCoA Axis 1; 25.6%",
       y = "PCoA Axis 2; 16.2%")
# Permanova
# NOTE: set.seed immediately before the permutation tests makes the permuted
# p-values reproducible — keep the call order unchanged.
set.seed(1983)
# Overall PERMANOVA (vegan::adonis2): does the biogeochemical composition
# differ by transect and plot position?
perm_bgc <- adonis2(distbgc~Transect+PlotPos, data = stbgc_mean, permutations = 9999, method = "euclidean")
perm_bgc #strong impact of transect and plot
# Pairwise transect comparisons with FDR correction (RVAideMemoire)
permpt_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_bgc #.098 is lowest possible - several pairwise comps have this
# Pairwise plot-position comparisons with FDR correction
permpp_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_bgc #4 is sig diff from 1&2. 3 borderline diff from 1&2. 1 borderline diff from 2
# CAP by transect
# Canonical Analysis of Principal coordinates (BiodiversityR::CAPdiscrim),
# discriminating samples by toposequence. m = 0 lets CAPdiscrim choose the
# number of PCoA axes; permutations give the classification significance.
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgct <- CAPdiscrim(distbgc~Transect, data = stbgc_mean, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
# Correlation-based species (variable) scores for the biplot arrows
# NOTE(review): `Rscale = F` uses the reassignable F shortcut — FALSE is safer.
cap_bgct <- add.spec.scores(cap_bgct, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
# Cache the fitted ordination
saveRDS(cap_bgct, file = "data/processed/CAP_bgct.rds")
# Proportion of the discriminant F statistic captured per axis
round(cap_bgct$F/sum(cap_bgct$F), digits=3)
barplot(cap_bgct$F/sum(cap_bgct$F))
# Site scores joined back to the ID columns
cap_bgct_points <- bind_cols((as.data.frame(cap_bgct$x)), fbgc)
glimpse(cap_bgct_points)
cap_bgct_arrows <- as.data.frame(cap_bgct$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Ordination coloured by transect, shaped by plot position, with arrows
ggplot(cap_bgct_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgct_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgct_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 56.7%",
y = "CAP Axis 2; 23.0%")
# CAP by transect drawn as a spider plot: each sample is connected to its
# transect centroid.
bgc_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_bgct_points,
                       FUN = mean)
# Attach each sample's centroid coordinates (oLD1/oLD2) for the segments
bgc_segst <- merge(cap_bgct_points,
                   setNames(bgc_centt, c("Transect", "oLD1", "oLD2")),
                   by = "Transect", sort = FALSE)
cap_bgct_fig <- ggplot(cap_bgct_points) +
  geom_point(aes(x = LD1, y = LD2, colour = Transect, shape = PlotPos),
             size = 3, alpha = .6) +
  geom_segment(data = bgc_segst,
               mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2,
                             colour = Transect),
               alpha = .7, size = .25) +
  geom_point(data = bgc_centt,
             mapping = aes(x = LD1, y = LD2, colour = Transect),
             size = 5) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_bgct_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_bgct_arrows,
                           aes(x = LD1, y = LD2, label = variable),
                           size = 4) +
  labs(x = "CAP Axis 1; 56.7%",
       y = "CAP Axis 2; 23.0%",
       colour = "Toposequence",
       shape = "Plot position")
# CAP by plotpos
# Same CAP analysis, now discriminating by plot position (m = 3 fixes the
# number of PCoA axes rather than letting CAPdiscrim choose).
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgcp <- CAPdiscrim(distbgc~PlotPos, data = stbgc_mean, axes = 10, m = 3, mmax = 10, add = FALSE, permutations = 999)
# Correlation-based variable scores for the biplot arrows
# NOTE(review): `Rscale = F` uses the reassignable F shortcut — FALSE is safer.
cap_bgcp <- add.spec.scores(cap_bgcp, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
# Cache the fitted ordination
saveRDS(cap_bgcp, file = "data/processed/CAP_bgcp.rds")
# Proportion of the discriminant F statistic captured per axis
round(cap_bgcp$F/sum(cap_bgcp$F), digits=3)
barplot(cap_bgcp$F/sum(cap_bgcp$F))
# Site scores joined back to the ID columns
cap_bgcp_points <- bind_cols((as.data.frame(cap_bgcp$x)), fbgc)
glimpse(cap_bgcp_points)
cap_bgcp_arrows <- as.data.frame(cap_bgcp$cproj*3) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Ordination coloured by plot position, with variable arrows
ggplot(cap_bgcp_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgcp_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgcp_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 76.3%",
y = "CAP Axis 2; 23.7%")
# CAP by plot position drawn as a spider plot: each sample is connected to
# its plot-position centroid.
bgc_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_bgcp_points,
                       FUN = mean)
# Attach each sample's centroid coordinates (oLD1/oLD2) for the segments
bgc_segsp <- merge(cap_bgcp_points,
                   setNames(bgc_centp, c("PlotPos", "oLD1", "oLD2")),
                   by = "PlotPos", sort = FALSE)
cap_bgcpfig <- ggplot(cap_bgcp_points) +
  geom_point(aes(x = LD1, y = LD2, colour = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = bgc_segsp,
               mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2,
                             colour = PlotPos),
               alpha = .9, size = .3) +
  geom_point(data = bgc_centp,
             mapping = aes(x = LD1, y = LD2, colour = PlotPos),
             size = 5) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_bgcp_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_bgcp_arrows,
                           aes(x = LD1, y = LD2, label = variable),
                           size = 4) +
  labs(x = "CAP Axis 1; 76.3%",
       y = "CAP Axis 2; 23.7%",
       colour = "Plot position")
# Stack the two CAP spider figures with shared tags
cap_bgct_fig + cap_bgcpfig +
  plot_layout(ncol = 1) +
  plot_annotation(tag_levels = "a") &
  theme(plot.tag.position = c(0, 1),
        plot.tag = element_text(size = 16, hjust = -5, vjust = 1))
#### temporal ####
# Outlier-removed chemistry time series: rank plots by elevation within each
# transect, factorise the ID columns, and parse the sampling date.
OL_cor <- read_csv("data/processed/ChemAll_adm_OLrem.csv") %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos),
                as.factor)) %>%
  mutate(Date = dmy(Date))
str(OL_cor)
# Assign the field-campaign sampling period from the sample date.
# Date windows are inclusive; any date outside every window becomes NA.
# The column is placed directly after Date, matching the original layout.
add_sampling_period <- function(df) {
  df %>%
    mutate("Sampling Period" = case_when(
      Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
      Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
      Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
      Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
      Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
    )) %>%
    relocate("Sampling Period", .after = Date)
}
# Order the Sampling Period factor chronologically for plotting.
# fct_relevel also accepts a character column, converting it to a factor.
order_sampling_period <- function(df) {
  df %>%
    mutate(`Sampling Period` = fct_relevel(`Sampling Period`,
                                           "Autumn 2019",
                                           "Winter 2019",
                                           "At flooding",
                                           "3 months post flood",
                                           "11 months post flood"))
}
# PLFA time series: parse dates, rank plots, assign sampling periods,
# factorise IDs, and order the period factor.
plfa <- read_csv("data/working/MasterFieldDataFC_NSW - PLFAs.csv") %>%
  mutate(Date = dmy(Date)) %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  add_sampling_period() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos,
                  "Sampling Period"), as.factor)) %>%
  order_sampling_period()
str(plfa)
# Outlier-removed chemistry + PLFA: same preparation (no period reordering
# here, as in the original).
OLP_cor <- read_csv("data/processed/ChemAll_adm_OLremPLFA.csv") %>%
  mutate(Date = dmy(Date)) %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  add_sampling_period() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos,
                  "Sampling Period"), as.factor))
str(OLP_cor)
# OL_cor (prepared above) gets the same sampling-period treatment
OL_cor <- OL_cor %>%
  add_sampling_period() %>%
  mutate(`Sampling Period` = as.factor(`Sampling Period`))
str(OL_cor)
levels(OL_cor$`Sampling Period`)
OL_cor <- order_sampling_period(OL_cor)
# Temporal subset used for the ordinations below
temporalP <- OLP_cor %>%
  select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos,
         Easting, Northing, Height, RHeight, RTHeight, Inun,
         NDVI, VH, VV, Wet, Moisture, pHc, EC, AvailP,
         DOC, DTN, NO3, NH4, FAA, Proteolysis, AAMin_k1, DON, MBC,
         MBN, MicY, MicCN, TotalPLFA, F_B, Gp_Gn, Act_Gp)
# Data for this are in `temporalP`
glimpse(temporalP)
# Move inundation next to plot position and order its levels (yes/maybe/no)
temporalP <- temporalP %>%
  relocate(Inun, .after = PlotPos) %>%
  mutate(Inun = fct_relevel(Inun, "y", "m", "n"))
# Quick correlation matrix for evaluation
chart.Correlation(temporalP[, 8:36], histogram = TRUE, pch = 19)
# Drop uninformative columns and log1p-transform the skewed variables
ttemporalP <- temporalP %>%
  select(-c(VH, VV, DTN)) %>%
  mutate(across(c(Moisture, pHc, EC, AvailP, NO3, NH4, FAA, Proteolysis,
                  DON, MBC, MBN, MicCN, TotalPLFA, F_B),
                log1p))
chart.Correlation(ttemporalP[, 8:33], histogram = TRUE, pch = 19)
# Prep: drop incomplete rows, z-standardise, split ID vs data columns
sttemporalP <- ttemporalP %>%
  drop_na() %>%
  mutate(across(c(13:33), ~z.fn(.)))
ftempP <- select(sttemporalP, 1:12)   # ID / site descriptor columns
dtempP <- select(sttemporalP, 13:33)  # standardised data columns
# PCoA on Euclidean distances
disttempP <- vegdist(dtempP, method = "euclidean", na.rm = TRUE)
ptempP <- pcoa(disttempP)
# Inspect how much variation the leading axes capture
ptempP$values$Relative_eig[1:10]
barplot(ptempP$values$Relative_eig[1:10])
# Site scores joined back to the ID columns
tempP_points <- bind_cols(ftempP, as.data.frame(ptempP$vectors))
# Project original variables onto PCoA axes as biplot arrows.
#
# Computes the covariance between the original variables and the standardised
# PCoA site scores, rescales each axis by sqrt(eigenvalue / (n - 1)), and
# stores the result as `U` on the pcoa object (variables x axes), mirroring
# the usual species-score projection for PCoA biplots.
#
# given_pcoa: result of ape::pcoa() -- needs $vectors and $values$Eigenvalues
# orig_df:    data frame of original variables, rows matching the pcoa sites;
#             subset its columns beforehand to restrict which arrows are drawn
# Returns:    given_pcoa with the added matrix `U`.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  # Standardise site scores so covariances are on a common scale
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of each variable with every retained axis
  S <- cov(orig_df, points.stand)
  # Eigenvalues matching the retained axes.
  # NOTE(review): this takes the FIRST ncol(S) eigenvalues; the original
  # comment claimed "only +ve eigenvalues", which only holds if all retained
  # axes have positive eigenvalues -- confirm for non-Euclidean distances.
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # Standardise covariances by the axis scale factor
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Add variable arrows to the PCoA object, then build an arrows data frame
# scaled x10 purely for visual length on the ordination plots.
ptempP = compute.arrows(ptempP, dtempP)
ptempP_arrows_df <- as.data.frame(ptempP$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Plot
# PCoA ordination 1: colour = Transect, shape = Sampling Period
ggplot(tempP_points) + #Some separation by date, transect# seems noisy
geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = `Sampling Period`), size = 6) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
# PCoA ordination 2: colour = PlotPos, shape = Sampling Period
ggplot(tempP_points) + #A bit more informative, definite axis1 trend of transect. Date clustering a bit more obvious
geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = `Sampling Period`), size = 6) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
# PCoA ordination 3: colour = PlotPos, shape = inundation class
ggplot(tempP_points) + #Seems to clearly show separation
geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = Inun), size = 6) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
scale_shape_manual(values = c(15, 18, 0)) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
# Permanova
# Seed fixed so the 9999-permutation p-values are reproducible
set.seed(1983)
perm_tempPtp <- adonis2(disttempP~Transect*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtp #strong impact of transect and sampling period, no interaction
perm_tempPpp <- adonis2(disttempP~PlotPos*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPpp #strong impact of plot position and sampling period, no interaction
perm_tempPtpp <- adonis2(disttempP~Transect+PlotPos+`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtpp #strong impact of transect, plot position and sampling period in additive model
# Pairwise post-hoc PERMANOVAs with FDR correction for each factor
permpt_tempP <- pairwise.perm.manova(disttempP, sttemporalP$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_tempP #All differ except 0&8, 1&8, 3&9, 5&7
permpp_tempP <- pairwise.perm.manova(disttempP, sttemporalP$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_tempP #All differ except 2&3
permps_tempP <- pairwise.perm.manova(disttempP, sttemporalP$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permps_tempP #All differ
# CAP by transect
# Canonical Analysis of Principal coordinates (BiodiversityR::CAPdiscrim);
# CAPdiscrim needs a plain data.frame, not a tibble
sttemporalP <- as.data.frame(sttemporalP)
cap_temptP <- CAPdiscrim(disttempP~Transect, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 99)
# Correlation-based variable scores for biplot arrows
cap_temptP <- add.spec.scores(cap_temptP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
# Proportion of among-group variance per canonical axis
round(cap_temptP$F/sum(cap_temptP$F), digits=3)
barplot(cap_temptP$F/sum(cap_temptP$F))
cap_temptP_points <- bind_cols((as.data.frame(cap_temptP$x)), ftempP)
glimpse(cap_temptP_points)
# Arrow coordinates scaled x5 for display only
cap_temptP_arrows <- as.data.frame(cap_temptP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_temptP_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temptP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 57.0%",
y = "CAP Axis 2; 16.7%")
# CAP by transect + spider
# Group centroids plus segments from each point to its centroid ("spider")
tempP_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_temptP_points, FUN = mean)
tempP_segst <- merge(cap_temptP_points, setNames(tempP_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_temptP_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
geom_segment(data = tempP_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
geom_point(data = tempP_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temptP_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 57.0%",
y = "CAP Axis 2; 16.7%")
# CAP by plotpos
# NOTE(review): only 9 permutations here (vs 99/999 elsewhere) -- confirm
# whether this was intentional or a typo.
cap_temppP <- CAPdiscrim(disttempP~PlotPos, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_temppP <- add.spec.scores(cap_temppP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temppP$F/sum(cap_temppP$F), digits=3)
barplot(cap_temppP$F/sum(cap_temppP$F))
cap_temppP_points <- bind_cols((as.data.frame(cap_temppP$x)), ftempP)
glimpse(cap_temppP_points)
cap_temppP_arrows <- as.data.frame(cap_temppP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_temppP_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 80.2%",
y = "CAP Axis 2; 18.7%")
# CAP by plot + spider
tempP_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_temppP_points, FUN = mean)
tempP_segsp <- merge(cap_temppP_points, setNames(tempP_centp, c('PlotPos', 'oLD1', 'oLD2')), by = 'PlotPos', sort = FALSE)
ggplot(cap_temppP_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 3, alpha = .6) +
geom_segment(data = tempP_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = PlotPos), alpha = .9, size = .3) +
geom_point(data = tempP_centp, mapping = aes(x = LD1, y = LD2, colour = PlotPos), size = 5) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppP_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 80.2%",
y = "CAP Axis 2; 18.7%")
# CAP by SamplingPeriod
cap_temppsP <- CAPdiscrim(disttempP~`Sampling Period`, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
cap_temppsP <- add.spec.scores(cap_temppsP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
# Cache this (slowest) CAP fit for reuse elsewhere
saveRDS(cap_temppsP, file = "outputs/cap_temppsP.rds")
round(cap_temppsP$F/sum(cap_temppsP$F), digits=3)
barplot(cap_temppsP$F/sum(cap_temppsP$F))
cap_temppsP_points <- bind_cols((as.data.frame(cap_temppsP$x)), ftempP)
glimpse(cap_temppsP_points)
cap_temppsP_arrows <- as.data.frame(cap_temppsP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
cap_temppsP_arrows
ggplot(cap_temppsP_points) +
geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`), size = 4) +
scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppsP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 65.2%",
y = "CAP Axis 2; 22.6%")
# CAP by SamplingPeriod + spider
tempP_centps <- aggregate(cbind(LD1, LD2) ~ `Sampling Period`, data = cap_temppsP_points, FUN = mean)
tempP_segsps <- merge(cap_temppsP_points, setNames(tempP_centps, c('Sampling Period', 'oLD1', 'oLD2')), by = 'Sampling Period', sort = FALSE)
ggplot(cap_temppsP_points) +
geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`, shape = PlotPos), size = 2.5, alpha = .4) +
geom_segment(data = tempP_segsps, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = `Sampling Period`), alpha = .9, size = .3) +
geom_point(data = tempP_centps, mapping = aes(x = LD1, y = LD2, colour = `Sampling Period`), size = 8) +
scale_colour_manual(values = brewer.pal(n = 5, name = "Set1")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppsP_arrows,
x = 0, y = 0, alpha = 0.6,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 5
) +
labs(
x = "CAP Axis 1; 65.2%",
y = "CAP Axis 2; 22.6%",
shape = "Plot position")
#### temporal trends ####
#This needs to be a multi-panel figure(s) y = var, x = date, colour = plot position, thick lines and points = mean, hairlines = toposequences
# 1) TICK - make a df with only vars of interest
# 2) TICK - Make summary df with means by landscape position
# 3) TICK - Plot individuals with feint lines, colours by landscape position
# 4) TICK - Overlay points and thicker lines, colours by landscape position
# Drop unused variables and build a Transect_PlotPos ID ("Tr_PP") that
# identifies each toposequence line in the hairline plots
seasonal <- temporalP %>%
select(-c(VH, VV, pHc, EC, DTN, MBC)) %>%
unite("Tr_PP", Transect:PlotPos, remove = FALSE)
# Variables to average; "Date" is included so each group gets a mean date
# (Date_mean) for the x-axis of the summary lines
seasonal_vars <- c("Date", "Moisture", "FAA", "NO3", "DON", "NH4", "AvailP", "DOC", "NDVI", "Wet", "Proteolysis", "AAMin_k1", "Gp_Gn", "F_B", "TotalPLFA", "MBN", "MicCN", "Act_Gp", "MicY")
# Group means per sampling period x plot position; across() appends "_mean"
# to each summarised column name
seasonal_sum <- seasonal %>%
group_by(`Sampling Period`, PlotPos) %>%
summarise(across(all_of(seasonal_vars),
list(mean = ~ mean(.x, na.rm = TRUE)))) %>%
ungroup()
# Build one seasonal time-series panel: per-toposequence hairlines from
# `seasonal`, plus thick mean lines/points from `seasonal_sum`, coloured by
# plot position. Replaces 17 copy-pasted ggplot blocks that differed only in
# the y variable and y-axis label.
#
# var:   column name in `seasonal` (its "<var>_mean" twin must exist in
#        `seasonal_sum`, produced by the summarise(across(...)) above)
# y_lab: y-axis label (plain string or plotmath expression)
# Returns a ggplot object.
plot_seasonal <- function(var, y_lab) {
  mean_var <- paste0(var, "_mean")
  ggplot() +
    # hairlines: one faint line per Transect x PlotPos toposequence
    geom_line(data = seasonal, aes(group = Tr_PP, x = Date, y = .data[[var]], colour = PlotPos), size = 0.05) +
    # group means at the mean sampling date of each period
    geom_line(data = seasonal_sum, aes(x = Date_mean, y = .data[[mean_var]], colour = PlotPos), size = 1) +
    geom_point(data = seasonal_sum, aes(x = Date_mean, y = .data[[mean_var]], colour = PlotPos), size = 2) +
    scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
    theme_classic() +
    theme(strip.background = element_blank()) +
    scale_x_date(date_breaks = "3 months", date_labels = "%b-%y") +
    labs(x = "", y = y_lab, colour = "Plot position")
}
# One panel per variable; object names unchanged so the patchwork assembly
# below still works.
prot   <- plot_seasonal("Proteolysis", expression("Proteolysis rate"))
moist  <- plot_seasonal("Moisture", expression("MC (g "~g^-1~")"))
faa    <- plot_seasonal("FAA", expression("FAA-N (mg N "~kg^-1~")"))
no3    <- plot_seasonal("NO3", expression(~NO[3]^{"-"}~"-N (mg "~kg^-1~")"))
nh4    <- plot_seasonal("NH4", expression(~NH[4]^{"+"}~"-N (mg "~kg^-1~")"))
don    <- plot_seasonal("DON", expression("DON (mg "~kg^-1~")"))
doc    <- plot_seasonal("DOC", expression("DOC (mg "~kg^-1~")"))
availp <- plot_seasonal("AvailP", expression("Available P (mg "~kg^-1~")"))
aak1   <- plot_seasonal("AAMin_k1", expression("AA min ("~h^-1~")"))
cue    <- plot_seasonal("MicY", expression("Amino acid CUE"))
gpgn   <- plot_seasonal("Gp_Gn", expression("G+ : G- ratio"))
actgp  <- plot_seasonal("Act_Gp", expression("Actinomycete : G+ ratio"))
fb     <- plot_seasonal("F_B", expression("Fungal : Bacterial ratio"))
mbn    <- plot_seasonal("MBN", expression("MBN (mg "~kg^-1~")"))
miccn  <- plot_seasonal("MicCN", expression("Microbial biomass C:N ratio"))
totp   <- plot_seasonal("TotalPLFA", expression("Total PLFA (nmol "~g^-1~")"))
ndvi   <- plot_seasonal("NDVI", expression("NDVI"))
wet    <- plot_seasonal("Wet", expression("Wetness index"))
# Assemble the panels with patchwork: figure 1 = nutrient/process panels,
# figure 2 = remote-sensing/microbial panels; shared legend at the bottom
no3 + nh4 + faa + don + doc + availp + prot + aak1 + cue + moist +
plot_annotation(tag_levels = 'a') +
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = 0, vjust = 1)) +
plot_layout(ncol = 2, guides = 'collect') & theme(legend.position = 'bottom')
ndvi + wet + miccn + mbn + totp + fb + gpgn + actgp +
plot_annotation(tag_levels = 'a') +
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = 0, vjust = 1)) +
plot_layout(ncol = 2, guides = 'collect') & theme(legend.position = 'bottom')
#### inflows ####
# River inflow record: read the year x month matrix, reshape to long form,
# then spline-interpolate to daily values for the ridgeline plots below.
inflow_raw <- read_csv("data/raw/KPinflows.csv") #read data
inflow_raw$YearTemp <- inflow_raw$Year #duplicate year column for onwards
inflow_long <- inflow_raw %>% #put in long form and kill empty space
  remove_empty() %>%
  pivot_longer(!c(Year, YearTemp), names_to = "Month", values_to = "Inflow")
# Strip the first 4 characters of the month column label -- presumably a
# fixed-width prefix in the CSV headers; TODO confirm against the raw file
inflow_long$Month <- gsub("^.{0,4}", "", inflow_long$Month) #Remove filler on date
inflow_long$Month <- paste0(inflow_long$YearTemp, inflow_long$Month) #make full dates
head(inflow_long$Month)
inflow_long$Month <- as_date(inflow_long$Month) #format as date
str(inflow_long)
# Spline through the monthly record, then evaluate at every day 1895-2019
SplineFun <- splinefun(x = inflow_long$Month, y = inflow_long$Inflow) #splining function
Dates <- seq.Date(ymd("1895-01-01"), ymd("2019-12-31"), by = 1) #Dates filling sequence
SplineFit <- SplineFun(Dates) #apply spline to filling dates
head(SplineFit)
newDF <- data.frame(Dates = Dates, FitData = SplineFit) #glue vecs together
head(newDF)
str(newDF)
# BUG FIX: was `newDF$Date`, which only worked via `$` partial matching on
# the `Dates` column -- spell the column name out in full.
newDF$year <- as.numeric(format(newDF$Dates, '%Y')) #Pull year into new column
newDF$Dates <- gsub("^.{0,4}", "2000", newDF$Dates) #Put dummy year into "month" so ridges plot aligned
newDF$Dates <- as_date(newDF$Dates) #re-make date type
#Needed for uninterpolated plot
month_levels <- c(
  "Jan", "Feb", "Mar", "Apr", "May", "Jun",
  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
)
# NOTE(review): `Month` was converted to Date above, so factoring against
# month abbreviations yields NA here -- the "without interpolation" plot
# further down already warns that feed-in code changed. TODO confirm intent.
inflow_long$Month <- factor(inflow_long$Month, levels = month_levels)
str(inflow_long)
#Colours from gradient picking at top of script
# 125 hex colours (one per year of inflow record, 1895-2019), grouped for
# readability; order is significant and matches the original vector exactly.
inflow_col <- c(
  "#F5E8C4", "#B77A27", "#E7D098", "#EDD9A9", "#F1E0B4",
  "#DBEEEB", "#F5F1E8", "#613706", "#F5EBD0", "#C8EAE5",
  "#BDE6E0", "#005349", "#C28734", "#C99748", "#187C74",
  "#CCEBE6", "#B8E3DD", "#F3E3B9", "#CC9C4E", "#663A06",
  "#5AB2A8", "#005046", "#003F33", "#036860", "#A36619",
  "#3C9C93", "#298B83", "#CFA154", "#1C8078", "#66BAB0",
  "#EBD6A3", "#9BD8CE", "#DBBB75", "#E2F0EE", "#B37625",
  "#F5F3F0", "#004C42", "#72C3B8", "#E9F2F1", "#90D3C9",
  "#84CEC3", "#CFECE8", "#9E6216", "#6A3D07", "#005A51",
  "#734207", "#A76A1C", "#42A097", "#E0C481", "#814A09",
  "#D1A65B", "#F5F5F5", "#95D5CC", "#DEC07B", "#EDF3F2",
  "#E6CD92", "#60B6AC", "#00463B", "#20847C", "#F5EACC",
  "#00493E", "#003C30", "#C48C3B", "#25877F", "#BB7D2A",
  "#0B7068", "#AB6E1F", "#F4E6BF", "#F5F2EC", "#076C64",
  "#EFDCAE", "#D7EDEA", "#8E530B", "#4EA99F", "#F5EDD8",
  "#7ECCC0", "#A6DCD4", "#92570E", "#005D55", "#004237",
  "#00574D", "#BF822E", "#D6B168", "#B2E1DA", "#ACDFD7",
  "#F5E9C8", "#006158", "#543005", "#6CBFB4", "#C3E8E3",
  "#F5ECD4", "#31938B", "#F1F4F3", "#D3EDE9", "#36978F",
  "#54ADA3", "#A1DAD1", "#147870", "#00645C", "#8A4F09",
  "#78C7BC", "#10746C", "#965B11", "#E9D39D", "#D9B66E",
  "#8AD1C6", "#D4AB61", "#784508", "#F5F0E4", "#E4CA8C",
  "#F5EEDC", "#583205", "#854D09", "#6F3F07", "#AF7222",
  "#48A49B", "#DEEFED", "#E6F1EF", "#F5EFE0", "#E2C787",
  "#7C4708", "#2D8F87", "#C79141", "#9A5E14", "#5D3505")
##without interpolation, will need tweaks as some feed-in code changed
# Ridgeline of raw monthly inflows: one ridge per year, newest at the bottom
ggplot(inflow_long, aes(x = Month, y = Year, height = Inflow, group = Year, fill = as.factor(Year))) +
geom_ridgeline(stat = "identity", alpha = 0.8, scale = 0.003, min_height = 1, size = 0.2, show.legend = FALSE) +
theme_classic() +
scale_y_reverse(breaks = c(1895, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2019), expand = c(0,0), name = "", position = "right") +
scale_x_discrete(expand = c(0,0.1), name = "") +
theme(axis.line.y = element_blank(), axis.ticks.y = element_blank()) +
scale_fill_manual(values = inflow_col)
##with interpolation
# Same layout but using the daily spline fit; x uses the dummy year 2000 set
# above so every ridge spans the same Jan-Dec window
ggplot(newDF, aes(x = Dates, y = year, height = FitData, group = year, fill = as.factor(year))) +
geom_ridgeline(stat = "identity", alpha = 0.8, scale = 0.003, min_height = 10, size = 0.2, show.legend = FALSE) +
theme_classic() +
scale_y_reverse(breaks = c(1895, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2019),
minor_breaks = seq(1895, 2019, 5),
expand = c(0,0), name = "", position = "right") +
scale_x_date(date_breaks = "1 month", minor_breaks = "1 week", labels=date_format("%b"), expand = c(0,0.1), name = "") +
theme(axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(hjust = -1.5),
panel.grid.major.y = element_line(color = "black", size = 0.2, linetype = "dotted"),
panel.grid.minor.y = element_line(color = "black", size = 0.2, linetype = "dotted")) +
scale_fill_manual(values = inflow_col)
| /scripts/DataViz4Report.R | no_license | FarMar/ForestSoils | R | false | false | 106,489 | r | #####################################################################################################
#### Forest soils dataviz script ###################
#### mark.farrell@csiro.au +61 8 8303 8664 31/05/2021 ################################
#####################################################################################################
#### Set working directory ####
setwd("/Users/markfarrell/OneDrive - CSIRO/Data/ForestSoils")
#### Packages ####
install.packages("ggtern")
install.packages("ggdist")
install.packages("ggridges")
install.packages("scales")
library(tidyverse)
library(janitor)
library(PerformanceAnalytics)
library(corrplot)
library(RColorBrewer)
library(plotrix)
library(ggpmisc)
#library(ggtern)
library(ggbluebadge)
library(ggdist)
library(magrittr)
library(lubridate)
library(vegan)
library(ape)
library(RVAideMemoire)
library(BiodiversityR)
library(patchwork)
library(ggridges) #masks a lot of ggdist
library(scales)
#### Colours ####
# Build Spectral-interpolated palettes of various sizes and preview each as a
# pie chart; write all palettes to colours.csv for reference.
# No margin
par(mar=c(0,0,1,0))
# Classic palette Spectral, with 11 colors
coul <- brewer.pal(11, "Spectral")
# Interpolate to 17 colours and preview
coul17 <- colorRampPalette(coul)(17)
pie(rep(1, length(coul17)), col = coul17 , main="")
# Interpolate to 11 colours and preview
coul11 <- colorRampPalette(coul)(11)
pie(rep(1, length(coul11)), col = coul11 , main="")
# Interpolate to 8 colours and preview
coul8 <- colorRampPalette(coul)(8)
pie(rep(1, length(coul8)), col = coul8 , main="")
# BrBG-based 125-colour palette for the inflow ridgelines.
# BUG FIX: these two lines originally came AFTER the export list below, so
# `coul125` was referenced before it existed and a fresh run errored.
coul_inflow <- brewer.pal(11, "BrBG")
coul125 <- colorRampPalette(coul_inflow)(125)
# Output the palettes for reference
x <- list(coul8, coul11, coul17, coul125)
y <- tibble(column1 = map_chr(x, str_flatten, " "))
write_csv(y, "colours.csv")
#### data in ####
# NOTE(review): `sum` and `all` shadow the base R functions of the same name
# for the rest of the session -- consider renaming.
sum <- read_csv("data/processed/summary.csv")
all <- read_csv("data/processed/ChemAll_adm_OLremPLFA.csv")
sum %<>% mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun), as.factor))
str(sum)
# Parse dates, rank plots within each transect by relative height
# (PlotPos 1 = highest), label sampling periods, and factorise the IDs
all %<>% mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
str(all)
#### Ternary plot ####
# This is best run standalone as {ggtern} masks a lot of ggplot
# Soil texture triangle coloured by transect (requires library(ggtern))
ggtern(data=sum, aes(Sand,Clay,Silt, color = Transect)) +
geom_point(size = 4) +
theme_rgbw() +
theme_hidetitles() +
theme(text = element_text(size=20)) +
theme(legend.key=element_blank())
#### MIR ####
# MIR import
# Mid-infrared spectra plus plot metadata; same PlotPos ranking and sampling
# period labelling as applied to `all` above
mir <- read_csv("data/working/MasterFieldDataFC_NSW - MIR_raw.csv")
cols_condense(mir)
dim(mir)
mir <- mir %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date)
mir$`Sampling Period` <- as.factor(mir$`Sampling Period`)
str(mir)
levels(mir$`Sampling Period`)
# Chronological factor order, as elsewhere in the script
mir <- mir %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
# initial check plot
# Pull UniqueID (col 2) plus the 1971 spectral columns and relabel the
# spectral columns with their wavenumbers, then overplot all raw spectra.
spec <- mir %>%
  select(2, 27:1997)
waves <- seq(7999.27979, 401.121063, by = -3.8569)
# BUG FIX: the original `colnames(spec[, 2:1972]) <- waves` assigned names to
# a temporary copy and was a no-op; index spec's own names vector instead so
# the wavenumber labels actually stick (they are relied on by the hyperSpec
# interpolation step below, which greps digit column names).
colnames(spec)[2:1972] <- waves
matplot(x = waves,
        y = t(spec[2:1972]),
        ylim = c(0, 3.5),
        type = "l",
        lty = 1,
        main = "Raw spectra",
        xlab = "Wavenumber (cm-1)",
        ylab = "Absorbance",
        col = rep(palette(), each = 3)
)
# Interpolation: resample spectra onto a common 6000 -> 600 cm-1 grid (4 cm-1
# step) via loess smoothing in {hyperSpec}
mirinterp <- spec
# Build the hyperSpec S4 object; spectral columns are identified as those
# whose names contain digits (i.e. the wavenumber column names)
mirinterp1 <- new("hyperSpec", # makes the hyperspec object
spc = mirinterp[, grep('[[:digit:]]', colnames(mirinterp))],
wavelength = as.numeric(colnames(mirinterp)[grep ('[[:digit:]]', colnames(mirinterp))]),
label = list(.wavelength = "Wavenumber",
spc = "Intensity"))
# Loess-resample onto the target grid (1351 points, descending)
mirinterp3 <- hyperSpec::spc.loess(mirinterp1, c(seq(6000, 600, -4)))
# plot(mirinterp3, "spc", wl.reverse = T, col = rep(palette(), each = 3))
# Extract the resampled spectral matrix and relabel columns with the new grid
output <- mirinterp3[[]]
waves_l <- seq(6000, 600, by = -4)
colnames(output) <- waves_l
ID <- as.data.frame(mir$UniqueID)
final <- cbind(ID, output) #This is now the re-sampled df. Still needs baselining.
# NOTE(review): the xlab says "Wavelength (nm)" but waves_l are wavenumbers
# in cm-1 (as labelled elsewhere) — axis label looks mislabelled; confirm
matplot(x = waves_l, y = t(final[,2:1352]), ylim=c(0,3), type = "l", lty = 1,
main = "Absorbance - 600 to 6000 & reample with resolution of 4", xlab = "Wavelength (nm)",
ylab = "Absorbance", col = rep(palette(), each = 3))
# baseline offset
# Baseline-offset correction: subtract each spectrum's (row's) minimum so
# every spectrum has a minimum absorbance of zero.
#
# spectra: matrix-like object (rows = samples, columns = wavenumbers).
#          NOTE(review): if the first spectrum's first value is smaller than
#          its last, columns are reversed — this orientation heuristic on
#          absorbance values is inherited from the original; confirm column
#          ordering upstream.
# Returns: a numeric matrix, same dimensions, column names preserved.
spoffs2 <- function (spectra)
{
  if (missing(spectra)) {
    stop("No spectral data provided")
  }
  # Work on a plain matrix (callers pass a data.frame slice)
  spectra <- as.matrix(spectra)
  if (spectra[1, 1] < spectra[1, dim(spectra)[2]]) {
    # Reverse column order; drop = FALSE keeps single-row input a matrix
    # (the original t(apply(...)) form was equivalent for multi-row input)
    spectra <- spectra[, rev(seq_len(ncol(spectra))), drop = FALSE]
  }
  # Vectorised row-wise minimum subtraction. Replaces the original per-row
  # loop plus the rbind/strip-first-row dance, which existed only to restore
  # the column names lost by the loop.
  sweep(spectra, 1, apply(spectra, 1, min), "-")
}
# Apply the baseline-offset correction to the resampled spectra (1351 columns)
spec_a_bc_d <- spoffs2(final[,2:1352])
dim(spec_a_bc_d)
head(spec_a_bc_d)
# NOTE(review): waves_ss ascends 600 -> 6000 while the corrected matrix's
# columns follow the (possibly reversed) spoffs2 output — confirm the x values
# line up with the column order before trusting this plot
waves_ss <- seq(600, 6000, by=4)
matplot(x = waves_ss, y = t(spec_a_bc_d), ylim=c(0,2), xlim=rev(c(600, 6000)), type = "l", lty = 1,
main = "Absorbance - baseline corrected", xlab = expression("Wavenumber" ~ (cm^{-1})),
ylab = "Absorbance", col = rep(palette(), each = 3))
# Re-attach sample IDs and fix the auto-generated column name from cbind
finalb <- cbind(ID, spec_a_bc_d) %>% #This is now the baselined and re-sampled df.
rename(UniqueID = "mir$UniqueID")
# combine data
# Join field/site metadata onto the processed spectra by sample ID
mir_meta <- all %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun, Moisture)
mir_proc <- left_join(mir_meta, finalb, by = "UniqueID")
## Multivariate Exploration and Analysis
## MIR
# Prep: fourth-root transform (with +10 shift) of the spectral columns
tmir <- mir_proc %>%
  mutate(across(c(14:1364), ~ (.x + 10)^(1/4)))
# Standardise a numeric vector to z-scores (zero mean, unit sd).
# Note: propagates NA if x contains NA (no na.rm), matching original use.
z.fn <- function(x) {
  centred <- x - mean(x)
  centred / sd(x)
}
# Standardise every spectral column to z-scores
stmir <- tmir %>%
  mutate(across(c(14:1364), ~z.fn(.)))
# Metadata columns (factors/covariates) for plotting and models
fmir <- stmir %>%
  select(1:13)
# Spectral matrix for ordination.
# BUG FIX: the original selected 14:1363, silently dropping the final
# spectral column even though 14:1364 were transformed/standardised above.
# NOTE(review): hard-coded axis percentages in downstream plot labels were
# computed under the old selection and may shift marginally — regenerate.
dmir <- stmir %>%
  select(14:1364)
# Manhattan distances between samples, then principal coordinates analysis
distmir <- vegdist(dmir, method = "manhattan", na.rm = TRUE)
pmir <- pcoa(distmir)
pmir$values$Relative_eig[1:10]   # variance explained by first 10 axes
barplot(pmir$values$Relative_eig[1:10])
# Metadata + ordination scores for plotting
mir_points <- bind_cols(fmir, (as.data.frame(pmir$vectors)))
# PCoA ordination: colour by transect, shape by within-transect plot position
ggplot(mir_points, aes(x = Axis.1, y = Axis.2, colour = Transect, shape = PlotPos)) +
  geom_point(size = 4) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  labs(
    x = "PCoA Axis 1; 81.0%",
    y = "PCoA Axis 2; 7.9%")
# Permanova
# Seed fixed for reproducible permutation p-values
set.seed(1983)
# Two-way PERMANOVA: transect x sampling period on spectral distances
perm_mir <- adonis2(distmir~Transect*`Sampling Period`, data = stmir, permutations = 9999, method = "manhattan")
perm_mir #strong impact of transect, weak of sampling time
# Pairwise follow-ups with FDR correction
permpt_mir <- pairwise.perm.manova(distmir, stmir$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_mir
permpd_mir <- pairwise.perm.manova(distmir, stmir$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpd_mir #sniff of significance for last sampling vs 1st three samplings
# PERMANOVA with relative height as a continuous covariate
perm_mirh <- adonis2(distmir~Transect*RTHeight, data = stmir, permutations = 9999, method = "manhattan")
perm_mirh #strong height interaction
# CAP by transect
# Coerce to base data.frame before CAPdiscrim (presumably it does not accept
# a tibble — TODO confirm)
stmir <- as.data.frame(stmir)
cap_mirt <- CAPdiscrim(distmir~Transect, data = stmir, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
saveRDS(cap_mirt, file = "outputs/MIRCAP.rds")
# NOTE(review): readRDS() result is discarded — looks like a read-back check
# of the saved object; otherwise this line can be removed
readRDS("outputs/MIRCAP.rds")
# Proportion of among-group variation captured per CAP axis
round(cap_mirt$F/sum(cap_mirt$F), digits=3)
barplot(cap_mirt$F/sum(cap_mirt$F))
# CAP scores + metadata for plotting
cap_mirt_points <- bind_cols((as.data.frame(cap_mirt$x)), fmir)
glimpse(cap_mirt_points)
ggplot(cap_mirt_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "CAP Axis 1; 41.2%",
y = "CAP Axis 2; 35.3%")
# CAP + spider: points joined by segments to their transect centroid
mir_cent <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_mirt_points, FUN = mean)
mir_segs <- merge(cap_mirt_points,
                  setNames(mir_cent, c('Transect', 'oLD1', 'oLD2')),
                  by = 'Transect', sort = FALSE)
ggplot(cap_mirt_points) +
  geom_point(aes(x = LD1, y = LD2, colour = Transect, shape = PlotPos),
             size = 3, alpha = .7) +
  geom_segment(data = mir_segs,
               mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect),
               alpha = .5, size = .25) +
  geom_point(data = mir_cent,
             mapping = aes(x = LD1, y = LD2, colour = Transect),
             size = 5, alpha = 1.0) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  labs(
    x = "CAP Axis 1; 41.2%",
    y = "CAP Axis 2; 35.3%")
#### Metals PCA ####
# Assemble the total-element table: metadata (cols 1:11) plus element
# concentrations (cols 45:65), dropping As, Cd, Mo, Sb and Se, then
# log1p-transform the six elements the original transformed one by one.
metals <- sum %>%
  select(c(1:11, 45:65)) %>%
  select(-c(As, Cd, Mo, Sb, Se)) %>%
  group_by(Transect) %>%
  mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
  ungroup() %>%
  relocate(PlotPos, .after = Plot) %>%
  mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, PlotPos), as.factor)) %>%
  mutate(across(c(P, Na, Mg, K, Co, Ca), log1p))
# Correlation matrix of the element columns (13:28)
chart.Correlation(metals[13:28])
# PCA on the correlation matrix, retaining scores
pca_metals <- princomp(metals[13:28], cor = TRUE, scores = TRUE)
biplot(pca_metals, choices = c(1,2))
summary(pca_metals) #PC1 = 58.3%, PC2 = 13.9%
# Keep the first two component scores and attach them to the metals table
scores_metals <- as.data.frame(pca_metals[["scores"]]) %>%
select(1:2)
metals_plot <- bind_cols(metals, scores_metals)
# Transect centroids and point-to-centroid segments for the spider plot
metals_cent <- aggregate(cbind(Comp.1, Comp.2) ~ Transect, data = metals_plot, FUN = mean)
metals_segs <- merge(metals_plot, setNames(metals_cent, c('Transect', 'PC1', 'PC2')), by = 'Transect', sort = FALSE)
# PCA spider plot: samples joined by segments to their transect centroid
ggplot(metals_plot) +
  geom_point(aes(x = Comp.1, y = Comp.2, colour = Transect, shape = PlotPos),
             size = 3, alpha = .7) +
  geom_segment(data = metals_segs,
               mapping = aes(x = Comp.1, y = Comp.2, xend = PC1, yend = PC2, colour = Transect),
               alpha = .5, size = .25) +
  geom_point(data = metals_cent,
             mapping = aes(x = Comp.1, y = Comp.2, colour = Transect),
             size = 5, alpha = 1.0) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  labs(
    x = "PCA Axis 1; 58.3%",
    y = "PCA Axis 2; 13.9%")
#### Raincloud helper ####
# Every population plot below is the same "raincloud" design: a half-eye
# density, jittered points coloured by Transect, and a transparent boxplot,
# with the x axis hidden. The original repeated this layout ~30 times; this
# helper renders an identical panel from (data, column, fill colour, y label).
raincloud <- function(data, var, fill_col, y_lab) {
  ggplot(data) +
    stat_halfeye(aes(y = {{ var }}),
                 adjust = .5,
                 width = .6,
                 .width = 0,
                 justification = -.3,
                 point_colour = NA,
                 fill = fill_col) +
    geom_point(aes(x = 0, y = {{ var }}, colour = Transect),
               shape = 21,
               stroke = 1,
               size = 3,
               position = position_jitter(seed = 1, width = 0.1)) +
    geom_boxplot(aes(y = {{ var }}),
                 alpha = 0,
                 width = .25,
                 outlier.shape = NA) +
    scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
    theme_classic() +
    labs(y = y_lab, colour = "Toposequence") +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#### BW ####
# Landscape data plots
RTHeight <- raincloud(sum, RTHeight, "#9E0142", "Relative height in toposequence (m)")
TWI <- raincloud(sum, TWI, "#D53E4F", "Topographic wetness index")
TPI <- raincloud(sum, TPI, "#F46D43", "Topographic position index")
Slope <- raincloud(sum, Slope, "#FDAE61", "Slope")
planCurv <- raincloud(sum, planCurv, "#FEE08B", "Plan curvature")
proCurv <- raincloud(sum, proCurv, "#FFFFBF", "Profile curvature")
NDVI <- raincloud(all, NDVI, "#E6F598", "Normalised difference vegetation index (NDVI)")
Wet <- raincloud(all, Wet, "#ABDDA4", "Soil moisture by synthetic aperture radar (Sentinel)")
Moisture <- raincloud(all, Moisture, "#66C2A5", expression("Soil moisture (g"~g^-1~" dry weight)"))
WHC <- raincloud(sum, WHC, "#3288BD", expression("Water holding capacity (g"~g^-1~")"))
BD0_30 <- raincloud(sum, BD0_30, "#5E4FA2", expression("Bulk density (g"~cm^-3~")"))
# Landscape panel figure
RTHeight + TWI + TPI + Slope + planCurv + proCurv + NDVI +
  Wet + Moisture + WHC + BD0_30 + guide_area() +
  plot_layout(ncol = 6, guides = 'collect') +
  plot_annotation(tag_levels = 'a') &
  theme(plot.tag.position = c(1, 1),
        plot.tag = element_text(size = 16, hjust = 4, vjust = 2))
# Chem data
pHc <- raincloud(all, pHc, "#9E0142", expression(~pH[CaCl[2]]))
EC <- raincloud(all, EC, "#C0247A", expression("Electrical conductivity (dS "~m^-1~")"))
CEC <- raincloud(sum, CEC, "#DC494C", expression("Cation exchange capacity ("~cmol^+~" "~kg^-1~")"))
PC1 <- raincloud(metals_plot, Comp.1, "#F06744", expression("Total elements principal component 1, 58.3% of variance"))
PC2 <- raincloud(metals_plot, Comp.2, "#F88D51", expression("Total elements principal component 2, 13.9% of variance"))
P <- raincloud(sum, P, "#FDB466", expression("Total phosphorus (mg "~kg^-1~")"))
K <- raincloud(sum, K, "#FDD380", expression("Total potassium (mg "~kg^-1~")"))
S <- raincloud(sum, S, "#FEEB9E", expression("Total sulphur (mg "~kg^-1~")"))
# drop_na(<col>) removes only rows missing that analyte before plotting
TotOC <- raincloud(drop_na(sum, TotOC_mean), TotOC_mean, "#FFFFBF", expression("Total organic carbon (g "~kg^-1~")"))
TotN <- raincloud(drop_na(sum, TotN_mean), TotN_mean, "#EFF8A6", expression("Total nitrogen (g "~kg^-1~")"))
CN <- raincloud(drop_na(sum, CN_mean), CN_mean, "#D7EF9B", expression("C:N ratio"))
d13C <- raincloud(drop_na(sum, d13C_mean), d13C_mean, "#B2E0A2", expression(paste(delta^{13}, "C (\u2030)")))
d15N <- raincloud(drop_na(sum, d15N_mean), d15N_mean, "#88CFA4", expression(paste(delta^{15}, "N (\u2030)")))
POC <- raincloud(drop_na(sum, POC_mean), POC_mean, "#5FBAA8", expression("Particulate organic carbon (g "~kg^-1~")"))
HOC <- raincloud(drop_na(sum, HOC_mean), HOC_mean, "#3F96B7", expression("Humus organic carbon (g "~kg^-1~")"))
ROC <- raincloud(drop_na(sum, ROC_mean), ROC_mean, "#4272B2", expression("Resistant organic carbon (g "~kg^-1~")"))
Vuln <- raincloud(drop_na(sum, Vuln_mean), Vuln_mean, "#5E4FA2", expression("Organic carbon vulnerability"))
# Chemistry panel figure
pHc + EC + CEC + PC1 + PC2 + P + K + S + TotOC +
  TotN + CN + d13C + d15N + POC + HOC + ROC + Vuln + guide_area() +
  plot_layout(ncol = 6, guides = 'collect') +
  plot_annotation(tag_levels = 'a') &
  theme(plot.tag.position = c(1, 1),
        plot.tag = element_text(size = 16, hjust = 2, vjust = 2))
### Dynamic
NO3 <- raincloud(drop_na(all, NO3), NO3, "#9E0142", expression("Extractable "~NO[3]^{"-"}~"-N (mg "~kg^-1~")"))
NH4 <- raincloud(drop_na(all, NH4), NH4, "#E25249", expression("Extractable "~NH[4]^{"+"}~"-N (mg "~kg^-1~")"))
FAA <- raincloud(drop_na(all, FAA), FAA, "#FBA45C", expression("Extractable free amino acid-N (mg "~kg^-1~")"))
# drop_na() with no columns drops rows with NA in ANY column (as original)
DON <- raincloud(drop_na(all), DON, "#FEE899", expression("Dissolved organic N (mg "~kg^-1~")"))
DOC <- raincloud(drop_na(all), DOC, "#EDF7A3", expression("Dissolved organic C (mg "~kg^-1~")"))
MBC <- raincloud(drop_na(all), MBC, "#A1D9A4", expression("Microbial biomass C (mg "~kg^-1~")"))
MBN <- raincloud(drop_na(all), MBN, "#48A0B2", expression("Microbial biomass N (mg "~kg^-1~")"))
AvailP <- raincloud(drop_na(all), AvailP, "#5E4FA2", expression("Olsen-extractable P (mg "~kg^-1~")"))
# Dynamic pools panel figure
NO3 + NH4 + FAA + DON + DOC + MBC + MBN + AvailP +
  plot_layout(ncol = 4, guides = 'collect') +
  plot_annotation(tag_levels = 'a') &
  theme(plot.tag.position = c(0, 1),
        plot.tag = element_text(size = 16, hjust = -12, vjust = 2))
### Microbial
Proteolysis <- raincloud(drop_na(all), Proteolysis, "#9E0142", expression("Proteolysis rate (mg AA-N"~kg^-1~h^-1~")"))
AAMin_k1 <- raincloud(drop_na(all), AAMin_k1, "#D53E4F", expression("Rate of initial AA mineralisation ("~h^-1~")"))
MicY <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = MicY),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#F46D43") +
geom_point(aes(x = 0, y = MicY, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = MicY),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Microbial yield"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
TotalPLFA <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = TotalPLFA),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#FDAE61") +
geom_point(aes(x = 0, y = TotalPLFA, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = TotalPLFA),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Total PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Bac <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Bac),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#FEE08B") +
geom_point(aes(x = 0, y = Bac, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Bac),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Bacterial PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Fun <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Fun),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#FFFFBF") +
geom_point(aes(x = 0, y = Fun, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Fun),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Fungal PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Gpos <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Gpos),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#E6F598") +
geom_point(aes(x = 0, y = Gpos, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Gpos),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("G+ bacterial PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Gneg <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Gneg),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#ABDDA4") +
geom_point(aes(x = 0, y = Gneg, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Gneg),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("G- bacterial PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Act <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Act),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#66C2A5") +
geom_point(aes(x = 0, y = Act, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Act),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Actinomycete PLFA (nmol "~g^-1~")"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
F_B <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = F_B),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#3288BD") +
geom_point(aes(x = 0, y = F_B, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = F_B),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Fungal:Bacterial ratio"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Gp_Gn <- all %>% drop_na() %>% # Neat little hack to drop NA samples
ggplot() + # Also need to drop the df call here
stat_halfeye(aes(y = Gp_Gn),
adjust = .5,
width = .6,
.width = 0,
justification = -.3,
point_colour = NA,
fill = "#5E4FA2") +
geom_point(aes(x = 0, y = Gp_Gn, colour = Transect),
shape = 21,
stroke = 1,
size = 3,
position = position_jitter(
seed = 1,
width = 0.1
)
) +
geom_boxplot(aes(y = Gp_Gn),
alpha = 0,
width = .25,
outlier.shape = NA
) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
labs(y = expression ("Gram+:Gram- ratio"),
colour = "Toposequence") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
Proteolysis + AAMin_k1 + MicY + TotalPLFA + Bac + Fun +
Gpos + Gneg + Act + F_B + Gp_Gn + guide_area() +
plot_layout(ncol = 6, guides = 'collect') +
plot_annotation(tag_levels = 'a') &
theme(plot.tag.position = c(1, 1),
plot.tag = element_text(size = 16, hjust = 4, vjust = 2))
#### xy plots ####
# Add plot position: rank plots within each transect by descending relative
# height (PlotPos 1 = highest), then coerce the ID columns to factors.
sum %<>% group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos), as.factor))
str(sum)
# isotopes
# Scatter plots of isotope signatures against C:N and SOC vulnerability,
# coloured by plot position (4-level Spectral palette).
#CN
cn_c <- ggplot(sum) +
geom_point(aes(x=CN_mean, y=d13C_mean, colour = PlotPos), size = 3) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "C:N ratio",
y = expression (paste(delta^{13}, "C (\u2030)")),
colour = "Plot position")
cn_n <- ggplot(sum) +
geom_point(aes(x=CN_mean, y=d15N_mean, colour = PlotPos), size = 3) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "C:N ratio",
y = expression (paste(delta^{15}, "N (\u2030)")),
colour = "Plot position")
#vuln
vuln_c <- ggplot(sum) +
geom_point(aes(x=Vuln_mean, y=d13C_mean, colour = PlotPos), size = 3) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "SOC vulnerability",
y = expression (paste(delta^{13}, "C (\u2030)")),
colour = "Plot position")
vuln_n <- ggplot(sum) +
geom_point(aes(x=Vuln_mean, y=d15N_mean, colour = PlotPos), size = 3) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "SOC vulnerability",
y = expression (paste(delta^{15}, "N (\u2030)")),
colour = "Plot position")
#iso only
# d13C vs d15N biplot
iso <- ggplot(sum) +
geom_point(aes(x=d13C_mean, y=d15N_mean, colour = PlotPos), size = 3) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = expression (paste(delta^{13}, "C (\u2030)")),
y = expression (paste(delta^{15}, "N (\u2030)")),
colour = "Plot position")
# Assemble the five xy panels plus a legend area (patchwork): 3 columns,
# shared legend, panels tagged a, b, c, ...
# Fix: the original was missing the trailing "+" after guide_area(), which
# detached plot_layout()/plot_annotation() from the composed figure (cf. the
# equivalent assemblies elsewhere in this script), so the layout, collected
# legend and tags were never applied.
cn_c + cn_n + iso + vuln_c + vuln_n + guide_area() +
plot_layout(ncol = 3, guides = 'collect') +
plot_annotation(tag_levels = 'a') &
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = -5, vjust = 1))
#### local scale ####
#### biogeochem ####
# Load the per-plot summary, derive PlotPos (rank of relative height within
# each transect, 1 = highest) and factorise the ID columns.
t1_summary <- read_csv("data/processed/summary.csv")
t1_summary <- t1_summary %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos), as.factor))
str(t1_summary)
# Move character columns to the front so numeric column indices below line up
t1_summary <- t1_summary %>%
relocate(where(is.character))
# Mean biogeochemical variables retained for ordination
bgc_mean <- t1_summary %>%
select(UniqueID, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun,
Clay, CEC, WHC, BD0_30, NDVI_mean, Wet_mean, Moisture_mean, pHc_mean, EC_mean, AvailP_mean, CN_mean, Vuln_mean,
d13C_mean, d15N_mean, DOC_mean, NO3_mean, NH4_mean, FAA_mean, Proteolysis_mean,
AAMin_k1_mean, DON_mean, MBC_mean, MBN_mean, MicY_mean)
#pre-prep - PCA of total elements to reduce dimensions
# NOTE(review): column positions 47:66 are assumed to be the total-element
# concentrations — verify against the CSV if the summary layout changes.
tot_elms <- t1_summary %>%
select(47:66) %>%
select(!c(As, B, Cd, Mo, Sb, Se))
chart.Correlation(tot_elms)
# log1p-transform the right-skewed elements before the correlation-based PCA
ttot_elms <- tot_elms %>%
mutate(P = log1p(P),
Na = log1p(Na),
Mg = log1p(Mg),
K = log1p(K),
Co = log1p(Co),
Ca = log1p(Ca))
chart.Correlation(ttot_elms)
pca_elms <- princomp(ttot_elms, cor = TRUE, scores = TRUE)
biplot(pca_elms, choices = c(1,2))
summary(pca_elms) #PC1 = 59.2%, PC2 = 11.7%
# Keep the first two PC scores as reduced element variables
scores_elms <- as.data.frame(pca_elms[["scores"]]) %>%
select(1:2)
#prep
bgc_mean <- cbind(bgc_mean, scores_elms)
bgc_cor <- select(bgc_mean, 11:36)
chart.Correlation(bgc_cor, histogram=TRUE, pch=19)
# log1p-transform skewed variables, then z-standardise (z.fn) columns 11:36
tbgc_mean <- bgc_mean %>%
mutate(MBN_mean = log1p(MBN_mean),
NH4_mean = log1p(NH4_mean),
AvailP_mean = log1p(AvailP_mean),
EC_mean = log1p(EC_mean),
pHc_mean = log1p(pHc_mean),
BD0_30 = log1p(BD0_30))
stbgc_mean <- tbgc_mean %>%
mutate(across(c(11:36), ~z.fn(.)))
# Split into factor/ID columns (fbgc) and the numeric data matrix (dbgc)
fbgc <- stbgc_mean %>%
select(1:10)
dbgc <- stbgc_mean %>%
select(11:36)
# PCoA on Euclidean distances of the standardised variables
distbgc <- vegdist(dbgc, method = "euclidean", na.rm = TRUE)
pbgc <- pcoa(distbgc)
pbgc$values$Relative_eig[1:10]
barplot(pbgc$values$Relative_eig[1:10])
bgc_points <- bind_cols(fbgc, (as.data.frame(pbgc$vectors)))
# Compute variable "arrows" (loadings) for a PCoA ordination biplot.
#
# given_pcoa: a PCoA result list (e.g. from ape::pcoa) with $vectors (site
#             scores, one column per axis) and $values$Eigenvalues.
# orig_df:    numeric data used to build the distance matrix; each column
#             becomes one arrow (can be pre-subset to columns of interest).
#
# Returns `given_pcoa` with an added $U matrix: covariances between each
# variable and the standardised axis scores, rescaled by the matching
# eigenvalues, with axis names carried over from $vectors.
#
# Changes vs. original: `<-` assignment, removed the no-op `orig_df = orig_df`,
# `seq_len()` instead of `seq()`, implicit return. Numerics are unchanged.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  # Standardise site scores before computing covariances
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of every variable with every axis
  S <- cov(orig_df, points.stand)
  # Eigenvalues for the retained axes (assumed positive — TODO confirm for
  # ordinations with negative eigenvalues)
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # Standardise the covariances by sqrt(eigenvalue / (n - 1))
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Attach variable arrows to the PCoA object, scale them (x10, arbitrary,
# for display only) and convert to a data frame for plotting
pbgc = compute.arrows(pbgc, dbgc)
pbgc_arrows_df <- as.data.frame(pbgc$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Plot
# PCoA ordination coloured by plot position with variable arrows.
# NOTE(review): the axis percentages in the labels are hard-coded — confirm
# against pbgc$values$Relative_eig if the data change.
ggplot(bgc_points) +
geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos), size = 6) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = pbgc_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = pbgc_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 25.6%",
y = "PCoA Axis 2; 16.2%")
# Permanova
# Additive PERMANOVA (Transect + PlotPos) plus FDR-corrected pairwise tests
set.seed(1983)
perm_bgc <- adonis2(distbgc~Transect+PlotPos, data = stbgc_mean, permutations = 9999, method = "euclidean")
perm_bgc #strong impact of transect and plot
permpt_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_bgc #.098 is lowest possible - several pairwise comps have this
permpp_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_bgc #4 is sig diff from 1&2. 3 borderline diff from 1&2. 1 borderline diff from 2
# CAP by transect
# Canonical Analysis of Principal coordinates discriminating transects,
# with correlation-based species (variable) scores added for arrows
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgct <- CAPdiscrim(distbgc~Transect, data = stbgc_mean, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
cap_bgct <- add.spec.scores(cap_bgct, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
saveRDS(cap_bgct, file = "data/processed/CAP_bgct.rds")
round(cap_bgct$F/sum(cap_bgct$F), digits=3)
barplot(cap_bgct$F/sum(cap_bgct$F))
cap_bgct_points <- bind_cols((as.data.frame(cap_bgct$x)), fbgc)
glimpse(cap_bgct_points)
cap_bgct_arrows <- as.data.frame(cap_bgct$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# CAP ordination (LD1/LD2) by transect with variable arrows.
# NOTE(review): axis percentages in labels are hard-coded from the F ratios
# printed above — re-check if the data change.
ggplot(cap_bgct_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgct_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgct_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 56.7%",
y = "CAP Axis 2; 23.0%")
# CAP by transect + spider
# Group centroids per transect plus segments linking each point to its
# centroid ("spider" layout)
bgc_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_bgct_points, FUN = mean)
bgc_segst <- merge(cap_bgct_points, setNames(bgc_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
cap_bgct_fig <- ggplot(cap_bgct_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
geom_segment(data = bgc_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
geom_point(data = bgc_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgct_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgct_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 56.7%",
y = "CAP Axis 2; 23.0%",
colour = "Toposequence",
shape = "Plot position")
# CAP by plotpos
# Same workflow, discriminating on plot position (m = 3 axes retained here)
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgcp <- CAPdiscrim(distbgc~PlotPos, data = stbgc_mean, axes = 10, m = 3, mmax = 10, add = FALSE, permutations = 999)
cap_bgcp <- add.spec.scores(cap_bgcp, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
saveRDS(cap_bgcp, file = "data/processed/CAP_bgcp.rds")
round(cap_bgcp$F/sum(cap_bgcp$F), digits=3)
barplot(cap_bgcp$F/sum(cap_bgcp$F))
cap_bgcp_points <- bind_cols((as.data.frame(cap_bgcp$x)), fbgc)
glimpse(cap_bgcp_points)
cap_bgcp_arrows <- as.data.frame(cap_bgcp$cproj*3) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_bgcp_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgcp_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgcp_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 76.3%",
y = "CAP Axis 2; 23.7%")
# CAP by plot + spider
bgc_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_bgcp_points, FUN = mean)
bgc_segsp <- merge(cap_bgcp_points, setNames(bgc_centp, c('PlotPos', 'oLD1', 'oLD2')), by = 'PlotPos', sort = FALSE)
cap_bgcpfig <- ggplot(cap_bgcp_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 3, alpha = .6) +
geom_segment(data = bgc_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = PlotPos), alpha = .9, size = .3) +
geom_point(data = bgc_centp, mapping = aes(x = LD1, y = LD2, colour = PlotPos), size = 5) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgcp_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgcp_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 76.3%",
y = "CAP Axis 2; 23.7%",
colour = "Plot position")
# Stack the two spider figures vertically with tags
cap_bgct_fig + cap_bgcpfig +
plot_layout(ncol = 1) +
plot_annotation(tag_levels = 'a') &
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = -5, vjust = 1))
#### temporal ####
# Outlier-removed chemistry data: derive PlotPos, factorise IDs, parse dates
OL_cor <- read_csv("data/processed/ChemAll_adm_OLrem.csv")
OL_cor <- OL_cor %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos), as.factor)) %>%
mutate(Date = dmy(Date))
str(OL_cor)
# PLFA data: same prep plus a "Sampling Period" label derived from the
# field-campaign date windows (five campaigns spanning the flood event)
plfa <- read_csv("data/working/MasterFieldDataFC_NSW - PLFAs.csv")
plfa <- plfa %>%
mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
# Order the sampling periods chronologically (factor levels)
plfa <- plfa %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
str(plfa)
# Outlier-removed chemistry merged with PLFA: identical prep pipeline
OLP_cor <- read_csv("data/processed/ChemAll_adm_OLremPLFA.csv")
OLP_cor <- OLP_cor %>%
mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
str(OLP_cor)
# Add the same sampling-period factor to OL_cor and order it chronologically
OL_cor <- OL_cor %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date)
OL_cor$`Sampling Period` <- as.factor(OL_cor$`Sampling Period`)
str(OL_cor)
levels(OL_cor$`Sampling Period`)
OL_cor <- OL_cor %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
# Subset the temporal dataset to the variables used in the ordinations
temporalP <- OLP_cor %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun,
NDVI, VH, VV, Wet, Moisture, pHc, EC, AvailP,
DOC, DTN, NO3, NH4, FAA, Proteolysis, AAMin_k1, DON, MBC,
MBN, MicY, MicCN, TotalPLFA, F_B, Gp_Gn, Act_Gp)
# Data for this are in `temporalP`
glimpse(temporalP)
# Order the inundation factor: yes / maybe / no
temporalP %<>% relocate(Inun, .after = PlotPos)
temporalP <- temporalP %>%
mutate(Inun = fct_relevel(`Inun`,
"y",
"m",
"n"))
# Quick correlation plot for evaluation
chart.Correlation(temporalP[, 8:36], histogram = TRUE, pch = 19)
# Drop and transform
# Drop redundant/radar variables, log1p-transform the skewed ones
ttemporalP <- temporalP %>%
select(-c(VH, VV, DTN)) %>%
mutate(across(c(Moisture, pHc, EC, AvailP, NO3, NH4, FAA, Proteolysis, DON, MBC, MBN, MicCN, TotalPLFA, F_B), ~log1p(.)))
chart.Correlation(ttemporalP[, 8:33], histogram = TRUE, pch = 19)
#prep
# Standardise (z.fn) columns 13:33 after dropping incomplete rows, then split
# into factor/ID columns (ftempP) and the numeric data matrix (dtempP)
sttemporalP <- ttemporalP %>%
drop_na() %>%
mutate(across(c(13:33), ~z.fn(.)))
ftempP <- sttemporalP %>%
select(1:12)
dtempP <- sttemporalP %>%
select(13:33)
#PCoA
# PCoA on Euclidean distances of the standardised temporal variables
disttempP <- vegdist(dtempP, method = "euclidean", na.rm = TRUE)
ptempP <- pcoa(disttempP)
ptempP$values$Relative_eig[1:10]
barplot(ptempP$values$Relative_eig[1:10])
tempP_points <- bind_cols(ftempP, (as.data.frame(ptempP$vectors)))
# Compute variable "arrows" (loadings) for a PCoA ordination biplot.
# NOTE(review): this redefines the identical helper declared earlier in the
# script; the redefinition is harmless but could be removed.
#
# given_pcoa: a PCoA result list (e.g. from ape::pcoa) with $vectors (site
#             scores, one column per axis) and $values$Eigenvalues.
# orig_df:    numeric data used to build the distance matrix; each column
#             becomes one arrow (can be pre-subset to columns of interest).
#
# Returns `given_pcoa` with an added $U matrix: covariances between each
# variable and the standardised axis scores, rescaled by the matching
# eigenvalues, with axis names carried over from $vectors.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  # Standardise site scores before computing covariances
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of every variable with every axis
  S <- cov(orig_df, points.stand)
  # Eigenvalues for the retained axes (assumed positive — TODO confirm for
  # ordinations with negative eigenvalues)
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # Standardise the covariances by sqrt(eigenvalue / (n - 1))
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Attach variable arrows to the temporal PCoA and scale for display (x10)
ptempP = compute.arrows(ptempP, dtempP)
ptempP_arrows_df <- as.data.frame(ptempP$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Plot
# Three views of the same PCoA: colour by transect, plot position, and
# plot position with inundation shape.
# NOTE(review): axis percentages in labels are hard-coded — check against
# ptempP$values$Relative_eig if the data change.
ggplot(tempP_points) + #Some separation by date, transect# seems noisy
geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = `Sampling Period`), size = 6) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
ggplot(tempP_points) + #A bit more informative, definite axis1 trend of transect. Date clustering a bit more obvious
geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = `Sampling Period`), size = 6) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
ggplot(tempP_points) + #Seems to clearly show separation
geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = Inun), size = 6) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
scale_shape_manual(values = c(15, 18, 0)) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = ptempP_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 18.6%",
y = "PCoA Axis 2; 15.7%")
# Permanova
# Interaction and additive PERMANOVAs, then FDR-corrected pairwise tests
# for each factor
set.seed(1983)
perm_tempPtp <- adonis2(disttempP~Transect*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtp #strong impact of transect and sampling period, no interaction
perm_tempPpp <- adonis2(disttempP~PlotPos*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPpp #strong impact of plot position and sampling period, no interaction
perm_tempPtpp <- adonis2(disttempP~Transect+PlotPos+`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtpp #strong impact of transect, plot position and sampling period in additive model
permpt_tempP <- pairwise.perm.manova(disttempP, sttemporalP$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_tempP #All differ except 0&8, 1&8, 3&9, 5&7
permpp_tempP <- pairwise.perm.manova(disttempP, sttemporalP$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_tempP #All differ except 2&3
permps_tempP <- pairwise.perm.manova(disttempP, sttemporalP$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permps_tempP #All differ
# CAP by transect
# Canonical Analysis of Principal coordinates discriminating transects on the
# temporal data, with correlation-based variable scores for arrows.
# NOTE(review): permutations = 99 here (vs 999 elsewhere) — presumably for
# speed; confirm before reporting significance.
sttemporalP <- as.data.frame(sttemporalP)
cap_temptP <- CAPdiscrim(disttempP~Transect, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 99)
cap_temptP <- add.spec.scores(cap_temptP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temptP$F/sum(cap_temptP$F), digits=3)
barplot(cap_temptP$F/sum(cap_temptP$F))
cap_temptP_points <- bind_cols((as.data.frame(cap_temptP$x)), ftempP)
glimpse(cap_temptP_points)
cap_temptP_arrows <- as.data.frame(cap_temptP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# CAP ordination by transect (axis % labels hard-coded from F ratios above)
ggplot(cap_temptP_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temptP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 57.0%",
y = "CAP Axis 2; 16.7%")
# CAP by transect + spider
# Centroids per transect plus point-to-centroid segments
tempP_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_temptP_points, FUN = mean)
tempP_segst <- merge(cap_temptP_points, setNames(tempP_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_temptP_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
geom_segment(data = tempP_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
geom_point(data = tempP_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temptP_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 57.0%",
y = "CAP Axis 2; 16.7%")
# CAP by plotpos
# NOTE(review): permutations = 9 here — almost certainly a leftover debug
# setting; increase before drawing conclusions.
cap_temppP <- CAPdiscrim(disttempP~PlotPos, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_temppP <- add.spec.scores(cap_temppP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temppP$F/sum(cap_temppP$F), digits=3)
barplot(cap_temppP$F/sum(cap_temppP$F))
cap_temppP_points <- bind_cols((as.data.frame(cap_temppP$x)), ftempP)
glimpse(cap_temppP_points)
cap_temppP_arrows <- as.data.frame(cap_temppP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_temppP_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 80.2%",
y = "CAP Axis 2; 18.7%")
# CAP by plot + spider
tempP_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_temppP_points, FUN = mean)
tempP_segsp <- merge(cap_temppP_points, setNames(tempP_centp, c('PlotPos', 'oLD1', 'oLD2')), by = 'PlotPos', sort = FALSE)
ggplot(cap_temppP_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 3, alpha = .6) +
geom_segment(data = tempP_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = PlotPos), alpha = .9, size = .3) +
geom_point(data = tempP_centp, mapping = aes(x = LD1, y = LD2, colour = PlotPos), size = 5) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppP_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 80.2%",
y = "CAP Axis 2; 18.7%")
# CAP by SamplingPeriod
cap_temppsP <- CAPdiscrim(disttempP~`Sampling Period`, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
cap_temppsP <- add.spec.scores(cap_temppsP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
saveRDS(cap_temppsP, file = "outputs/cap_temppsP.rds")
round(cap_temppsP$F/sum(cap_temppsP$F), digits=3)
barplot(cap_temppsP$F/sum(cap_temppsP$F))
cap_temppsP_points <- bind_cols((as.data.frame(cap_temppsP$x)), ftempP)
glimpse(cap_temppsP_points)
cap_temppsP_arrows <- as.data.frame(cap_temppsP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
cap_temppsP_arrows
ggplot(cap_temppsP_points) +
geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`), size = 4) +
scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppsP_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 65.2%",
y = "CAP Axis 2; 22.6%")
# CAP by SamplingPeriod + spider
tempP_centps <- aggregate(cbind(LD1, LD2) ~ `Sampling Period`, data = cap_temppsP_points, FUN = mean)
tempP_segsps <- merge(cap_temppsP_points, setNames(tempP_centps, c('Sampling Period', 'oLD1', 'oLD2')), by = 'Sampling Period', sort = FALSE)
ggplot(cap_temppsP_points) +
geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`, shape = PlotPos), size = 2.5, alpha = .4) +
geom_segment(data = tempP_segsps, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = `Sampling Period`), alpha = .9, size = .3) +
geom_point(data = tempP_centps, mapping = aes(x = LD1, y = LD2, colour = `Sampling Period`), size = 8) +
scale_colour_manual(values = brewer.pal(n = 5, name = "Set1")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_temppsP_arrows,
x = 0, y = 0, alpha = 0.6,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 5
) +
labs(
x = "CAP Axis 1; 65.2%",
y = "CAP Axis 2; 22.6%",
shape = "Plot position")
#### temporal trends ####
#This needs to be a multi-panel figure(s) y = var, x = date, colour = plot position, thick lines and points = mean, hairlines = toposequences
# 1) TICK - make a df with only vars of interest
# 2) TICK - Make summary df with means by landscape position
# 3) TICK - Plot individuals with feint lines, colours by landscape position
# 4) TICK - Overlay points and thicker lines, colours by landscape position
# Drop unused columns; Tr_PP (Transect_PlotPos) identifies one hairline series
seasonal <- temporalP %>%
select(-c(VH, VV, pHc, EC, DTN, MBC)) %>%
unite("Tr_PP", Transect:PlotPos, remove = FALSE)
# Variables to summarise. Note "Date" is included so the summary gains a
# Date_mean column used as the x-axis for the mean lines/points below.
seasonal_vars <- c("Date", "Moisture", "FAA", "NO3", "DON", "NH4", "AvailP", "DOC", "NDVI", "Wet", "Proteolysis", "AAMin_k1", "Gp_Gn", "F_B", "TotalPLFA", "MBN", "MicCN", "Act_Gp", "MicY")
# Mean of each variable per Sampling Period x plot position -> "<var>_mean"
seasonal_sum <- seasonal %>%
group_by(`Sampling Period`, PlotPos) %>%
summarise(across(all_of(seasonal_vars),
list(mean = ~ mean(.x, na.rm = TRUE)))) %>%
ungroup()
# All eighteen seasonal panels share exactly the same structure: hairlines
# per toposequence (seasonal), overlaid mean line + points per plot position
# (seasonal_sum), identical colour scale, theme, and x-axis. The previous
# code repeated this ~12-line recipe once per variable; a single helper
# removes the duplication while producing identical plot objects.
#
# plot_seasonal_var(var, y_lab)
#   var   - column name (string) in `seasonal`; its summary column in
#           `seasonal_sum` is assumed to be paste0(var, "_mean")
#   y_lab - y-axis label (string or plotmath expression)
# Returns a ggplot object.
plot_seasonal_var <- function(var, y_lab) {
  var_mean <- paste0(var, "_mean")
  ggplot() +
    # Hairlines: one faint line per toposequence x position series
    geom_line(data = seasonal,
              aes(group = Tr_PP, x = Date, y = .data[[var]], colour = PlotPos),
              size = 0.05) +
    # Thick line + points: period means per plot position
    geom_line(data = seasonal_sum,
              aes(x = Date_mean, y = .data[[var_mean]], colour = PlotPos),
              size = 1) +
    geom_point(data = seasonal_sum,
               aes(x = Date_mean, y = .data[[var_mean]], colour = PlotPos),
               size = 2) +
    scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
    theme_classic() +
    theme(strip.background = element_blank()) +
    scale_x_date(date_breaks = "3 months", date_labels = "%b-%y") +
    labs(x = "", y = y_lab, colour = "Plot position")
}
# One panel per variable; names and labels unchanged from the original code.
prot   <- plot_seasonal_var("Proteolysis", expression("Proteolysis rate"))
moist  <- plot_seasonal_var("Moisture", expression("MC (g "~g^-1~")"))
faa    <- plot_seasonal_var("FAA", expression("FAA-N (mg N "~kg^-1~")"))
no3    <- plot_seasonal_var("NO3", expression(~NO[3]^{"-"}~"-N (mg "~kg^-1~")"))
nh4    <- plot_seasonal_var("NH4", expression(~NH[4]^{"+"}~"-N (mg "~kg^-1~")"))
don    <- plot_seasonal_var("DON", expression("DON (mg "~kg^-1~")"))
doc    <- plot_seasonal_var("DOC", expression("DOC (mg "~kg^-1~")"))
availp <- plot_seasonal_var("AvailP", expression("Available P (mg "~kg^-1~")"))
aak1   <- plot_seasonal_var("AAMin_k1", expression("AA min ("~h^-1~")"))
cue    <- plot_seasonal_var("MicY", expression("Amino acid CUE"))
gpgn   <- plot_seasonal_var("Gp_Gn", expression("G+ : G- ratio"))
actgp  <- plot_seasonal_var("Act_Gp", expression("Actinomycete : G+ ratio"))
fb     <- plot_seasonal_var("F_B", expression("Fungal : Bacterial ratio"))
mbn    <- plot_seasonal_var("MBN", expression("MBN (mg "~kg^-1~")"))
miccn  <- plot_seasonal_var("MicCN", expression("Microbial biomass C:N ratio"))
totp   <- plot_seasonal_var("TotalPLFA", expression("Total PLFA (nmol "~g^-1~")"))
ndvi   <- plot_seasonal_var("NDVI", expression("NDVI"))
wet    <- plot_seasonal_var("Wet", expression("Wetness index"))
# Panel figure 1 (patchwork): pools, rates and moisture; 2 columns,
# tags a..j, single collected legend at the bottom
no3 + nh4 + faa + don + doc + availp + prot + aak1 + cue + moist +
plot_annotation(tag_levels = 'a') +
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = 0, vjust = 1)) +
plot_layout(ncol = 2, guides = 'collect') & theme(legend.position = 'bottom')
# Panel figure 2: remote sensing indices + microbial community metrics
ndvi + wet + miccn + mbn + totp + fb + gpgn + actgp +
plot_annotation(tag_levels = 'a') +
theme(plot.tag.position = c(0, 1),
plot.tag = element_text(size = 16, hjust = 0, vjust = 1)) +
plot_layout(ncol = 2, guides = 'collect') & theme(legend.position = 'bottom')
#### inflows ####
# Inflow record: wide CSV (Year rows x month columns) -> long form, then
# spline-interpolated to daily values for the ridgeline plot below.
inflow_raw <- read_csv("data/raw/KPinflows.csv") # read data
inflow_raw$YearTemp <- inflow_raw$Year # duplicate Year so a copy survives the pivot (was `=` assignment)
inflow_long <- inflow_raw %>% # put in long form and kill empty space
remove_empty() %>%
pivot_longer(!c(Year, YearTemp), names_to = "Month", values_to = "Inflow")
inflow_long$Month <- gsub("^.{0,4}", "", inflow_long$Month) # strip 4-char filler prefix from the month column names
inflow_long$Month <- paste0(inflow_long$YearTemp,inflow_long$Month) # prepend year -> full date strings
head(inflow_long$Month)
inflow_long$Month <- as_date(inflow_long$Month) # format as date
str(inflow_long)
SplineFun <- splinefun(x = inflow_long$Month, y = inflow_long$Inflow) # interpolating function over time
Dates <- seq.Date(ymd("1895-01-01"), ymd("2019-12-31"), by = 1) # daily sequence to interpolate onto
SplineFit <- SplineFun(Dates) # apply spline to filling dates
head(SplineFit)
newDF <- data.frame(Dates = Dates, FitData = SplineFit) # glue vecs together
head(newDF)
str(newDF)
# Bug fix: was newDF$Date, which only resolved to the Dates column via
# data.frame partial matching -- refer to the column by its real name.
newDF$year <- as.numeric(format(newDF$Dates,'%Y')) # pull year into new column
newDF$Dates <- gsub("^.{0,4}", "2000", newDF$Dates) # dummy year 2000 so every year aligns on one Jan-Dec axis
newDF$Dates <- as_date(newDF$Dates) # re-make date type
#Needed for uninterpolated plot
# Month must be an ordered factor so the discrete x-axis runs Jan..Dec
# rather than alphabetically. Base R ships exactly this vector as the
# constant `month.abb`, so use it instead of a hand-rolled copy.
# NOTE(review): inflow_long$Month was parsed to Date above, so these levels
# only apply if the feed-in code reverts to month abbreviations -- matches
# the "will need tweaks" caveat on the uninterpolated plot below.
month_levels <- month.abb
inflow_long$Month <- factor(inflow_long$Month, levels = month_levels)
str(inflow_long)
# Colours from gradient picking at top of script: one fill per year
# (1895-2019, 125 values). Order matters -- scale_fill_manual() maps
# them positionally onto the year factor levels.
inflow_col <- c(
  "#F5E8C4", "#B77A27", "#E7D098", "#EDD9A9", "#F1E0B4", "#DBEEEB",
  "#F5F1E8", "#613706", "#F5EBD0", "#C8EAE5", "#BDE6E0", "#005349",
  "#C28734", "#C99748", "#187C74", "#CCEBE6", "#B8E3DD", "#F3E3B9",
  "#CC9C4E", "#663A06", "#5AB2A8", "#005046", "#003F33", "#036860",
  "#A36619", "#3C9C93", "#298B83", "#CFA154", "#1C8078", "#66BAB0",
  "#EBD6A3", "#9BD8CE", "#DBBB75", "#E2F0EE", "#B37625", "#F5F3F0",
  "#004C42", "#72C3B8", "#E9F2F1", "#90D3C9", "#84CEC3", "#CFECE8",
  "#9E6216", "#6A3D07", "#005A51", "#734207", "#A76A1C", "#42A097",
  "#E0C481", "#814A09", "#D1A65B", "#F5F5F5", "#95D5CC", "#DEC07B",
  "#EDF3F2", "#E6CD92", "#60B6AC", "#00463B", "#20847C", "#F5EACC",
  "#00493E", "#003C30", "#C48C3B", "#25877F", "#BB7D2A", "#0B7068",
  "#AB6E1F", "#F4E6BF", "#F5F2EC", "#076C64", "#EFDCAE", "#D7EDEA",
  "#8E530B", "#4EA99F", "#F5EDD8", "#7ECCC0", "#A6DCD4", "#92570E",
  "#005D55", "#004237", "#00574D", "#BF822E", "#D6B168", "#B2E1DA",
  "#ACDFD7", "#F5E9C8", "#006158", "#543005", "#6CBFB4", "#C3E8E3",
  "#F5ECD4", "#31938B", "#F1F4F3", "#D3EDE9", "#36978F", "#54ADA3",
  "#A1DAD1", "#147870", "#00645C", "#8A4F09", "#78C7BC", "#10746C",
  "#965B11", "#E9D39D", "#D9B66E", "#8AD1C6", "#D4AB61", "#784508",
  "#F5F0E4", "#E4CA8C", "#F5EEDC", "#583205", "#854D09", "#6F3F07",
  "#AF7222", "#48A49B", "#DEEFED", "#E6F1EF", "#F5EFE0", "#E2C787",
  "#7C4708", "#2D8F87", "#C79141", "#9A5E14", "#5D3505"
)
## Ridgeline of raw monthly inflows (no interpolation); will need tweaks as some feed-in code changed
ggplot(inflow_long, aes(x = Month, y = Year, height = Inflow, group = Year, fill = as.factor(Year))) +
geom_ridgeline(stat = "identity", alpha = 0.8, scale = 0.003, min_height = 1, size = 0.2, show.legend = FALSE) +
theme_classic() +
# Reversed y so the earliest year sits at the top; labelled decades + endpoints
scale_y_reverse(breaks = c(1895, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2019), expand = c(0,0), name = "", position = "right") +
scale_x_discrete(expand = c(0,0.1), name = "") +
theme(axis.line.y = element_blank(), axis.ticks.y = element_blank()) +
scale_fill_manual(values = inflow_col)
## Ridgeline of spline-interpolated daily inflows: one ridge per year,
## all plotted against the dummy-year-2000 date axis so months align
ggplot(newDF, aes(x = Dates, y = year, height = FitData, group = year, fill = as.factor(year))) +
geom_ridgeline(stat = "identity", alpha = 0.8, scale = 0.003, min_height = 10, size = 0.2, show.legend = FALSE) +
theme_classic() +
scale_y_reverse(breaks = c(1895, 1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010, 2019),
minor_breaks = seq(1895, 2019, 5),
expand = c(0,0), name = "", position = "right") +
scale_x_date(date_breaks = "1 month", minor_breaks = "1 week", labels=date_format("%b"), expand = c(0,0.1), name = "") +
theme(axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(hjust = -1.5),
panel.grid.major.y = element_line(color = "black", size = 0.2, linetype = "dotted"),
panel.grid.minor.y = element_line(color = "black", size = 0.2, linetype = "dotted")) +
scale_fill_manual(values = inflow_col)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include arrow-object.R
#' @title class arrow::ExtensionArray
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @section Methods:
#'
#' The `ExtensionArray` class inherits from `Array`, but also provides
#' access to the underlying storage of the extension.
#'
#' - `$storage()`: Returns the underlying [Array] used to store
#' values.
#'
#' The `ExtensionArray` is not intended to be subclassed for extension
#' types.
#'
#' @rdname ExtensionArray
#' @name ExtensionArray
#' @export
# R6 wrapper for arrow::ExtensionArray. Inherits all Array behaviour and
# adds access to the underlying storage array.
ExtensionArray <- R6Class("ExtensionArray",
inherit = Array,
public = list(
# Returns the storage Array backing this extension array
storage = function() {
ExtensionArray__storage(self)
},
# Conversion to an R vector is delegated to the extension type, which
# may be a user-defined R6 subclass overriding as_vector()
as_vector = function() {
self$type$as_vector(self)
}
)
)
# Build an ExtensionArray of `type` from `x`, or pass `x` through if it
# is already an ExtensionArray of exactly that type.
ExtensionArray$create <- function(x, type) {
  assert_is(type, "ExtensionType")
  already_wrapped <- inherits(x, "ExtensionArray") && type$Equals(x$type)
  if (already_wrapped) {
    return(x)
  }
  # Otherwise create the storage array and wrap it in the extension type.
  type$WrapArray(Array$create(x, type = type$storage_type()))
}
#' @title class arrow::ExtensionType
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @section Methods:
#'
#' The `ExtensionType` class inherits from `DataType`, but also defines
#' extra methods specific to extension types:
#'
#' - `$storage_type()`: Returns the underlying [DataType] used to store
#' values.
#' - `$storage_id()`: Returns the [Type] identifier corresponding to the
#' `$storage_type()`.
#' - `$extension_name()`: Returns the extension name.
#' - `$extension_metadata()`: Returns the serialized version of the extension
#' metadata as a [raw()] vector.
#' - `$extension_metadata_utf8()`: Returns the serialized version of the
#' extension metadata as a UTF-8 encoded string.
#' - `$WrapArray(array)`: Wraps a storage [Array] into an [ExtensionArray]
#' with this extension type.
#'
#' In addition, subclasses may override the following methods to customize
#' the behaviour of extension classes.
#'
#' - `$deserialize_instance()`: This method is called when a new [ExtensionType]
#' is initialized and is responsible for parsing and validating
#' the serialized extension_metadata (a [raw()] vector)
#' such that its contents can be inspected by fields and/or methods
#' of the R6 ExtensionType subclass. Implementations must also check the
#' `storage_type` to make sure it is compatible with the extension type.
#' - `$as_vector(extension_array)`: Convert an [Array] or [ChunkedArray] to an R
#' vector. This method is called by [as.vector()] on [ExtensionArray]
#' objects, when a [RecordBatch] containing an [ExtensionArray] is
#' converted to a [data.frame()], or when a [ChunkedArray] (e.g., a column
#' in a [Table]) is converted to an R vector. The default method returns the
#' converted storage array.
#' - `$ToString()` Return a string representation that will be printed
#' to the console when this type or an Array of this type is printed.
#'
#' @rdname ExtensionType
#' @name ExtensionType
#' @export
# R6 wrapper for arrow::ExtensionType (a DataType subclass). User-defined
# extension types subclass this and override deserialize_instance(),
# as_vector(), and/or ToString().
ExtensionType <- R6Class("ExtensionType",
inherit = DataType,
public = list(
# In addition to the initialization that occurs for all
# ArrowObject instances, we call deserialize_instance(), which can
# be overridden to populate custom fields
initialize = function(xp) {
super$initialize(xp)
self$deserialize_instance()
},
# Because of how C++ shared_ptr<> objects are converted to R objects,
# the initial object that is instantiated will be of this class
# (ExtensionType), but the R6Class object that was registered is
# available from C++. We need this in order to produce the correct
# R6 subclass when a shared_ptr<ExtensionType> is returned to R.
r6_class = function() {
ExtensionType__r6_class(self)
},
# The DataType used for the underlying storage
storage_type = function() {
ExtensionType__storage_type(self)
},
# Type identifier corresponding to storage_type()
storage_id = function() {
self$storage_type()$id
},
# The namespaced extension name (e.g., "arrow.r.vctrs")
extension_name = function() {
ExtensionType__extension_name(self)
},
# Serialized extension metadata as a raw() vector
extension_metadata = function() {
ExtensionType__Serialize(self)
},
# To make sure this conversion is done properly
extension_metadata_utf8 = function() {
metadata_utf8 <- rawToChar(self$extension_metadata())
Encoding(metadata_utf8) <- "UTF-8"
metadata_utf8
},
# Wrap a storage Array into an ExtensionArray carrying this type
WrapArray = function(array) {
assert_is(array, "Array")
ExtensionType__MakeArray(self, array$data())
},
deserialize_instance = function() {
# Do nothing by default but allow other classes to override this method
# to populate R6 class members.
},
# Two extension types are equal when name and serialized metadata match
ExtensionEquals = function(other) {
inherits(other, "ExtensionType") &&
identical(other$extension_name(), self$extension_name()) &&
identical(other$extension_metadata(), self$extension_metadata())
},
# Convert an ExtensionArray or ChunkedArray of this type to an R vector.
# The default implementation returns the converted storage array.
as_vector = function(extension_array) {
if (inherits(extension_array, "ChunkedArray")) {
# Converting one array at a time so that users don't have to remember
# to implement two methods. Converting all the storage arrays to
# a ChunkedArray and then converting is probably faster
# (VctrsExtensionType does this).
storage_vectors <- lapply(
seq_len(extension_array$num_chunks) - 1L,
function(i) self$as_vector(extension_array$chunk(i))
)
vctrs::vec_c(!!!storage_vectors)
} else if (inherits(extension_array, "ExtensionArray")) {
extension_array$storage()$as_vector()
} else {
abort(
c(
"`extension_array` must be a ChunkedArray or ExtensionArray",
i = sprintf(
"Got object of type %s",
paste(class(extension_array), collapse = " / ")
)
)
)
}
},
ToString = function() {
# metadata is probably valid UTF-8 (e.g., JSON), but might not be
# and it's confusing to error when printing the object. This heuristic
# isn't perfect (but subclasses should override this method anyway)
metadata_raw <- self$extension_metadata()
if (as.raw(0x00) %in% metadata_raw) {
# Contains a NUL byte: treat as binary and show at most 20 bytes as hex
if (length(metadata_raw) > 20) {
sprintf(
"<%s %s...>",
class(self)[1],
paste(format(utils::head(metadata_raw, 20)), collapse = " ")
)
} else {
sprintf(
"<%s %s>",
class(self)[1],
paste(format(metadata_raw), collapse = " ")
)
}
} else {
paste0(class(self)[1], " <", self$extension_metadata_utf8(), ">")
}
}
)
)
# Generated wrapper code calls ExtensionType$new() whenever a
# shared_ptr<DataType> with type_id() EXTENSION_TYPE crosses into R.
# Rather than complicate the wrapper code, we patch $new() here so that
# it returns an instance of the registered R6 subclass (whose
# deserialize_instance() method then populates any custom fields).
ExtensionType$.default_new <- ExtensionType$new
ExtensionType$new <- function(xp) {
  base_instance <- ExtensionType$.default_new(xp)
  subclass_generator <- base_instance$r6_class()
  if (identical(subclass_generator$classname, "ExtensionType")) {
    # No subclass registered: the plain ExtensionType instance will do.
    base_instance
  } else {
    # Re-instantiate through the registered subclass generator.
    subclass_generator$new(xp)
  }
}
# Construct a new ExtensionType instance from a storage DataType, a
# namespaced extension name, serialized metadata, and the R6 generator
# used to instantiate it. Assertion order is preserved: it determines
# which validation error a caller sees first.
ExtensionType$create <- function(storage_type,
extension_name,
extension_metadata = raw(),
type_class = ExtensionType) {
# Convenience: a single string is stored as its UTF-8 bytes
if (is.string(extension_metadata)) {
extension_metadata <- charToRaw(enc2utf8(extension_metadata))
}
assert_that(is.string(extension_name), is.raw(extension_metadata))
assert_is(storage_type, "DataType")
assert_is(type_class, "R6ClassGenerator")
ExtensionType__initialize(
storage_type,
extension_name,
extension_metadata,
type_class
)
}
#' Extension types
#'
#' Extension arrays are wrappers around regular Arrow [Array] objects
#' that provide some customized behaviour and/or storage. A common use-case
#' for extension types is to define a customized conversion between an
#' Arrow [Array] and an R object when the default conversion is slow
#' or loses metadata important to the interpretation of values in the array.
#' For most types, the built-in
#' [vctrs extension type][vctrs_extension_type] is probably sufficient.
#'
#' These functions create, register, and unregister [ExtensionType]
#' and [ExtensionArray] objects. To use an extension type you will have to:
#'
#' - Define an [R6::R6Class] that inherits from [ExtensionType] and reimplement
#' one or more methods (e.g., `deserialize_instance()`).
#' - Make a type constructor function (e.g., `my_extension_type()`) that calls
#' [new_extension_type()] to create an R6 instance that can be used as a
#' [data type][data-type] elsewhere in the package.
#' - Make an array constructor function (e.g., `my_extension_array()`) that
#' calls [new_extension_array()] to create an [Array] instance of your
#' extension type.
#' - Register a dummy instance of your extension type created using
#' your constructor function using [register_extension_type()].
#'
#' If defining an extension type in an R package, you will probably want to
#' use [reregister_extension_type()] in that package's [.onLoad()] hook
#' since your package will probably get reloaded in the same R session
#' during its development and [register_extension_type()] will error if
#' called twice for the same `extension_name`. For an example of an
#' extension type that uses most of these features, see
#' [vctrs_extension_type()].
#'
#' @param storage_type The [data type][data-type] of the underlying storage
#' array.
#' @param storage_array An [Array] object of the underlying storage.
#' @param extension_type An [ExtensionType] instance.
#' @param extension_name The extension name. This should be namespaced using
#' "dot" syntax (i.e., "some_package.some_type"). The namespace "arrow"
#' is reserved for extension types defined by the Apache Arrow libraries.
#' @param extension_metadata A [raw()] or [character()] vector containing the
#' serialized version of the type. Character vectors must be length 1 and
#' are converted to UTF-8 before converting to [raw()].
#' @param type_class An [R6::R6Class] whose `$new()` class method will be
#' used to construct a new instance of the type.
#'
#' @return
#' - `new_extension_type()` returns an [ExtensionType] instance according
#' to the `type_class` specified.
#' - `new_extension_array()` returns an [ExtensionArray] whose `$type`
#' corresponds to `extension_type`.
#' - `register_extension_type()`, `unregister_extension_type()`
#' and `reregister_extension_type()` return `NULL`, invisibly.
#' @export
#'
#' @examples
#' # Create the R6 type whose methods control how Array objects are
#' # converted to R objects, how equality between types is computed,
#' # and how types are printed.
#' QuantizedType <- R6::R6Class(
#' "QuantizedType",
#' inherit = ExtensionType,
#' public = list(
#' # methods to access the custom metadata fields
#' center = function() private$.center,
#' scale = function() private$.scale,
#'
#' # called when an Array of this type is converted to an R vector
#' as_vector = function(extension_array) {
#' if (inherits(extension_array, "ExtensionArray")) {
#' unquantized_arrow <-
#' (extension_array$storage()$cast(float64()) / private$.scale) +
#' private$.center
#'
#' as.vector(unquantized_arrow)
#' } else {
#' super$as_vector(extension_array)
#' }
#' },
#'
#' # populate the custom metadata fields from the serialized metadata
#' deserialize_instance = function() {
#' vals <- as.numeric(strsplit(self$extension_metadata_utf8(), ";")[[1]])
#' private$.center <- vals[1]
#' private$.scale <- vals[2]
#' }
#' ),
#' private = list(
#' .center = NULL,
#' .scale = NULL
#' )
#' )
#'
#' # Create a helper type constructor that calls new_extension_type()
#' quantized <- function(center = 0, scale = 1, storage_type = int32()) {
#' new_extension_type(
#' storage_type = storage_type,
#' extension_name = "arrow.example.quantized",
#' extension_metadata = paste(center, scale, sep = ";"),
#' type_class = QuantizedType
#' )
#' }
#'
#' # Create a helper array constructor that calls new_extension_array()
#' quantized_array <- function(x, center = 0, scale = 1,
#' storage_type = int32()) {
#' type <- quantized(center, scale, storage_type)
#' new_extension_array(
#' Array$create((x - center) * scale, type = storage_type),
#' type
#' )
#' }
#'
#' # Register the extension type so that Arrow knows what to do when
#' # it encounters this extension type
#' reregister_extension_type(quantized())
#'
#' # Create Array objects and use them!
#' (vals <- runif(5, min = 19, max = 21))
#'
#' (array <- quantized_array(
#' vals,
#' center = 20,
#' scale = 2^15 - 1,
#' storage_type = int16()
#' )
#' )
#'
#' array$type$center()
#' array$type$scale()
#'
#' as.vector(array)
new_extension_type <- function(storage_type,
                               extension_name,
                               extension_metadata = raw(),
                               type_class = ExtensionType) {
  # Thin public wrapper: validation and construction live in
  # ExtensionType$create(); arguments are forwarded by name.
  ExtensionType$create(
    storage_type = storage_type,
    extension_name = extension_name,
    extension_metadata = extension_metadata,
    type_class = type_class
  )
}
#' @rdname new_extension_type
#' @export
new_extension_array <- function(storage_array, extension_type) {
  # Delegates to ExtensionArray$create(), which validates the type and
  # wraps (or passes through) the storage array.
  ExtensionArray$create(x = storage_array, type = extension_type)
}
#' @rdname new_extension_type
#' @export
register_extension_type <- function(extension_type) {
assert_is(extension_type, "ExtensionType")
# Registers with the C++ extension-type registry; per the docs above this
# errors if a type with the same extension_name is already registered.
arrow__RegisterRExtensionType(extension_type)
}
#' @rdname new_extension_type
#' @export
reregister_extension_type <- function(extension_type) {
# Try to register; on any error (typically "already registered"),
# unregister the existing entry for this name and register again.
tryCatch(
register_extension_type(extension_type),
error = function(e) {
unregister_extension_type(extension_type$extension_name())
register_extension_type(extension_type)
}
)
}
#' @rdname new_extension_type
#' @export
unregister_extension_type <- function(extension_name) {
# Removes the type registered under `extension_name` from the C++ registry
arrow__UnregisterRExtensionType(extension_name)
}
# ExtensionType for the "arrow.r.vctrs" extension: the extension metadata
# is a serialize()d vctrs prototype (ptype), restored on deserialization
# and used with vctrs::vec_restore() when converting back to R.
VctrsExtensionType <- R6Class("VctrsExtensionType",
  inherit = ExtensionType,
  public = list(
    # The zero-length vctrs prototype describing the R vector type
    ptype = function() {
      private$.ptype
    },
    ToString = function() {
      # Show the ptype's printed representation. utils::capture.output()
      # does the sink() bookkeeping that the previous implementation did
      # by hand with a tempfile, and cannot leave a dangling sink or
      # temp file behind if print() errors.
      paste(utils::capture.output(print(self$ptype())), collapse = "\n")
    },
    # Rebuild the ptype from the serialized metadata on instantiation
    deserialize_instance = function() {
      private$.ptype <- unserialize(self$extension_metadata())
    },
    # Equality is defined by identical ptypes (stricter than name+metadata)
    ExtensionEquals = function(other) {
      if (!inherits(other, "VctrsExtensionType")) {
        return(FALSE)
      }
      identical(self$ptype(), other$ptype())
    },
    as_vector = function(extension_array) {
      if (inherits(extension_array, "ChunkedArray")) {
        # rather than convert one array at a time, use more Arrow
        # machinery to convert the whole ChunkedArray at once
        storage_arrays <- lapply(
          seq_len(extension_array$num_chunks) - 1L,
          function(i) extension_array$chunk(i)$storage()
        )
        storage <- chunked_array(!!!storage_arrays, type = self$storage_type())
        vctrs::vec_restore(storage$as_vector(), self$ptype())
      } else if (inherits(extension_array, "Array")) {
        vctrs::vec_restore(
          super$as_vector(extension_array),
          self$ptype()
        )
      } else {
        # Delegate so the base class raises its informative error
        super$as_vector(extension_array)
      }
    }
  ),
  private = list(
    .ptype = NULL
  )
)
#' Extension type for generic typed vectors
#'
#' Most common R vector types are converted automatically to a suitable
#' Arrow [data type][data-type] without the need for an extension type. For
#' vector types whose conversion is not suitably handled by default, you can
#' create a [vctrs_extension_array()], which passes [vctrs::vec_data()] to
#' `Array$create()` and calls [vctrs::vec_restore()] when the [Array] is
#' converted back into an R vector.
#'
#' @param x A vctr (i.e., [vctrs::vec_is()] returns `TRUE`).
#' @param ptype A [vctrs::vec_ptype()], which is usually a zero-length
#' version of the object with the appropriate attributes set. This value
#' will be serialized using [serialize()], so it should not refer to any
#' R object that can't be saved/reloaded.
#' @inheritParams new_extension_type
#'
#' @return
#' - `vctrs_extension_array()` returns an [ExtensionArray] instance with a
#' `vctrs_extension_type()`.
#' - `vctrs_extension_type()` returns an [ExtensionType] instance for the
#' extension name "arrow.r.vctrs".
#' @export
#'
#' @examples
#' (array <- vctrs_extension_array(as.POSIXlt("2022-01-02 03:45", tz = "UTC")))
#' array$type
#' as.vector(array)
#'
#' temp_feather <- tempfile()
#' write_feather(arrow_table(col = array), temp_feather)
#' read_feather(temp_feather)
#' unlink(temp_feather)
vctrs_extension_array <- function(x, ptype = vctrs::vec_ptype(x),
                                  storage_type = NULL) {
  # Already wrapped? Return as-is so the function is idempotent.
  already_vctrs <- inherits(x, "ExtensionArray") &&
    inherits(x$type, "VctrsExtensionType")
  if (already_vctrs) {
    return(x)
  }
  vctrs::vec_assert(x)
  # Store the bare vec_data() and remember the prototype in the type's
  # metadata so the vector class can be restored on conversion back to R.
  storage_array <- Array$create(vctrs::vec_data(x), type = storage_type)
  new_extension_array(
    storage_array,
    vctrs_extension_type(ptype, storage_array$type)
  )
}
#' @rdname vctrs_extension_array
#' @export
vctrs_extension_type <- function(x,
                                 storage_type = infer_type(vctrs::vec_data(x))) {
  # The prototype (not the data) is what gets serialized into the
  # extension metadata; VctrsExtensionType unserializes it on creation.
  prototype <- vctrs::vec_ptype(x)
  new_extension_type(
    storage_type = storage_type,
    extension_name = "arrow.r.vctrs",
    extension_metadata = serialize(prototype, NULL),
    type_class = VctrsExtensionType
  )
}
| /r/R/extension.R | permissive | tallamjr/arrow | R | false | false | 18,626 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include arrow-object.R
#' @title class arrow::ExtensionArray
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @section Methods:
#'
#' The `ExtensionArray` class inherits from `Array`, but also provides
#' access to the underlying storage of the extension.
#'
#' - `$storage()`: Returns the underlying [Array] used to store
#' values.
#'
#' The `ExtensionArray` is not intended to be subclassed for extension
#' types.
#'
#' @rdname ExtensionArray
#' @name ExtensionArray
#' @export
# ExtensionArray: an Array subclass pairing a storage Array with an
# ExtensionType (see the roxygen block above for the public contract).
ExtensionArray <- R6Class("ExtensionArray",
  inherit = Array,
  public = list(
    # Return the underlying storage Array (delegates to the C++ bridge).
    storage = function() {
      ExtensionArray__storage(self)
    },
    # Conversion to an R vector is delegated to the type, so ExtensionType
    # subclasses can customize it by overriding their as_vector() method.
    as_vector = function() {
      self$type$as_vector(self)
    }
  )
)
# Build an ExtensionArray from `x` for the given ExtensionType.
#
# If `x` is already an ExtensionArray of an equal type it is returned
# unchanged; otherwise `x` is converted to a storage Array of the type's
# storage_type() and wrapped.
ExtensionArray$create <- function(x, type) {
  assert_is(type, "ExtensionType")
  already_wrapped <- inherits(x, "ExtensionArray") && type$Equals(x$type)
  if (already_wrapped) {
    return(x)
  }
  type$WrapArray(Array$create(x, type = type$storage_type()))
}
#' @title class arrow::ExtensionType
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @section Methods:
#'
#' The `ExtensionType` class inherits from `DataType`, but also defines
#' extra methods specific to extension types:
#'
#' - `$storage_type()`: Returns the underlying [DataType] used to store
#' values.
#' - `$storage_id()`: Returns the [Type] identifier corresponding to the
#' `$storage_type()`.
#' - `$extension_name()`: Returns the extension name.
#' - `$extension_metadata()`: Returns the serialized version of the extension
#' metadata as a [raw()] vector.
#' - `$extension_metadata_utf8()`: Returns the serialized version of the
#' extension metadata as a UTF-8 encoded string.
#' - `$WrapArray(array)`: Wraps a storage [Array] into an [ExtensionArray]
#' with this extension type.
#'
#' In addition, subclasses may override the following methods to customize
#' the behaviour of extension classes.
#'
#' - `$deserialize_instance()`: This method is called when a new [ExtensionType]
#' is initialized and is responsible for parsing and validating
#' the serialized extension_metadata (a [raw()] vector)
#' such that its contents can be inspected by fields and/or methods
#' of the R6 ExtensionType subclass. Implementations must also check the
#' `storage_type` to make sure it is compatible with the extension type.
#' - `$as_vector(extension_array)`: Convert an [Array] or [ChunkedArray] to an R
#' vector. This method is called by [as.vector()] on [ExtensionArray]
#' objects, when a [RecordBatch] containing an [ExtensionArray] is
#' converted to a [data.frame()], or when a [ChunkedArray] (e.g., a column
#' in a [Table]) is converted to an R vector. The default method returns the
#' converted storage array.
#' - `$ToString()` Return a string representation that will be printed
#' to the console when this type or an Array of this type is printed.
#'
#' @rdname ExtensionType
#' @name ExtensionType
#' @export
ExtensionType <- R6Class("ExtensionType",
  inherit = DataType,
  public = list(
    # In addition to the initialization that occurs for all
    # ArrowObject instances, we call deserialize_instance(), which can
    # be overridden to populate custom fields from the serialized metadata.
    initialize = function(xp) {
      super$initialize(xp)
      self$deserialize_instance()
    },
    # Because of how C++ shared_ptr<> objects are converted to R objects,
    # the initial object that is instantiated will be of this class
    # (ExtensionType), but the R6Class object that was registered is
    # available from C++. We need this in order to produce the correct
    # R6 subclass when a shared_ptr<ExtensionType> is returned to R.
    r6_class = function() {
      ExtensionType__r6_class(self)
    },
    # The DataType used for the physical storage of values.
    storage_type = function() {
      ExtensionType__storage_type(self)
    },
    # Type identifier of the storage type (convenience accessor).
    storage_id = function() {
      self$storage_type()$id
    },
    # The registered extension name (e.g., "arrow.r.vctrs").
    extension_name = function() {
      ExtensionType__extension_name(self)
    },
    # Serialized extension metadata as a raw vector.
    extension_metadata = function() {
      ExtensionType__Serialize(self)
    },
    # To make sure this conversion is done properly: decode the raw
    # metadata as a UTF-8 string (metadata is typically JSON or similar).
    extension_metadata_utf8 = function() {
      metadata_utf8 <- rawToChar(self$extension_metadata())
      Encoding(metadata_utf8) <- "UTF-8"
      metadata_utf8
    },
    # Wrap a storage Array into an ExtensionArray of this type.
    WrapArray = function(array) {
      assert_is(array, "Array")
      ExtensionType__MakeArray(self, array$data())
    },
    deserialize_instance = function() {
      # Do nothing by default but allow other classes to override this method
      # to populate R6 class members.
    },
    # Two extension types are equal when their name and serialized
    # metadata match; subclasses may override for richer comparisons.
    ExtensionEquals = function(other) {
      inherits(other, "ExtensionType") &&
        identical(other$extension_name(), self$extension_name()) &&
        identical(other$extension_metadata(), self$extension_metadata())
    },
    as_vector = function(extension_array) {
      if (inherits(extension_array, "ChunkedArray")) {
        # Converting one array at a time so that users don't have to remember
        # to implement two methods. Converting all the storage arrays to
        # a ChunkedArray and then converting is probably faster
        # (VctrsExtensionType does this).
        storage_vectors <- lapply(
          seq_len(extension_array$num_chunks) - 1L,
          function(i) self$as_vector(extension_array$chunk(i))
        )
        vctrs::vec_c(!!!storage_vectors)
      } else if (inherits(extension_array, "ExtensionArray")) {
        # Default conversion: just convert the storage array.
        extension_array$storage()$as_vector()
      } else {
        abort(
          c(
            "`extension_array` must be a ChunkedArray or ExtensionArray",
            i = sprintf(
              "Got object of type %s",
              paste(class(extension_array), collapse = " / ")
            )
          )
        )
      }
    },
    ToString = function() {
      # metadata is probably valid UTF-8 (e.g., JSON), but might not be
      # and it's confusing to error when printing the object. This heuristic
      # isn't perfect (but subclasses should override this method anyway)
      metadata_raw <- self$extension_metadata()
      if (as.raw(0x00) %in% metadata_raw) {
        # Embedded NUL byte: not text, so show (up to) the first 20 bytes.
        if (length(metadata_raw) > 20) {
          sprintf(
            "<%s %s...>",
            class(self)[1],
            paste(format(utils::head(metadata_raw, 20)), collapse = " ")
          )
        } else {
          sprintf(
            "<%s %s>",
            class(self)[1],
            paste(format(metadata_raw), collapse = " ")
          )
        }
      } else {
        paste0(class(self)[1], " <", self$extension_metadata_utf8(), ">")
      }
    }
  )
)
# ExtensionType$new() is what gets used by the generated wrapper code to
# create an R6 object when a shared_ptr<DataType> is returned to R and
# that object has type_id() EXTENSION_TYPE. Rather than add complexity
# to the wrapper code, we modify ExtensionType$new() to do what we need
# it to do here (which is to return an instance of a custom R6
# type whose .deserialize_instance method is called to populate custom fields).
ExtensionType$.default_new <- ExtensionType$new
ExtensionType$new <- function(xp) {
  base_instance <- ExtensionType$.default_new(xp)
  registered_class <- base_instance$r6_class()
  if (identical(registered_class$classname, "ExtensionType")) {
    # No subclass registered for this type: the plain instance is correct.
    base_instance
  } else {
    # Re-instantiate through the registered subclass generator so that
    # subclass fields and methods are available.
    registered_class$new(xp)
  }
}
# Validate inputs and construct an ExtensionType (or subclass) instance.
#
# `extension_metadata` may be given as a length-one character vector, in
# which case it is UTF-8 encoded and converted to raw before validation.
ExtensionType$create <- function(storage_type,
                                 extension_name,
                                 extension_metadata = raw(),
                                 type_class = ExtensionType) {
  # Accept a string for convenience; the C++ layer requires raw bytes.
  if (is.string(extension_metadata)) {
    extension_metadata <- charToRaw(enc2utf8(extension_metadata))
  }
  assert_that(is.string(extension_name), is.raw(extension_metadata))
  assert_is(storage_type, "DataType")
  assert_is(type_class, "R6ClassGenerator")
  ExtensionType__initialize(
    storage_type,
    extension_name,
    extension_metadata,
    type_class
  )
}
#' Extension types
#'
#' Extension arrays are wrappers around regular Arrow [Array] objects
#' that provide some customized behaviour and/or storage. A common use-case
#' for extension types is to define a customized conversion between
#' an Arrow [Array] and an R object when the default conversion is slow
#' or loses metadata important to the interpretation of values in the array.
#' For most types, the built-in
#' [vctrs extension type][vctrs_extension_type] is probably sufficient.
#'
#' These functions create, register, and unregister [ExtensionType]
#' and [ExtensionArray] objects. To use an extension type you will have to:
#'
#' - Define an [R6::R6Class] that inherits from [ExtensionType] and reimplement
#' one or more methods (e.g., `deserialize_instance()`).
#' - Make a type constructor function (e.g., `my_extension_type()`) that calls
#' [new_extension_type()] to create an R6 instance that can be used as a
#' [data type][data-type] elsewhere in the package.
#' - Make an array constructor function (e.g., `my_extension_array()`) that
#' calls [new_extension_array()] to create an [Array] instance of your
#' extension type.
#' - Register a dummy instance of your extension type created using
#' your constructor function using [register_extension_type()].
#'
#' If defining an extension type in an R package, you will probably want to
#' use [reregister_extension_type()] in that package's [.onLoad()] hook
#' since your package will probably get reloaded in the same R session
#' during its development and [register_extension_type()] will error if
#' called twice for the same `extension_name`. For an example of an
#' extension type that uses most of these features, see
#' [vctrs_extension_type()].
#'
#' @param storage_type The [data type][data-type] of the underlying storage
#' array.
#' @param storage_array An [Array] object of the underlying storage.
#' @param extension_type An [ExtensionType] instance.
#' @param extension_name The extension name. This should be namespaced using
#' "dot" syntax (i.e., "some_package.some_type"). The namespace "arrow"
#' is reserved for extension types defined by the Apache Arrow libraries.
#' @param extension_metadata A [raw()] or [character()] vector containing the
#' serialized version of the type. Character vectors must be length 1 and
#' are converted to UTF-8 before converting to [raw()].
#' @param type_class An [R6::R6Class] whose `$new()` class method will be
#' used to construct a new instance of the type.
#'
#' @return
#' - `new_extension_type()` returns an [ExtensionType] instance according
#' to the `type_class` specified.
#' - `new_extension_array()` returns an [ExtensionArray] whose `$type`
#' corresponds to `extension_type`.
#' - `register_extension_type()`, `unregister_extension_type()`
#' and `reregister_extension_type()` return `NULL`, invisibly.
#' @export
#'
#' @examples
#' # Create the R6 type whose methods control how Array objects are
#' # converted to R objects, how equality between types is computed,
#' # and how types are printed.
#' QuantizedType <- R6::R6Class(
#' "QuantizedType",
#' inherit = ExtensionType,
#' public = list(
#' # methods to access the custom metadata fields
#' center = function() private$.center,
#' scale = function() private$.scale,
#'
#' # called when an Array of this type is converted to an R vector
#' as_vector = function(extension_array) {
#' if (inherits(extension_array, "ExtensionArray")) {
#' unquantized_arrow <-
#' (extension_array$storage()$cast(float64()) / private$.scale) +
#' private$.center
#'
#' as.vector(unquantized_arrow)
#' } else {
#' super$as_vector(extension_array)
#' }
#' },
#'
#' # populate the custom metadata fields from the serialized metadata
#' deserialize_instance = function() {
#' vals <- as.numeric(strsplit(self$extension_metadata_utf8(), ";")[[1]])
#' private$.center <- vals[1]
#' private$.scale <- vals[2]
#' }
#' ),
#' private = list(
#' .center = NULL,
#' .scale = NULL
#' )
#' )
#'
#' # Create a helper type constructor that calls new_extension_type()
#' quantized <- function(center = 0, scale = 1, storage_type = int32()) {
#' new_extension_type(
#' storage_type = storage_type,
#' extension_name = "arrow.example.quantized",
#' extension_metadata = paste(center, scale, sep = ";"),
#' type_class = QuantizedType
#' )
#' }
#'
#' # Create a helper array constructor that calls new_extension_array()
#' quantized_array <- function(x, center = 0, scale = 1,
#' storage_type = int32()) {
#' type <- quantized(center, scale, storage_type)
#' new_extension_array(
#' Array$create((x - center) * scale, type = storage_type),
#' type
#' )
#' }
#'
#' # Register the extension type so that Arrow knows what to do when
#' # it encounters this extension type
#' reregister_extension_type(quantized())
#'
#' # Create Array objects and use them!
#' (vals <- runif(5, min = 19, max = 21))
#'
#' (array <- quantized_array(
#' vals,
#' center = 20,
#' scale = 2^15 - 1,
#' storage_type = int16()
#' )
#' )
#'
#' array$type$center()
#' array$type$scale()
#'
#' as.vector(array)
new_extension_type <- function(storage_type,
                               extension_name,
                               extension_metadata = raw(),
                               type_class = ExtensionType) {
  # Thin, documented wrapper around the class method; see the roxygen
  # block above for the full contract.
  ExtensionType$create(
    storage_type = storage_type,
    extension_name = extension_name,
    extension_metadata = extension_metadata,
    type_class = type_class
  )
}
#' @rdname new_extension_type
#' @export
new_extension_array <- function(storage_array, extension_type) {
  # Functional wrapper around ExtensionArray$create(); see roxygen above.
  ExtensionArray$create(x = storage_array, type = extension_type)
}
#' @rdname new_extension_type
#' @export
# Register `extension_type` with the Arrow C++ library so that arrays of
# this type can be re-created when reading data. Per the roxygen above,
# this errors if the same extension name is already registered.
register_extension_type <- function(extension_type) {
  assert_is(extension_type, "ExtensionType")
  arrow__RegisterRExtensionType(extension_type)
}
#' @rdname new_extension_type
#' @export
# Idempotent registration: try to register and, if that fails (typically
# because the extension name is already registered -- see the roxygen
# above), unregister the existing type and register again.
reregister_extension_type <- function(extension_type) {
  tryCatch(
    register_extension_type(extension_type),
    error = function(e) {
      unregister_extension_type(extension_type$extension_name())
      register_extension_type(extension_type)
    }
  )
}
#' @rdname new_extension_type
#' @export
# Remove the extension type registered under `extension_name`.
unregister_extension_type <- function(extension_name) {
  arrow__UnregisterRExtensionType(extension_name)
}
# ExtensionType subclass backing vctrs_extension_type(): stores a
# serialize()d vctrs prototype ("ptype") as the extension metadata and
# uses vctrs::vec_restore() to rebuild the original vector class when
# converting back to R.
VctrsExtensionType <- R6Class("VctrsExtensionType",
  inherit = ExtensionType,
  public = list(
    # The deserialized vctrs prototype (populated by deserialize_instance()).
    ptype = function() {
      private$.ptype
    },
    ToString = function() {
      # Capture print(ptype) output via sink() to a tempfile so the type
      # prints the same way the prototype itself would print.
      # NOTE(review): readLines(tf) runs while the sink is still active
      # (on.exit fires only at function exit) -- confirm the sink is
      # flushed before the file is read.
      tf <- tempfile()
      sink(tf)
      on.exit({
        sink(NULL)
        unlink(tf)
      })
      print(self$ptype())
      paste0(readLines(tf), collapse = "\n")
    },
    # The metadata is the serialize()d ptype (see vctrs_extension_type()).
    deserialize_instance = function() {
      private$.ptype <- unserialize(self$extension_metadata())
    },
    # Equality requires both the same R6 class and an identical prototype.
    ExtensionEquals = function(other) {
      if (!inherits(other, "VctrsExtensionType")) {
        return(FALSE)
      }
      identical(self$ptype(), other$ptype())
    },
    as_vector = function(extension_array) {
      if (inherits(extension_array, "ChunkedArray")) {
        # rather than convert one array at a time, use more Arrow
        # machinery to convert the whole ChunkedArray at once
        storage_arrays <- lapply(
          seq_len(extension_array$num_chunks) - 1L,
          function(i) extension_array$chunk(i)$storage()
        )
        storage <- chunked_array(!!!storage_arrays, type = self$storage_type())
        vctrs::vec_restore(storage$as_vector(), self$ptype())
      } else if (inherits(extension_array, "Array")) {
        # Convert the storage, then restore the vctrs class/attributes.
        vctrs::vec_restore(
          super$as_vector(extension_array),
          self$ptype()
        )
      } else {
        # Delegate so the parent class raises its consistent error.
        super$as_vector(extension_array)
      }
    }
  ),
  private = list(
    .ptype = NULL
  )
)
#' Extension type for generic typed vectors
#'
#' Most common R vector types are converted automatically to a suitable
#' Arrow [data type][data-type] without the need for an extension type. For
#' vector types whose conversion is not suitably handled by default, you can
#' create a [vctrs_extension_array()], which passes [vctrs::vec_data()] to
#' `Array$create()` and calls [vctrs::vec_restore()] when the [Array] is
#' converted back into an R vector.
#'
#' @param x A vctr (i.e., [vctrs::vec_is()] returns `TRUE`).
#' @param ptype A [vctrs::vec_ptype()], which is usually a zero-length
#' version of the object with the appropriate attributes set. This value
#' will be serialized using [serialize()], so it should not refer to any
#' R object that can't be saved/reloaded.
#' @inheritParams new_extension_type
#'
#' @return
#' - `vctrs_extension_array()` returns an [ExtensionArray] instance with a
#' `vctrs_extension_type()`.
#' - `vctrs_extension_type()` returns an [ExtensionType] instance for the
#' extension name "arrow.r.vctrs".
#' @export
#'
#' @examples
#' (array <- vctrs_extension_array(as.POSIXlt("2022-01-02 03:45", tz = "UTC")))
#' array$type
#' as.vector(array)
#'
#' temp_feather <- tempfile()
#' write_feather(arrow_table(col = array), temp_feather)
#' read_feather(temp_feather)
#' unlink(temp_feather)
vctrs_extension_array <- function(x, ptype = vctrs::vec_ptype(x),
                                  storage_type = NULL) {
  # Already wrapped? Return as-is so the function is idempotent.
  already_vctrs <- inherits(x, "ExtensionArray") &&
    inherits(x$type, "VctrsExtensionType")
  if (already_vctrs) {
    return(x)
  }
  vctrs::vec_assert(x)
  # Store the bare vec_data() and remember the prototype in the type's
  # metadata so the vector class can be restored on conversion back to R.
  storage_array <- Array$create(vctrs::vec_data(x), type = storage_type)
  new_extension_array(
    storage_array,
    vctrs_extension_type(ptype, storage_array$type)
  )
}
#' @rdname vctrs_extension_array
#' @export
vctrs_extension_type <- function(x,
                                 storage_type = infer_type(vctrs::vec_data(x))) {
  # The prototype (not the data) is what gets serialized into the
  # extension metadata; VctrsExtensionType unserializes it on creation.
  prototype <- vctrs::vec_ptype(x)
  new_extension_type(
    storage_type = storage_type,
    extension_name = "arrow.r.vctrs",
    extension_metadata = serialize(prototype, NULL),
    type_class = VctrsExtensionType
  )
}
|
n <- 4
M <- matrix(NA, n,n)
pmax( col(M), row(M)) | /Q030.R | no_license | haradakunihiko/investigation_of_r | R | false | false | 49 | r | n <- 4
M <- matrix(NA, n,n)
pmax( col(M), row(M)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_na_rows_cols.R
\name{remove_na_rows_cols}
\alias{remove_na_rows_cols}
\title{remove_na_rows_cols}
\usage{
remove_na_rows_cols(df, col_perc_max = 10, row_perc_max = 10)
}
\arguments{
\item{df}{A data frame.}

\item{col_perc_max}{Maximum percentage of missing values tolerated in a
column before the column is removed (default 10).}

\item{row_perc_max}{Maximum percentage of missing values tolerated in a
row before the row is removed (default 10).}
}
\value{
A data frame with the offending rows and columns removed.
}
\description{
remove_na_rows_cols
}
| /man/remove_na_rows_cols.Rd | permissive | abaghela/functionjunction | R | false | true | 338 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_na_rows_cols.R
\name{remove_na_rows_cols}
\alias{remove_na_rows_cols}
\title{remove_na_rows_cols}
\usage{
remove_na_rows_cols(df, col_perc_max = 10, row_perc_max = 10)
}
\arguments{
\item{df}{A data frame.}

\item{col_perc_max}{Maximum percentage of missing values tolerated in a
column before the column is removed (default 10).}

\item{row_perc_max}{Maximum percentage of missing values tolerated in a
row before the row is removed (default 10).}
}
\value{
A data frame with the offending rows and columns removed.
}
\description{
remove_na_rows_cols
}
|
# Masking variables: pull the measurement columns of the `engine` data
# frame out into short, readable names. (Right-assignment `->` replaced
# with conventional `<-`; behavior is identical.)
# NOTE(review): `engine` and `constants` must already exist in the
# workspace (read in elsewhere) before this script is sourced.
Load <- engine$LOAD..kg.
IP <- engine$Indicated.Power..Ip..value..kw.
Fuel_flow_rate <- engine$FUEL..ml.min.
Air_flow_rate <- engine$AIR..mm.of.water.
cal_water_flow <- engine$CALORIMETER.WATER.FLOW..lph.
eng_water_flow <- engine$ENGINE.WATER.FLOW..lph.
T1 <- engine$T1...C.
T2 <- engine$T2...C.
T3 <- engine$T3...C.
T4 <- engine$T4...C.
T5 <- engine$T5...C.
T6 <- engine$T6...C.
# Masking constants: same idea for the experiment constants table.
no._of_cylinders <- constants$No..of.cylinders
Comp_Ratio <- constants$Compression.Ratio
cv_fuel <- constants$Calorific.value.of.Fuel.KJ.Kg.
fuel_density <- constants$Fuel.Density.Kg.m.3.
dia_orifice <- constants$Diameter.of.Air.Intake.Orifice.mm.
Cd <- constants$Orifice.co.efficient.of.discharge
bore <- constants$Bore.mm.
stroke_lt <- constants$Stroke.Length.mm.
Dynamometer_lt <- constants$Dynamometer.arm.Length.mm.
| /Masking_variables.R | no_license | Apoorv978/Automobile-Performance-calculation | R | false | false | 827 | r | #Masking variables
# Masking variables: pull the measurement columns of the `engine` data
# frame out into short, readable names. (Right-assignment `->` replaced
# with conventional `<-`; behavior is identical.)
# NOTE(review): `engine` and `constants` must already exist in the
# workspace (read in elsewhere) before this script is sourced.
Load <- engine$LOAD..kg.
IP <- engine$Indicated.Power..Ip..value..kw.
Fuel_flow_rate <- engine$FUEL..ml.min.
Air_flow_rate <- engine$AIR..mm.of.water.
cal_water_flow <- engine$CALORIMETER.WATER.FLOW..lph.
eng_water_flow <- engine$ENGINE.WATER.FLOW..lph.
T1 <- engine$T1...C.
T2 <- engine$T2...C.
T3 <- engine$T3...C.
T4 <- engine$T4...C.
T5 <- engine$T5...C.
T6 <- engine$T6...C.
# Masking constants: same idea for the experiment constants table.
no._of_cylinders <- constants$No..of.cylinders
Comp_Ratio <- constants$Compression.Ratio
cv_fuel <- constants$Calorific.value.of.Fuel.KJ.Kg.
fuel_density <- constants$Fuel.Density.Kg.m.3.
dia_orifice <- constants$Diameter.of.Air.Intake.Orifice.mm.
Cd <- constants$Orifice.co.efficient.of.discharge
bore <- constants$Bore.mm.
stroke_lt <- constants$Stroke.Length.mm.
Dynamometer_lt <- constants$Dynamometer.arm.Length.mm.
|
#' Script for generating (fake) prediction data for use in phystables poster/write-up
# NOTE(review): rm(list = ls()) and setwd() with an absolute, user-specific
# path are script anti-patterns -- they clobber the caller's workspace and
# only run on one machine. Consider removing rm() and using a
# project-relative path instead.
rm(list=ls())
setwd("/Users/erikbrockbank/web/vullab/data_analysis/phystables_env/")
library(tidyverse)
# Set levels and labels for containment, complexity data (should align with real data graphs)
containment.levels = c(1, 2, 3)
complexity.levels = c(0, 1, 2, 3)
# Facet strip labels keyed by containment level (used via ggplot2 labeller()).
containment.labels = c(
  '1' = "low containment",
  '2' = "medium containment",
  '3' = "high containment"
)
# x-axis tick labels keyed by complexity level.
complexity.labels = c(
  '0' = "none",
  '1' = "low",
  '2' = "medium",
  '3' = "high"
)
# Data frame template for generated data
data.template = data.frame(
  'containment' = numeric(), # values from containment.levels above
  'complexity' = numeric(), # values from complexity.levels above
  'response.time' = numeric() # values will be continuous (fake) RTs
)
# Graph theme copied over from analysis script `data_processing.R`
default.theme = theme(
  # titles
  plot.title = element_text(face = "bold", size = 64, hjust = 0.5),
  axis.title.y = element_text(face = "bold", size = 48),
  axis.title.x = element_text(face = "bold", size = 48),
  # axis text
  axis.text.y = element_blank(),
  axis.text.x = element_text(face = "bold", size = 24, vjust = 0.65, hjust = 0.5, angle = 45),
  # facet text
  strip.text = element_text(face = "bold", size = 36),
  # backgrounds, lines
  panel.background = element_blank(),
  strip.background = element_blank(),
  panel.grid = element_blank(),
  axis.line = element_line(color = "black")
)
### PREDICTIONS: SIMULATION ONLY ###
# Pure-simulation prediction: RT ramps linearly with complexity
# (250, 500, 750, 1000 ms) and the same ramp is generated for every
# containment level, i.e. containment has no effect.
data.sim = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
  # min.rt = match(containment, containment.levels) * increment
  min.rt = increment
  # max.rt = match(containment, containment.levels) * increment + length(complexity.levels) * increment
  max.rt = length(complexity.levels) * increment
  # One RT per complexity level, stepping by `increment`.
  rt.vals = seq(from = min.rt,
                to = max.rt,
                by = increment)
  data.sim = rbind(data.sim, data.frame(containment = containment,
                                        complexity = complexity.levels,
                                        response.time = rt.vals))
}
data.sim %>%
  ggplot(aes(x = complexity, y = response.time)) +
  geom_point(size = 5, color = "red") +
  geom_line(size = 2, color = "red") +
  scale_x_continuous(labels = complexity.labels) +
  facet_wrap(.~containment,
             scales = "free",
             labeller = labeller(containment = containment.labels),
             strip.position = "right") +
  scale_y_continuous(limits = c(0, 1250), breaks = seq(0, 1250, by = increment)) +
  labs(x = "", y = "RT") +
  ggtitle("Simulation only") +
  default.theme +
  theme()
### PREDICTIONS: TOPOLOGY ONLY ###
# Topology-only prediction: RT depends only on containment (lower
# containment -> higher RT) and is flat across complexity -- rt.vals is
# a single value that data.frame() recycles across all complexity levels.
data.top = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
  # min.rt = (length(containment) + 1 - match(containment, containment.levels)) * increment
  # max.rt = (length(containment) + 1 - match(containment, containment.levels)) * increment + length(complexity.levels) * increment
  rt.level = (length(containment.levels) + 1 - match(containment, containment.levels)) * increment
  # seq() from rt.level to rt.level + 1 by 250 yields just rt.level,
  # so every complexity level gets the same RT (flat line).
  rt.vals = seq(from = rt.level,
                to = rt.level + 1,
                by = increment)
  data.top = rbind(data.top, data.frame(containment = containment,
                                        complexity = complexity.levels,
                                        response.time = rt.vals))
}
data.top %>%
  ggplot(aes(x = complexity, y = response.time)) +
  geom_point(size = 5, color = "red") +
  geom_line(size = 2, color = "red") +
  scale_x_continuous(labels = complexity.labels) +
  facet_wrap(.~containment,
             scales = "free",
             labeller = labeller(containment = containment.labels),
             strip.position = "right") +
  scale_y_continuous(limits = c(0, 1000), breaks = seq(0, 1000, by = increment)) +
  labs(x = "", y = "RT") +
  ggtitle("Topology only") +
  default.theme
### PREDICTIONS: META-REASONING ###
# Flexible-reasoning prediction: RT rises with complexity but plateaus
# sooner at higher containment (the rt.vals overwrites below flatten the
# tail of the ramp for the last two containment levels).
data.meta = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
  # min.rt = match(containment, containment.levels) * increment
  min.rt = increment
  # max.rt = match(containment, containment.levels) * increment + length(complexity.levels) * increment
  max.rt = length(complexity.levels) * increment
  rt.vals = seq(from = min.rt,
                to = max.rt + 1,
                by = increment)
  # Medium containment: plateau at the third complexity level.
  if (match(containment, containment.levels) == length(containment.levels) - 1) {
    rt.vals[4] = rt.vals[3]
  }
  # High containment: plateau at the second complexity level.
  if (match(containment, containment.levels) == length(containment.levels)) {
    rt.vals[3] = rt.vals[2]
    rt.vals[4] = rt.vals[2]
  }
  data.meta = rbind(data.meta, data.frame(containment = containment,
                                          complexity = complexity.levels,
                                          response.time = rt.vals))
}
data.meta %>%
  ggplot(aes(x = complexity, y = response.time)) +
  geom_point(size = 5, color = "red") +
  geom_line(size = 2, color = "red") +
  scale_x_continuous(labels = complexity.labels) +
  facet_wrap(.~containment,
             scales = "free",
             labeller = labeller(containment = containment.labels),
             strip.position = "right") +
  scale_y_continuous(limits = c(0, 1250), breaks = seq(0, 1250, by = increment)) +
  labs(x = "Simulation complexity", y = "RT") +
  ggtitle("Flexible reasoning") +
  default.theme +
  theme()
| /phystables_env_data/prediction_data_generation.R | no_license | erik-brockbank/data_analysis | R | false | false | 5,627 | r | #' Script for generating (fake) prediction data for use in phystables poster/write-up
rm(list=ls())
setwd("/Users/erikbrockbank/web/vullab/data_analysis/phystables_env/")
library(tidyverse)
# Set levels and labels for containment, complexity data (should align with real data graphs)
containment.levels = c(1, 2, 3)
complexity.levels = c(0, 1, 2, 3)
containment.labels = c(
'1' = "low containment",
'2' = "medium containment",
'3' = "high containment"
)
complexity.labels = c(
'0' = "none",
'1' = "low",
'2' = "medium",
'3' = "high"
)
# Data frame template for generated data
data.template = data.frame(
'containment' = numeric(), # values from containment.levels above
'complexity' = numeric(), # values from complexity.levels above
'response.time' = numeric() # values will be continuous (fake) RTs
)
# Graph theme copied over from analysis script `data_processing.R`
default.theme = theme(
# titles
plot.title = element_text(face = "bold", size = 64, hjust = 0.5),
axis.title.y = element_text(face = "bold", size = 48),
axis.title.x = element_text(face = "bold", size = 48),
# axis text
axis.text.y = element_blank(),
axis.text.x = element_text(face = "bold", size = 24, vjust = 0.65, hjust = 0.5, angle = 45),
# facet text
strip.text = element_text(face = "bold", size = 36),
# backgrounds, lines
panel.background = element_blank(),
strip.background = element_blank(),
panel.grid = element_blank(),
axis.line = element_line(color = "black")
)
### PREDICTIONS: SIMULATION ONLY ###
data.sim = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
# min.rt = match(containment, containment.levels) * increment
min.rt = increment
# max.rt = match(containment, containment.levels) * increment + length(complexity.levels) * increment
max.rt = length(complexity.levels) * increment
rt.vals = seq(from = min.rt,
to = max.rt,
by = increment)
data.sim = rbind(data.sim, data.frame(containment = containment,
complexity = complexity.levels,
response.time = rt.vals))
}
data.sim %>%
ggplot(aes(x = complexity, y = response.time)) +
geom_point(size = 5, color = "red") +
geom_line(size = 2, color = "red") +
scale_x_continuous(labels = complexity.labels) +
facet_wrap(.~containment,
scales = "free",
labeller = labeller(containment = containment.labels),
strip.position = "right") +
scale_y_continuous(limits = c(0, 1250), breaks = seq(0, 1250, by = increment)) +
labs(x = "", y = "RT") +
ggtitle("Simulation only") +
default.theme +
theme()
### PREDICTIONS: TOPOLOGY ONLY ###
data.top = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
# min.rt = (length(containment) + 1 - match(containment, containment.levels)) * increment
# max.rt = (length(containment) + 1 - match(containment, containment.levels)) * increment + length(complexity.levels) * increment
rt.level = (length(containment.levels) + 1 - match(containment, containment.levels)) * increment
rt.vals = seq(from = rt.level,
to = rt.level + 1,
by = increment)
data.top = rbind(data.top, data.frame(containment = containment,
complexity = complexity.levels,
response.time = rt.vals))
}
data.top %>%
ggplot(aes(x = complexity, y = response.time)) +
geom_point(size = 5, color = "red") +
geom_line(size = 2, color = "red") +
scale_x_continuous(labels = complexity.labels) +
facet_wrap(.~containment,
scales = "free",
labeller = labeller(containment = containment.labels),
strip.position = "right") +
scale_y_continuous(limits = c(0, 1000), breaks = seq(0, 1000, by = increment)) +
labs(x = "", y = "RT") +
ggtitle("Topology only") +
default.theme
### PREDICTIONS: META-REASONING ###
data.meta = data.template
increment = 250 # ms used as starting point, will not be displayed numerically
for (containment in containment.levels) {
# min.rt = match(containment, containment.levels) * increment
min.rt = increment
# max.rt = match(containment, containment.levels) * increment + length(complexity.levels) * increment
max.rt = length(complexity.levels) * increment
rt.vals = seq(from = min.rt,
to = max.rt + 1,
by = increment)
if (match(containment, containment.levels) == length(containment.levels) - 1) {
rt.vals[4] = rt.vals[3]
}
if (match(containment, containment.levels) == length(containment.levels)) {
rt.vals[3] = rt.vals[2]
rt.vals[4] = rt.vals[2]
}
data.meta = rbind(data.meta, data.frame(containment = containment,
complexity = complexity.levels,
response.time = rt.vals))
}
data.meta %>%
ggplot(aes(x = complexity, y = response.time)) +
geom_point(size = 5, color = "red") +
geom_line(size = 2, color = "red") +
scale_x_continuous(labels = complexity.labels) +
facet_wrap(.~containment,
scales = "free",
labeller = labeller(containment = containment.labels),
strip.position = "right") +
scale_y_continuous(limits = c(0, 1250), breaks = seq(0, 1250, by = increment)) +
labs(x = "Simulation complexity", y = "RT") +
ggtitle("Flexible reasoning") +
default.theme +
theme()
|
# createCondition: stub, not yet implemented.
#
# Judging by the signature, it is meant to add a new condition variable
# (named `newvariablename`) to `data`, built from the id vectors in
# `listid` and the levels in `conditionLevel`.
# NOTE(review): the default arguments reference undefined names
# (listid1, conditionLevel1, ...) and would error if evaluated, and the
# body is empty. TODO: implement or drop this placeholder.
createCondition<-function(data,listid=c(listid1,listid2,...),newvariablename,conditionLevel=c(conditionLevel1,conditionLevel2,...)){
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_prediction_sites.R
\name{calc_prediction_sites}
\alias{calc_prediction_sites}
\title{Calculate prediction sites for 'SSN' object.}
\usage{
calc_prediction_sites(predictions, dist = NULL, nsites = 10, netIDs = NULL)
}
\arguments{
\item{predictions}{string giving the name for the prediction sites map.}
\item{dist}{number giving the distance between the points to create in map
units.}
\item{nsites}{integer giving the approximate number of sites to create}
\item{netIDs}{integer (optional): create prediction sites only on streams with
these netID(s).}
}
\description{
A vector (points) map of prediction sites is created and several
attributes are assigned.
}
\details{
Either \code{dist} or \code{nsites} must be provided. If \code{dist}
is NULL, it is estimated by dividing the total stream length in the map by
\code{nsites}; the number of sites actually derived might therefore be a bit
smaller than \code{nsites}.
Steps include:
\itemize{
\item{Place points on edges with given distance from each other}
\item{Save the point coordinates in NEAR_X and NEAR_Y.}
\item{Assign unique identifiers (needed by the 'SSN' package) 'pid'
and 'locID'.}
\item{Get 'rid' and 'netID' of the stream segment the site
intersects with (from map 'edges').}
\item{Calculate upstream distance for
each point ('upDist').}
\item{Calculate distance ratio ('distRatio') between
position of the site on the edge (= distance traveled from lower end of the
edge to the site) and the total length of the edge.} }
'pid' and 'locID' are identical, unique numbers. 'upDist' is calculated using
\href{https://grass.osgeo.org/grass72/manuals/addons/r.stream.distance.html}{r.stream.distance}.
Points are created using
\href{https://grass.osgeo.org/grass72/manuals/v.segment.html}{v.segment}.
}
\note{
\code{\link{import_data}}, \code{\link{derive_streams}} and
\code{\link{calc_edges}} must be run before.
}
\examples{
\donttest{
# Initiate GRASS session
if(.Platform$OS.type == "windows"){
gisbase = "c:/Program Files/GRASS GIS 7.6"
} else {
gisbase = "/usr/lib/grass74/"
}
initGRASS(gisBase = gisbase,
home = tempdir(),
override = TRUE)
# Load files into GRASS
dem_path <- system.file("extdata", "nc", "elev_ned_30m.tif", package = "openSTARS")
sites_path <- system.file("extdata", "nc", "sites_nc.shp", package = "openSTARS")
setup_grass_environment(dem = dem_path)
import_data(dem = dem_path, sites = sites_path)
gmeta()
# Derive streams from DEM
derive_streams(burn = 0, accum_threshold = 700, condition = TRUE, clean = TRUE)
check_compl_confluences()
calc_edges()
calc_sites()
calc_prediction_sites(predictions = "preds", dist = 2500)
library(sp)
dem <- readRAST('dem', ignore.stderr = TRUE)
sites <- readVECT('sites', ignore.stderr = TRUE)
preds <- readVECT('preds', ignore.stderr = TRUE)
edges <- readVECT('edges', ignore.stderr = TRUE)
plot(dem, col = terrain.colors(20))
lines(edges, col = 'blue', lwd = 2)
points(sites, pch = 4)
points(preds, pch = 19, col = "steelblue")
}
}
\author{
Mira Kattwinkel \email{mira.kattwinkel@gmx.net}
}
| /man/calc_prediction_sites.Rd | permissive | HunterGleason/openSTARS | R | false | true | 3,133 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_prediction_sites.R
\name{calc_prediction_sites}
\alias{calc_prediction_sites}
\title{Calculate prediction sites for 'SSN' object.}
\usage{
calc_prediction_sites(predictions, dist = NULL, nsites = 10, netIDs = NULL)
}
\arguments{
\item{predictions}{string giving the name for the prediction sites map.}
\item{dist}{number giving the distance between the points to create in map
units.}
\item{nsites}{integer giving the approximate number of sites to create}
\item{netIDs}{integer (optional): create prediction sites only on streams with
these netID(s).}
}
\description{
A vector (points) map of prediction sites is created and several
attributes are assigned.
}
\details{
Either \code{dist} or \code{nsites} must be provided. If \code{dist}
is NULL, it is estimated by dividing the total stream length in the map by
\code{nsites}; the number of sites actually derived might therefore be a bit
smaller than \code{nsites}.
Steps include:
\itemize{
\item{Place points on edges with given distance from each other}
\item{Save the point coordinates in NEAR_X and NEAR_Y.}
\item{Assign unique identifiers (needed by the 'SSN' package) 'pid'
and 'locID'.}
\item{Get 'rid' and 'netID' of the stream segment the site
intersects with (from map 'edges').}
\item{Calculate upstream distance for
each point ('upDist').}
\item{Calculate distance ratio ('distRatio') between
position of the site on the edge (= distance traveled from lower end of the
edge to the site) and the total length of the edge.} }
'pid' and 'locID' are identical, unique numbers. 'upDist' is calculated using
\href{https://grass.osgeo.org/grass72/manuals/addons/r.stream.distance.html}{r.stream.distance}.
Points are created using
\href{https://grass.osgeo.org/grass72/manuals/v.segment.html}{v.segment}.
}
\note{
\code{\link{import_data}}, \code{\link{derive_streams}} and
\code{\link{calc_edges}} must be run before.
}
\examples{
\donttest{
# Initiate GRASS session
if(.Platform$OS.type == "windows"){
gisbase = "c:/Program Files/GRASS GIS 7.6"
} else {
gisbase = "/usr/lib/grass74/"
}
initGRASS(gisBase = gisbase,
home = tempdir(),
override = TRUE)
# Load files into GRASS
dem_path <- system.file("extdata", "nc", "elev_ned_30m.tif", package = "openSTARS")
sites_path <- system.file("extdata", "nc", "sites_nc.shp", package = "openSTARS")
setup_grass_environment(dem = dem_path)
import_data(dem = dem_path, sites = sites_path)
gmeta()
# Derive streams from DEM
derive_streams(burn = 0, accum_threshold = 700, condition = TRUE, clean = TRUE)
check_compl_confluences()
calc_edges()
calc_sites()
calc_prediction_sites(predictions = "preds", dist = 2500)
library(sp)
dem <- readRAST('dem', ignore.stderr = TRUE)
sites <- readVECT('sites', ignore.stderr = TRUE)
preds <- readVECT('preds', ignore.stderr = TRUE)
edges <- readVECT('edges', ignore.stderr = TRUE)
plot(dem, col = terrain.colors(20))
lines(edges, col = 'blue', lwd = 2)
points(sites, pch = 4)
points(preds, pch = 19, col = "steelblue")
}
}
\author{
Mira Kattwinkel \email{mira.kattwinkel@gmx.net}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcdTNZ.r
\name{calcdTNZ}
\alias{calcdTNZ}
\title{dTNZ, the Distance from the Thermoneutral Zone}
\usage{
calcdTNZ(ht, wt, age, gender, clo, vel, tskObs, taObs, met, rh, deltaT =.1,
fBasMet = "rosa", fSA = "duBois", percCov = 0, TcMin = 36, TcMax = 38,
plotZone = FALSE)
}
\arguments{
\item{ht}{a numeric value presenting body height in [cm]}
\item{wt}{a numeric value presenting body weight in [kg]}
\item{age}{a numeric value presenting the age in [years]}
\item{gender}{a numeric value presenting sex (female = 1, male = 2)}
\item{clo}{a numeric value presenting clothing insulation level in [clo]}
\item{vel}{a numeric value presenting air velocity in [m/s]}
\item{tskObs}{a numeric value presenting actual mean skin temperature in [degree C]}
\item{taObs}{a numeric value presenting air temperature in [degree C]}
\item{met}{a numeric value presenting metabolic rate (activity related) in [met]}
\item{rh}{a numeric value presenting relative humidity in [\%]}
\item{deltaT}{a numeric value presenting the resolution of the matrix to be used}
\item{fBasMet}{a string presenting the method of calculating basal metabolic
rate. Needs to be one of "rosa", "harris", "miflin", or "fixed". Fixed will result in the value of 58.2 W/m2.}
\item{fSA}{a string presenting the method of calculating the surface area.
Needs to be one of "duBois" or "mosteller".}
\item{percCov}{a numeric value between 0 and 1 presenting the percentage of
the body covered by clothes in [\%]}
\item{TcMin}{a numeric value presenting the minimum allowed core temperature
in [degree C].}
\item{TcMax}{a numeric value presenting the maximum allowed core temperature
in [degree C].}
\item{plotZone}{a boolean variable TRUE or FALSE stating whether TNZ should
be plotted or not.}
}
\value{
\code{calcdTNZ} returns a dataframe with the columns dTNZ, dTNZTs, dTNZTa. Thereby \cr{
\code{dTNZ} The absolute distance to the centroid of the thermoneutral zone \cr
\code{dTNZTs} Relative value of distance assuming skin temperature to be dominant for sensation\cr
\code{dTNZTa} Relative value of distance assuming ambient temperature to be dominant for sensation \cr
}
}
\description{
calcdTNZ calculates the distance from the thermoneutral zone,
either skin temperature or room air related.
}
\details{
The percentage of the body covered by clothes can be estimated e.g.
based on ISO 9920 Appendix H (Figure H.1). A typical winter case leads to a
value of around .86, in the summer case this goes down to values around .68.
}
\note{
This function was used in earlier versions of TNZ calculation (see references
above). The newest version is \code{calcTNZPDF}. In case one of the variables
is not given, a standard value will be taken from a list (see
\code{\link{createCond}} for details).
}
\examples{
## Calculate all values
calcdTNZ(171, 71, 45, 1, .6, .12, 37.8, 25.3, 1.1, 50)
}
\references{
Kingma, Schweiker, Wagner & van Marken Lichtenbelt
Exploring the potential of a biophysical model to understand thermal sensation
Proceedings of 9th Windsor Conference: Making Comfort Relevant Cumberland
Lodge, Windsor, UK, 2016.
Kingma & van Marken Lichtenbelt (2015) <doi:10.1038/nclimate2741>
Kingma, Frijns, Schellen & van Marken Lichtenbelt (2014) <doi:10.4161/temp.29702>
}
\seealso{
see also \code{\link{calcTNZPDF}} and \code{\link{calcComfInd}}
}
\author{
Marcel Schweiker and Boris Kingma
}
| /man/calcdTNZ.Rd | no_license | cran/comf | R | false | true | 3,567 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcdTNZ.r
\name{calcdTNZ}
\alias{calcdTNZ}
\title{dTNZ, the Distance from the Thermoneutral Zone}
\usage{
calcdTNZ(ht, wt, age, gender, clo, vel, tskObs, taObs, met, rh, deltaT =.1,
fBasMet = "rosa", fSA = "duBois", percCov = 0, TcMin = 36, TcMax = 38,
plotZone = FALSE)
}
\arguments{
\item{ht}{a numeric value presenting body height in [cm]}
\item{wt}{a numeric value presenting body weight in [kg]}
\item{age}{a numeric value presenting the age in [years]}
\item{gender}{a numeric value presenting sex (female = 1, male = 2)}
\item{clo}{a numeric value presenting clothing insulation level in [clo]}
\item{vel}{a numeric value presenting air velocity in [m/s]}
\item{tskObs}{a numeric value presenting actual mean skin temperature in [degree C]}
\item{taObs}{a numeric value presenting air temperaturein [degree C]}
\item{met}{a numeric value presenting metabolic rate (activity related) in [met]}
\item{rh}{a numeric value presenting realtive humidity in [\%]}
\item{deltaT}{a numeric value presenting the resolution of the matrix to be used}
\item{fBasMet}{a string presenting the method of calculating basal metbolic
rate. Needs to be one of "rosa", "harris", "miflin", or "fixed". Fixed will result in the value of 58.2 W/m2.}
\item{fSA}{a string presenting the method of calculating the surface area.
Needs to be one of "duBois" or "mosteller".}
\item{percCov}{a numeric value between 0 and 1 presenting the percentage of
the body covered by clothes in [\%]}
\item{TcMin}{a numeric value presenting the minimum allowed core temperature
in [degree C].}
\item{TcMax}{a numeric value presenting the maximum allowed core temperature
in [degree C].}
\item{plotZone}{a boolean variable TRUE or FALSE stating, wether TNZ should
be plotted or not.}
}
\value{
\code{calcdTNZ} returns a dataframe with the columns dTNZ, dTNZTs, dTNZTa. Thereby \cr{
\code{dTNZ} The absolute distance to the centroid of the thermoneutral zone \cr
\code{dTNZTs} Relative value of distance assuming skin temperature to be dominant for sensation\cr
\code{dTNZTa} Relative value of distance assuming ambient temperature to be dominant for sensation \cr
}
}
\description{
calcdTNZ calculates the distance from the thermoneutral zone,
either skin temperature or room air related.
}
\details{
The percentage of the body covered by clothes can be estimated e.g.
based on ISO 9920 Appendix H (Figure H.1). A typical winter case leads to a
value of around .86, in the summer case this goes down to values around .68.
}
\note{
This function was used in earlier versions of TNZ calculation (see references
above). The newest version is \code{calcTNZPDF}.In case one of the variables
is not given, a standard value will be taken from a list (see
\code{\link{createCond}} for details.
}
\examples{
## Calculate all values
calcdTNZ(171, 71, 45, 1, .6, .12, 37.8, 25.3, 1.1, 50)
}
\references{
Kingma, Schweiker, Wagner & van Marken Lichtenbelt
Exploring the potential of a biophysical model to understand thermal sensation
Proceedings of 9th Windsor Conference: Making Comfort Relevant Cumberland
Lodge, Windsor, UK, 2016.
Kingma & van Marken Lichtenbelt (2015) <doi:10.1038/nclimate2741>
Kingma, Frijns, Schellen & van Marken Lichtenbelt (2014) <doi:10.4161/temp.29702>
}
\seealso{
see also \code{\link{calcTNZPDF}} and \code{\link{calcComfInd}}
}
\author{
Marcel Schweiker and Boris Kingma
}
|
# Reading data from APIs
# install.packages("httr")
# Load libraries
library(httr)
library(RJSONIO)
library(jsonlite)

# --- Credentials -------------------------------------------------------------
# SECURITY: the original script hard-coded the Twitter consumer key/secret and
# access token/secret here. Never commit credentials to source control --
# revoke any previously committed keys and supply replacements via environment
# variables (e.g. set them in ~/.Renviron).
consumer_key    <- Sys.getenv("TWITTER_CONSUMER_KEY")
consumer_secret <- Sys.getenv("TWITTER_CONSUMER_SECRET")
access_token    <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_secret   <- Sys.getenv("TWITTER_ACCESS_SECRET")
if (consumer_key == "" || consumer_secret == "") {
  stop("Twitter credentials are not set; see the comments above.", call. = FALSE)
}

# One OAuth 1.0 app/signature serves both requests below (the original script
# rebuilt the identical objects twice).
myapp <- oauth_app("twitter", key = consumer_key, secret = consumer_secret)
sig <- sign_oauth1.0(myapp, token = access_token, token_secret = access_secret)

# Accessing Twitter from R: fetch the authenticated user's home timeline.
homeTL <- GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)
# Convert the parsed response back to JSON text and re-parse with jsonlite to
# obtain a convenient data-frame representation.
json1 <- content(homeTL)
json2 <- jsonlite::fromJSON(toJSON(json1))
json2[1, 1:4]

# Getting tweets from a specific user's timeline.
search_tw <- GET("https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=_rgmendes&count=10", sig)
json3 <- content(search_tw)
json4 <- jsonlite::fromJSON(toJSON(json3))
json4$text
| /03_Getting_and_Cleaning_Data/scripts/reading_data_from_APIs.R | no_license | rgmendes/Data_Science_Specialization | R | false | false | 1,103 | r | # Reading data from APIs
# install.packages("httr")
# Load libraries
library(httr)
library(RJSONIO)
library(jsonlite)
# Accessing Twitter from R
myapp = oauth_app("twitter", key="1qzf050hx4mKfXBsJmUshZys7",secret="BtfRoDztYaJ3E7Or3sPia5EQXH7ErQ9Yu0GFs6z1uMrHBKjnK0")
sig = sign_oauth1.0(myapp, token = "709291514-kSRN9tgbrq11SzcpzpfOfPbq7WMC2pVzIAHfpXsi", token_secret = "eT0YLayubZAAQlALbuwnNkxDlvaxUus4qdVIVj4nPj1nV")
homeTL = GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)
# Converting the json object
json1 = content(homeTL)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1,1:4]
# Getting Tweets from user
myapp = oauth_app("twitter", key="1qzf050hx4mKfXBsJmUshZys7",secret="BtfRoDztYaJ3E7Or3sPia5EQXH7ErQ9Yu0GFs6z1uMrHBKjnK0")
sig = sign_oauth1.0(myapp, token = "709291514-kSRN9tgbrq11SzcpzpfOfPbq7WMC2pVzIAHfpXsi", token_secret = "eT0YLayubZAAQlALbuwnNkxDlvaxUus4qdVIVj4nPj1nV")
search_tw = GET("https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=_rgmendes&count=10", sig)
json3 = content(search_tw)
json4 = jsonlite::fromJSON(toJSON(json3))
json4$text
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Read the full dataset; '?' marks missing values. Spell out TRUE/FALSE:
# T and F are ordinary variables in R and can be reassigned.
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", nrows = 2075259, check.names = FALSE,
                  stringsAsFactors = FALSE, comment.char = "", quote = '\"')
# Keep only the two target days (dates are stored as d/m/Y strings).
hpc1 <- subset(hpc, Date %in% c("1/2/2007", "2/2/2007"))
hpc1$Date <- as.Date(hpc1$Date, format = "%d/%m/%Y")
# Draw the histogram straight to a 480x480 PNG device (closed by dev.off()).
png(file = "Plot1.png", width = 480, height = 480)
hist(hpc1$Global_active_power, xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency", main = "Global Active Power", col = "red")
dev.off() | /plot1.R | no_license | aftabsorwar/ExData_Plotting1 | R | false | false | 478 | r | hpc <- read.table("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
hpc1 <- subset(hpc, Date %in% c("1/2/2007","2/2/2007"))
hpc1$Date <- as.Date(hpc1$Date, format="%d/%m/%Y")
png(file="Plot1.png",width = 480,height = 480)
hist(hpc1$Global_active_power,xlab="Global Active Power (kilowatts)",ylab = "Frequency",main = "Global Active Power",col="red")
dev.off() |
# Requesting more template + model writers than exist in the CSAFE
# database must raise an error.
test_that("an error occurs when the sum of template and model writers is greater than the total number of CSAFE writers", {
  expect_error(
    select_csafe_docs(
      num_template_writers = 200,
      template_sessions = 1,
      template_reps = 1,
      template_prompts = "London Letter",
      template_seed = 100,
      num_model_writers = 300,
      model_sessions = 1,
      model_reps = c(1, 2, 3),
      model_prompts = "Wizard of Oz",
      model_seed = 101,
      questioned_sessions = 3,
      questioned_reps = 1,
      questioned_prompts = "Wizard of Oz"
    )
  )
})
# No writer may appear in both the template set and the model set,
# for two different session/rep/seed configurations.
test_that("no writers are in both the template data frame and the model data frame", {
  split_a <- select_csafe_docs(
    num_template_writers = 50, template_sessions = 1, template_reps = 1,
    template_prompts = "London Letter", template_seed = 100,
    num_model_writers = 40, model_sessions = 1, model_reps = c(1, 2, 3),
    model_prompts = "Wizard of Oz", model_seed = 101,
    questioned_sessions = 3, questioned_reps = 1,
    questioned_prompts = "Wizard of Oz"
  )
  split_b <- select_csafe_docs(
    num_template_writers = 50, template_sessions = c(1, 2, 3),
    template_reps = c(1, 2, 3), template_prompts = "London Letter",
    template_seed = 200,
    num_model_writers = 40, model_sessions = 1, model_reps = c(1, 2, 3),
    model_prompts = "Wizard of Oz", model_seed = 300,
    questioned_sessions = 3, questioned_reps = 1,
    questioned_prompts = "Wizard of Oz"
  )
  expect_equal(length(intersect(split_a$template$writer, split_a$model$writer)), 0)
  expect_equal(length(intersect(split_b$template$writer, split_b$model$writer)), 0)
})
# The model and questioned sets must be built from the same writers.
test_that("the writers are the same in the model and questioned data frames", {
  docs <- select_csafe_docs(
    num_template_writers = 50, template_sessions = 1, template_reps = 1,
    template_prompts = "London Letter", template_seed = 100,
    num_model_writers = 40, model_sessions = 1, model_reps = c(1, 2, 3),
    model_prompts = "Wizard of Oz", model_seed = 101,
    questioned_sessions = 3, questioned_reps = 1,
    questioned_prompts = "Wizard of Oz"
  )
  expect_identical(unique(docs$model$writer), unique(docs$questioned$writer))
})
# The model and questioned sets must not share any document.
test_that("the documents are different in the model and questioned data frames", {
  docs <- select_csafe_docs(
    num_template_writers = 50, template_sessions = 1, template_reps = 1,
    template_prompts = "London Letter", template_seed = 100,
    num_model_writers = 40, model_sessions = 1, model_reps = c(1, 2, 3),
    model_prompts = "Wizard of Oz", model_seed = 101,
    questioned_sessions = 3, questioned_reps = 1,
    questioned_prompts = "Wizard of Oz"
  )
  expect_equal(length(intersect(docs$questioned$doc, docs$model$doc)), 0)
})
# Requesting the same session for both the model and questioned sets
# (so the same documents would be used twice) must raise an error.
test_that("an error occurs when the same documents are used in the model and questioned data frames", {
  expect_error(
    select_csafe_docs(
      num_template_writers = 50,
      template_sessions = 1,
      template_reps = 1,
      template_prompts = "London Letter",
      template_seed = 100,
      num_model_writers = 40,
      model_sessions = 1,
      model_reps = c(1, 2, 3),
      model_prompts = "Wizard of Oz",
      model_seed = 101,
      questioned_sessions = 1,
      questioned_reps = 1,
      questioned_prompts = "Wizard of Oz"
    )
  )
})
| /tests/testthat/test-SelectCSAFEDocs.R | no_license | CSAFE-ISU/handwriter | R | false | false | 5,117 | r | test_that("an error occurs when the sum of template and model writers is greater than the total number of CSAFE writers", {
expect_error(select_csafe_docs(num_template_writers = 200,
template_sessions = 1,
template_reps = 1,
template_prompts = "London Letter",
template_seed = 100,
num_model_writers = 300,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 101,
questioned_sessions = 3,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz"))
})
test_that("no writers are in both the template data frame and the model data frame", {
docs1 <- select_csafe_docs(num_template_writers = 50,
template_sessions = 1,
template_reps = 1,
template_prompts = "London Letter",
template_seed = 100,
num_model_writers = 40,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 101,
questioned_sessions = 3,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz")
docs2 <- select_csafe_docs(num_template_writers = 50,
template_sessions = c(1,2,3),
template_reps = c(1,2,3),
template_prompts = "London Letter",
template_seed = 200,
num_model_writers = 40,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 300,
questioned_sessions = 3,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz")
expect_equal(length(intersect(docs1$template$writer, docs1$model$writer)), 0)
expect_equal(length(intersect(docs2$template$writer, docs2$model$writer)), 0)
})
test_that("the writers are the same in the model and questioned data frames", {
docs <- select_csafe_docs(num_template_writers = 50,
template_sessions = 1,
template_reps = 1,
template_prompts = "London Letter",
template_seed = 100,
num_model_writers = 40,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 101,
questioned_sessions = 3,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz")
expect_identical(unique(docs$model$writer), unique(docs$questioned$writer))
})
test_that("the documents are different in the model and questioned data frames", {
docs <- select_csafe_docs(num_template_writers = 50,
template_sessions = 1,
template_reps = 1,
template_prompts = "London Letter",
template_seed = 100,
num_model_writers = 40,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 101,
questioned_sessions = 3,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz")
expect_equal(length(intersect(docs$questioned$doc, docs$model$doc)), 0)
})
test_that("an error occurs when the same documents are used in the model and questioned data frames", {
expect_error(select_csafe_docs(num_template_writers = 50,
template_sessions = 1,
template_reps = 1,
template_prompts = "London Letter",
template_seed = 100,
num_model_writers = 40,
model_sessions = 1,
model_reps = c(1,2,3),
model_prompts = "Wizard of Oz",
model_seed = 101,
questioned_sessions = 1,
questioned_reps = 1,
questioned_prompts = "Wizard of Oz"))
})
|
#' Plot a Lineweaver-Burk diagram
#'
#' Draws the double-reciprocal plot (1/velocity against 1/substrate
#' concentration) of enzyme kinetics data, with a fitted linear
#' regression line. Note: the ordinate-intercept computation mentioned
#' in earlier versions of this file is currently disabled (see the
#' commented-out code at the end of the function body).
#'
#' @param sub substrate concentration (numeric vector)
#' @param vel enzyme velocity (numeric vector, same length as \code{sub})
#' @param title title of the plot
#' @param xlab label of the abscissa
#' @param ylab label of the ordinate
#'
#' @return A ggplot object; print it to render the plot.
#'
Lineweaver_Burk <- function(sub, vel, title = "Lineweaver-Burk-Plot", xlab = "1/sub", ylab = "1/vel"){
LiBePlt <- (ggplot2::ggplot(mapping = ggplot2::aes(
x = 1/sub,
y = 1/vel
))+
ggplot2::geom_point()+
# linear fit; fullrange extends the fitted line to the axis limits so
# the intercepts are visible
ggplot2::geom_smooth(
method = "lm",
fullrange = TRUE
)+
# axes start exactly at the origin (expand = c(0, 0)) so the ordinate
# intercept can be read off the plot
ggplot2::scale_x_continuous(expand=c(0,0), limits=c(0, max(1/sub+ 1))) +
ggplot2::scale_y_continuous(expand=c(0,0), limits=c(0, max(1/vel + .01))) +
ggplot2::ggtitle(title)+
ggplot2::xlab(xlab)+
ggplot2::ylab(ylab))
return(LiBePlt)
# Leftover (disabled): numeric slope/intercept of the double-reciprocal fit.
# Velo <-1/vel
# Subs <- 1/sub
# stats::coefficients(
# stats::lm(Velo~Subs)
# )
}
| /R/Lineweaver_Burk.R | no_license | abusjahn/Biotech | R | false | false | 885 | r | #' Plot a Lineweaver-Burk diagram and compute the ordinate intercept
#'
#' @param sub substrate concentration
#' @param vel enzyme velocity
#' @param title title of the plot
#' @param xlab lable of the abscissa
#' @param ylab lable of the ordinate
#'
Lineweaver_Burk <- function(sub, vel, title = "Lineweaver-Burk-Plot", xlab = "1/sub", ylab = "1/vel"){
LiBePlt <- (ggplot2::ggplot(mapping = ggplot2::aes(
x = 1/sub,
y = 1/vel
))+
ggplot2::geom_point()+
ggplot2::geom_smooth(
method = "lm",
fullrange = TRUE
)+
ggplot2::scale_x_continuous(expand=c(0,0), limits=c(0, max(1/sub+ 1))) +
ggplot2::scale_y_continuous(expand=c(0,0), limits=c(0, max(1/vel + .01))) +
ggplot2::ggtitle(title)+
ggplot2::xlab(xlab)+
ggplot2::ylab(ylab))
return(LiBePlt)
# Velo <-1/vel
# Subs <- 1/sub
# stats::coefficients(
# stats::lm(Velo~Subs)
# )
}
|
# plot2.R -- time series of Global Active Power for 2007-02-01/02.
## Load the whole dataset; '?' marks missing values. Spell out TRUE/FALSE:
## T and F are ordinary variables in R and can be reassigned.
power <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", nrows = 2075259, check.names = FALSE,
                  stringsAsFactors = FALSE, comment.char = "", quote = '\"')
## Format the date (stored as d/m/Y strings)
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
## Subset the dataset to the two target days
sub_power <- subset(power, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
## Combine date and time into a single POSIXct timestamp for the x axis
datetime <- paste(as.Date(sub_power$Date), sub_power$Time)
sub_power$Datetime <- as.POSIXct(datetime)
## Generate Plot2 on the current device ...
plot(sub_power$Global_active_power ~ sub_power$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
## ... then copy it to a 480x480 PNG file (closed by dev.off())
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off() | /plot2.R | no_license | AElawar/Exploratory-Data-Analysis---Project1 | R | false | false | 737 | r | ## Load the whole dataset
power <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Format the date
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
## Subset the dataset
sub_power <- subset(power, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Convert the dates
datetime <- paste(as.Date(sub_power$Date), sub_power$Time)
sub_power$Datetime <- as.POSIXct(datetime)
## Generate Plot2
plot(sub_power$Global_active_power~sub_power$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
## Output to png file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
#' Differential probes by standard deviation
#'
#' \code{dpsd} finds the most variable features by standard deviation.
#'
#' @param x Matrix of numbers with columns indicating samples and rows
#' indicating features.
#' @param n Number of features to choose.
#'
#' @details Identifies the most variable features across samples by
#' standard deviation (sd). The \code{n} features with highest sd
#' are returned as a matrix. Ties at the sd cutoff may yield slightly
#' more than \code{n} rows.
#'
#' @return A matrix of the n features with highest sd (always a matrix,
#' even when only one feature is returned).
#' @export
#'
#' @examples
#' x1 <- matrix(rnorm(1000), nrow = 50)
#' rownames(x1) = paste('LowSD', 1:50, sep='_')
#' x2 <- matrix(rnorm(1000, sd = 3), nrow = 50)
#' rownames(x2) = paste('HighSD', 1:50, sep='_')
#' dpsd(rbind(x1,x2),20)
#'
dpsd <- function(x, n){
  # Per-feature (row) standard deviation across samples.
  row_sd <- apply(x, 1, sd, na.rm = TRUE)
  n_rows <- nrow(x)
  # sd cutoff below which features are discarded; keeps the top n
  # features (possibly more if several rows tie at the cutoff value).
  cutoff <- quantile(row_sd, (n_rows - n) / n_rows, na.rm = TRUE)
  # drop = FALSE: keep a matrix even when a single feature survives.
  # The original code silently collapsed the result to a plain vector
  # (losing the feature name) whenever exactly one row was selected.
  x[row_sd >= cutoff, , drop = FALSE]
}
#'
#' \code{dpsd} Find most variable features by standard deviation.
#'
#' @param x Matrix of numbers with columns indicating samples and rows
#' indicating features.
#' @param n Number of features to choose.
#'
#' @details Identifies the most variable features across samples by
#' standard deviation (sd). The \code{n} features with highest sd
#' is returned as a matrix.
#'
#' @return A matrix of nrow = n features with highest sd.
#' @export
#'
#' @examples
#' x1 <- matrix(rnorm(1000), nrow = 50)
#' rownames(x1) = paste('LowSD', 1:50, sep='_')
#' x2 <- matrix(rnorm(1000, sd = 3), nrow = 50)
#' rownames(x2) = paste('HighSD', 1:50, sep='_')
#' dpsd(rbind(x1,x2),20)
#'
dpsd <- function(x, n){
dumsd <- apply(x, 1, sd, na.rm=TRUE)
dumrows <- dim(x)[1]
dum <- x[dumsd >= quantile(dumsd, (dumrows-n)/dumrows, na.rm=TRUE),]
return(dum)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example6.R
\docType{data}
\name{example6.rat}
\alias{example6.rat}
\title{Example 6 of rating data of two groups of unequal size}
\format{A data frame with 10 observations of 10 ratings. \describe{
\item{schoolid}{a numeric vector, identifying the second group
level} \item{groupid}{a numeric vector, identifying the first group
level.} \item{respid}{a numeric vector, identifying the individual.}
\item{r01}{ratings received by respondent 1.}
\item{r02}{ratings received by respondent 2.}
\item{r03}{ratings received by respondent 3.}
\item{r04}{ratings received by respondent 4.}
\item{r05}{ratings received by respondent 5.}
\item{r06}{ratings received by respondent 6.}
\item{r07}{ratings received by respondent 7.}
\item{r08}{ratings received by respondent 8.}
\item{r09}{ratings received by respondent 9.}
\item{r10}{ratings received by respondent 10.} }}
\description{
The combined data matrices of two groups, groups 10 and 20. Please note that
the missing ratings in group 10 are padded with NA's.\cr The result of
\code{readratdatafixed("<example6.rat.txt>")}. A 7-point rating scale has
been used. Each respondent is identified by a schoolid, a group id and a
respondent id. The rows contain the assessors, the columns contain the
assessed. When rater equals assessed (diagonal), the rating is NA.
}
\note{
Rating data can be entered directly into a SSrat compliant dataframe,
using \code{\link{edit}}. Colums needed are: "schoolid", "groupid",
"respid", and for <n> raters "r01", "r02".."r<n>". Optionally, a column
named "resplabel" can be entered, containing an additional identifier of the
raters/assessed. The raters (assessors) are in rows and assessed in columns.
For example: \cr mydata=data.frame(schoolid=numeric(0), groupid=numeric(0),
respid=numeric(0),\cr r01=numeric(0), r02=numeric(0), r03=numeric(0));
mydata=edit(mydata)
}
\examples{
data(example6.rat)
}
\seealso{
\code{\link{readratdatafixed}} \code{\link{calcallgroups}}
\code{\link{calcgroup}} \code{\link{example1.rat}}
\code{\link{example1a.rat}} \code{\link{example2.rat}}
\code{\link{example3.rat}} \code{\link{example4.rat}}
\code{\link{example5.rat}} %%\code{\link{example6.rat}}
\code{\link{example7.rat}} \code{\link{klas2.rat}}
}
\keyword{datasets}
| /man/example6.rat.Rd | no_license | cran/SSrat | R | false | true | 2,372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example6.R
\docType{data}
\name{example6.rat}
\alias{example6.rat}
\title{Example 6 of rating data of two groups of unequal size}
\format{A data frame with 10 observations of 9 ratings. \describe{
\item{schoolid}{a numeric vector, identifying the second group
level} \item{groupid}{a numeric vector, identifying the first group
level.} \item{respid}{a numeric vector, identifying the individual.}
\item{r01}{ratings received by respondent 1.}
\item{r02}{ratings received by respondent 2.}
\item{r03}{ratings received by respondent 3.}
\item{r04}{ratings received by respondent 4.}
\item{r05}{ratings received by respondent 5.}
\item{r06}{ratings received by respondent 6.}
\item{r07}{ratings received by respondent 7.}
\item{r08}{ratings received by respondent 8.}
\item{r09}{ratings received by respondent 9.}
\item{r10}{ratings received by respondent 10.} }}
\description{
The combined data matrices of two groups, groups 10 and 20. Please note that
the missing ratings in group 10 are padded with NA's.\cr The result of
\code{readratdatafixed("<example6.rat.txt>")}. A 7-point rating scale has
been used. Each respondent is identified by a schoolid, a group id and a
respondent id. The rows contain the assessors, the columns contain the
assessed. When rater equals assessed (diagonal), the rating is NA.
}
\note{
Rating data can be entered directly into a SSrat compliant dataframe,
using \code{\link{edit}}. Colums needed are: "schoolid", "groupid",
"respid", and for <n> raters "r01", "r02".."r<n>". Optionally, a column
named "resplabel" can be entered, containing an additional identifier of the
raters/assessed. The raters (assessors) are in rows and assessed in columns.
For example: \cr mydata=data.frame(schoolid=numeric(0), groupid=numeric(0),
respid=numeric(0),\cr r01=numeric(0), r02=numeric(0), r03=numeric(0));
mydata=edit(mydata)
}
\examples{
data(example6.rat)
}
\seealso{
\code{\link{readratdatafixed}} \code{\link{calcallgroups}}
\code{\link{calcgroup}} \code{\link{example1.rat}}
\code{\link{example1a.rat}} \code{\link{example2.rat}}
\code{\link{example3.rat}} \code{\link{example4.rat}}
\code{\link{example5.rat}} %%\code{\link{example6.rat}}
\code{\link{example7.rat}} \code{\link{klas2.rat}}
}
\keyword{datasets}
|
source.data <- read_csv("./input/EXPORTED_DATA.csv")
source("./models/helper_functions.R")
head(source.data,5)
source.data$`Dosage Code` <- NULL
source.data$`Dosage Description` <- NULL
#######################################################################################################################
# NLP testing
#######################################################################################################################
head(source.data)
source.data <- source.data %>%
mutate( BreakFast = `BreakFast Tray1` + `BreakFast Tray2`
, Lunch = `Lunch Tray1` + `Lunch Tray2`
, Dinner = `Dinner Tray1` + `Dinner Tray 2`
, BedTime = `Bed time Tray1` + `Bed time Tray2`
)
source.data <- source.data %>%
select (`Script Directions`,BreakFast, Lunch, Dinner, BedTime) %>%
mutate(ScriptQty = BreakFast+Lunch+Dinner+BedTime)
source.data$BreakFast <- ifelse(source.data$BreakFast > 0 ,1, 0)
source.data$Lunch <- ifelse(source.data$Lunch > 0 ,1, 0)
source.data$Dinner <- ifelse(source.data$Dinner > 0 ,1, 0)
source.data$BedTime <- ifelse(source.data$BedTime > 0 ,1, 0)
head(source.data)
library(tm)
myCorpus<-Corpus(VectorSource(source.data$`Script Directions`)) #converts the relevant part of your file into a corpus
myCorpus = tm_map(myCorpus, PlainTextDocument) # an intermediate preprocessing step
myCorpus = tm_map(myCorpus, tolower) # converts all text to lower case
myCorpus = tm_map(myCorpus, removePunctuation) #removes punctuation
myCorpus = tm_map(myCorpus, removeWords, stopwords("english")) #removes common words like "a", "the" etc
myCorpus = tm_map(myCorpus, stemDocument) # removes the last few letters of similar words such as get, getting, gets
dtm = DocumentTermMatrix(myCorpus) #turns the corpus into a document term matrix
notSparse = removeSparseTerms(dtm, 0.99) # extracts frequently occuring words
finalWords=as.data.frame(as.matrix(notSparse)) # most frequent words remain in a dataframe, with one column per word
head(finalWords)
train <- cbind(source.data, finalWords)
head(train)
train$ID <- seq.int(nrow(train))
###########################################################################################################
# CV folds creation #######################################################################################
###########################################################################################################
#Input to function
train.CV <- as.data.frame(train[,c("ID")])
names(train.CV) <- "ID"
Create5Folds <- function(train, CVSourceColumn, RandomSample, RandomSeed)
{
set.seed(RandomSeed)
if(RandomSample)
{
train <- as.data.frame(train[sample(1:nrow(train)), ])
names(train)[1] <- CVSourceColumn
}
names(train)[1] <- CVSourceColumn
folds <- createFolds(train[[CVSourceColumn]], k = 5)
trainingFold01 <- as.data.frame(train[folds$Fold1, ])
trainingFold01$CVindices <- 1
trainingFold02 <- as.data.frame(train[folds$Fold2, ])
trainingFold02$CVindices <- 2
trainingFold03 <- as.data.frame(train[folds$Fold3, ])
trainingFold03$CVindices <- 3
trainingFold04 <- as.data.frame(train[folds$Fold4, ])
trainingFold04$CVindices <- 4
trainingFold05 <- as.data.frame(train[folds$Fold5, ])
trainingFold05$CVindices <- 5
names(trainingFold01)[1] <- CVSourceColumn
names(trainingFold02)[1] <- CVSourceColumn
names(trainingFold03)[1] <- CVSourceColumn
names(trainingFold04)[1] <- CVSourceColumn
names(trainingFold05)[1] <- CVSourceColumn
trainingFolds <- rbind(trainingFold01, trainingFold02 , trainingFold03, trainingFold04, trainingFold05 )
rm(trainingFold01,trainingFold02,trainingFold03,trainingFold04,trainingFold05); gc()
return(trainingFolds)
}
###########################################################################################################
# CV folds creation #######################################################################################
###########################################################################################################
Prav_CVindices <- Create5Folds(train.CV, "ID", RandomSample=TRUE, RandomSeed=2017)
train <- left_join(train, Prav_CVindices, by = "ID")
rm(train.CV, Prav_CVindices); gc()
| /Driver/01.FeatureEngineering.R | no_license | PraveenAdepu/kaggle_competitions | R | false | false | 4,466 | r |
source.data <- read_csv("./input/EXPORTED_DATA.csv")
source("./models/helper_functions.R")
head(source.data,5)
source.data$`Dosage Code` <- NULL
source.data$`Dosage Description` <- NULL
#######################################################################################################################
# NLP testing
#######################################################################################################################
head(source.data)
source.data <- source.data %>%
mutate( BreakFast = `BreakFast Tray1` + `BreakFast Tray2`
, Lunch = `Lunch Tray1` + `Lunch Tray2`
, Dinner = `Dinner Tray1` + `Dinner Tray 2`
, BedTime = `Bed time Tray1` + `Bed time Tray2`
)
source.data <- source.data %>%
select (`Script Directions`,BreakFast, Lunch, Dinner, BedTime) %>%
mutate(ScriptQty = BreakFast+Lunch+Dinner+BedTime)
source.data$BreakFast <- ifelse(source.data$BreakFast > 0 ,1, 0)
source.data$Lunch <- ifelse(source.data$Lunch > 0 ,1, 0)
source.data$Dinner <- ifelse(source.data$Dinner > 0 ,1, 0)
source.data$BedTime <- ifelse(source.data$BedTime > 0 ,1, 0)
head(source.data)
library(tm)
myCorpus<-Corpus(VectorSource(source.data$`Script Directions`)) #converts the relevant part of your file into a corpus
myCorpus = tm_map(myCorpus, PlainTextDocument) # an intermediate preprocessing step
myCorpus = tm_map(myCorpus, tolower) # converts all text to lower case
myCorpus = tm_map(myCorpus, removePunctuation) #removes punctuation
myCorpus = tm_map(myCorpus, removeWords, stopwords("english")) #removes common words like "a", "the" etc
myCorpus = tm_map(myCorpus, stemDocument) # removes the last few letters of similar words such as get, getting, gets
dtm = DocumentTermMatrix(myCorpus) #turns the corpus into a document term matrix
notSparse = removeSparseTerms(dtm, 0.99) # extracts frequently occuring words
finalWords=as.data.frame(as.matrix(notSparse)) # most frequent words remain in a dataframe, with one column per word
head(finalWords)
train <- cbind(source.data, finalWords)
head(train)
train$ID <- seq.int(nrow(train))
###########################################################################################################
# CV folds creation #######################################################################################
###########################################################################################################
#Input to function
train.CV <- as.data.frame(train[,c("ID")])
names(train.CV) <- "ID"
Create5Folds <- function(train, CVSourceColumn, RandomSample, RandomSeed)
{
set.seed(RandomSeed)
if(RandomSample)
{
train <- as.data.frame(train[sample(1:nrow(train)), ])
names(train)[1] <- CVSourceColumn
}
names(train)[1] <- CVSourceColumn
folds <- createFolds(train[[CVSourceColumn]], k = 5)
trainingFold01 <- as.data.frame(train[folds$Fold1, ])
trainingFold01$CVindices <- 1
trainingFold02 <- as.data.frame(train[folds$Fold2, ])
trainingFold02$CVindices <- 2
trainingFold03 <- as.data.frame(train[folds$Fold3, ])
trainingFold03$CVindices <- 3
trainingFold04 <- as.data.frame(train[folds$Fold4, ])
trainingFold04$CVindices <- 4
trainingFold05 <- as.data.frame(train[folds$Fold5, ])
trainingFold05$CVindices <- 5
names(trainingFold01)[1] <- CVSourceColumn
names(trainingFold02)[1] <- CVSourceColumn
names(trainingFold03)[1] <- CVSourceColumn
names(trainingFold04)[1] <- CVSourceColumn
names(trainingFold05)[1] <- CVSourceColumn
trainingFolds <- rbind(trainingFold01, trainingFold02 , trainingFold03, trainingFold04, trainingFold05 )
rm(trainingFold01,trainingFold02,trainingFold03,trainingFold04,trainingFold05); gc()
return(trainingFolds)
}
###########################################################################################################
# CV folds creation #######################################################################################
###########################################################################################################
Prav_CVindices <- Create5Folds(train.CV, "ID", RandomSample=TRUE, RandomSeed=2017)
train <- left_join(train, Prav_CVindices, by = "ID")
rm(train.CV, Prav_CVindices); gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.SPLS.R
\name{s.SPLS}
\alias{s.SPLS}
\title{Sparse Partial Least Squares Regression [C, R]}
\usage{
s.SPLS(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.name = NULL,
y.name = NULL,
upsample = TRUE,
downsample = FALSE,
resample.seed = NULL,
k = 2,
eta = 0.5,
kappa = 0.5,
select = "pls2",
fit = "simpls",
scale.x = TRUE,
scale.y = TRUE,
maxstep = 100,
classifier = c("lda", "logistic"),
grid.resample.rtset = rtset.resample("kfold", 5),
grid.search.type = c("exhaustive", "randomized"),
grid.randomized.p = 0.1,
metric = NULL,
maximize = NULL,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
question = NULL,
rtclass = NULL,
verbose = TRUE,
trace = 0,
grid.verbose = TRUE,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
n.cores = rtCores,
...
)
}
\arguments{
\item{x}{Numeric vector or matrix / data frame of features i.e. independent variables}
\item{y}{Numeric vector of outcome, i.e. dependent variable}
\item{x.test}{Numeric vector or matrix / data frame of testing set features
Columns must correspond to columns in \code{x}}
\item{y.test}{Numeric vector of testing set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only)
Caution: upsample will randomly sample with replacement if the length of the majority class is more than double
the length of the class you are upsampling, thereby introducing randomness}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{k}{[gS] Integer: Number of components to estimate. Default = 2}
\item{eta}{[gS] Float [0, 1): Thresholding parameter. Default = .5}
\item{kappa}{[gS] Float [0, .5]: Only relevant for multivariate responses: controls effect of concavity of objective
function. Default = .5}
\item{select}{[gS] Character: "pls2", "simpls". PLS algorithm for variable selection. Default = "pls2"}
\item{fit}{[gS] Character: "kernelpls", "widekernelpls", "simpls", "oscorespls". Algorithm for model fitting.
Default = "simpls"}
\item{scale.x}{Logical: if TRUE, scale features by dividing each column by its sample standard deviation}
\item{scale.y}{Logical: if TRUE, scale outcomes by dividing each column by its sample standard deviation}
\item{maxstep}{[gS] Integer: Maximum number of iteration when fitting direction vectors. Default = 100}
\item{classifier}{Character: Classifier used by \code{spls::splsda} "lda" or "logistic": Default = "lda"}
\item{grid.resample.rtset}{List: Output of \link{rtset.resample} defining \link{gridSearchLearn} parameters.
Default = \code{rtset.resample("kfold", 5)}}
\item{grid.search.type}{Character: Type of grid search to perform: "exhaustive" or "randomized". Default = "exhaustive"}
\item{grid.randomized.p}{Float (0, 1): If \code{grid.search.type = "randomized"}, randomly run this proportion of
combinations. Default = .1}
\item{metric}{Character: Metric to minimize, or maximize if \code{maximize = TRUE} during grid search.
Default = NULL, which results in "Balanced Accuracy" for Classification,
"MSE" for Regression, and "Coherence" for Survival Analysis.}
\item{maximize}{Logical: If TRUE, \code{metric} will be maximized if grid search is run. Default = FALSE}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{grid.verbose}{Logical: Passed to \link{gridSearchLearn}}
\item{outdir}{Path to output directory.
If defined, will save Predicted vs. True plot, if available,
as well as full model output, if \code{save.mod} is TRUE}
\item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{n.cores}{Integer: Number of cores to be used by \link{gridSearchLearn}, if applicable}
\item{...}{Additional parameters to be passed to \code{npreg}}
}
\value{
Object of class \pkg{rtemis}
}
\description{
Train an SPLS model using \code{spls::spls} (Regression) and \code{spls::splsda} (Classification)
}
\details{
[gS] denotes argument can be passed as a vector of values, which will trigger a grid search using \link{gridSearchLearn}
\code{np::npreg} allows inputs with mixed data types.
}
\examples{
\dontrun{
x <- rnorm(100)
y <- .6 * x + 12 + rnorm(100)
mod <- s.SPLS(x, y)}
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSELX2}()},
\code{\link{s.GAMSELX}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2ODL}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.RF}()},
\code{\link{s.SGD}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
}
\author{
E.D. Gennatas
}
\concept{Supervised Learning}
| /man/s.SPLS.Rd | no_license | DrRoad/rtemis | R | false | true | 6,489 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.SPLS.R
\name{s.SPLS}
\alias{s.SPLS}
\title{Sparse Partial Least Squares Regression [C, R]}
\usage{
s.SPLS(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.name = NULL,
y.name = NULL,
upsample = TRUE,
downsample = FALSE,
resample.seed = NULL,
k = 2,
eta = 0.5,
kappa = 0.5,
select = "pls2",
fit = "simpls",
scale.x = TRUE,
scale.y = TRUE,
maxstep = 100,
classifier = c("lda", "logistic"),
grid.resample.rtset = rtset.resample("kfold", 5),
grid.search.type = c("exhaustive", "randomized"),
grid.randomized.p = 0.1,
metric = NULL,
maximize = NULL,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
question = NULL,
rtclass = NULL,
verbose = TRUE,
trace = 0,
grid.verbose = TRUE,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
n.cores = rtCores,
...
)
}
\arguments{
\item{x}{Numeric vector or matrix / data frame of features i.e. independent variables}
\item{y}{Numeric vector of outcome, i.e. dependent variable}
\item{x.test}{Numeric vector or matrix / data frame of testing set features
Columns must correspond to columns in \code{x}}
\item{y.test}{Numeric vector of testing set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only)
Caution: upsample will randomly sample with replacement if the length of the majority class is more than double
the length of the class you are upsampling, thereby introducing randomness}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{k}{[gS] Integer: Number of components to estimate. Default = 2}
\item{eta}{[gS] Float [0, 1): Thresholding parameter. Default = .5}
\item{kappa}{[gS] Float [0, .5]: Only relevant for multivariate responses: controls effect of concavity of objective
function. Default = .5}
\item{select}{[gS] Character: "pls2", "simpls". PLS algorithm for variable selection. Default = "pls2"}
\item{fit}{[gS] Character: "kernelpls", "widekernelpls", "simpls", "oscorespls". Algorithm for model fitting.
Default = "simpls"}
\item{scale.x}{Logical: if TRUE, scale features by dividing each column by its sample standard deviation}
\item{scale.y}{Logical: if TRUE, scale outcomes by dividing each column by its sample standard deviation}
\item{maxstep}{[gS] Integer: Maximum number of iteration when fitting direction vectors. Default = 100}
\item{classifier}{Character: Classifier used by \code{spls::splsda} "lda" or "logistic": Default = "lda"}
\item{grid.resample.rtset}{List: Output of \link{rtset.resample} defining \link{gridSearchLearn} parameters.
Default = \code{rtset.resample("kfold", 5)}}
\item{grid.search.type}{Character: Type of grid search to perform: "exhaustive" or "randomized". Default = "exhaustive"}
\item{grid.randomized.p}{Float (0, 1): If \code{grid.search.type = "randomized"}, randomly run this proportion of
combinations. Default = .1}
\item{metric}{Character: Metric to minimize, or maximize if \code{maximize = TRUE} during grid search.
Default = NULL, which results in "Balanced Accuracy" for Classification,
"MSE" for Regression, and "Coherence" for Survival Analysis.}
\item{maximize}{Logical: If TRUE, \code{metric} will be maximized if grid search is run. Default = FALSE}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{grid.verbose}{Logical: Passed to \link{gridSearchLearn}}
\item{outdir}{Path to output directory.
If defined, will save Predicted vs. True plot, if available,
as well as full model output, if \code{save.mod} is TRUE}
\item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{n.cores}{Integer: Number of cores to be used by \link{gridSearchLearn}, if applicable}
\item{...}{Additional parameters to be passed to \code{npreg}}
}
\value{
Object of class \pkg{rtemis}
}
\description{
Train an SPLS model using \code{spls::spls} (Regression) and \code{spls::splsda} (Classification)
}
\details{
[gS] denotes argument can be passed as a vector of values, which will trigger a grid search using \link{gridSearchLearn}
\code{np::npreg} allows inputs with mixed data types.
}
\examples{
\dontrun{
x <- rnorm(100)
y <- .6 * x + 12 + rnorm(100)
mod <- s.SPLS(x, y)}
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSELX2}()},
\code{\link{s.GAMSELX}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2ODL}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.RF}()},
\code{\link{s.SGD}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
}
\author{
E.D. Gennatas
}
\concept{Supervised Learning}
|
data <- read.table('USCrime.txt', header = TRUE)
# 'R' is the reponse (crime rate).
# We'll focus on the following predictors:
# Age: number of male aged 14--24 per 1000 population
# Ed: mean # of years of schooling
# Ex0: per capita expenditure on police by government
# N: state population
# U1: unemployment rate of urban males
# W: median family goods
data <- data[, c('R', 'Age', 'Ed', 'Ex0', 'N', 'U1', 'W')]
n <- nrow(data)
print(n)
myfit <- lm(R ~ ., data)
# 'h_ii', or 'leverage' for each observation:
print(hatvalues(myfit))
# Studentized residuals:
rst <- rstudent(myfit)
print(rst)
# Make a normal probability plot.
pdf(file = 'part13-rstud-qq.pdf', width = 5, height = 5)
qqnorm(rst, main = 'Norm QQ plot of studentized residuals')
qqline(rst)
dev.off()
# Cook's distance of each case:
print(cooks.distance(myfit))
| /stat401/2010-fall/lect/part13.R | no_license | zpz/teaching | R | false | false | 849 | r | data <- read.table('USCrime.txt', header = TRUE)
# 'R' is the reponse (crime rate).
# We'll focus on the following predictors:
# Age: number of male aged 14--24 per 1000 population
# Ed: mean # of years of schooling
# Ex0: per capita expenditure on police by government
# N: state population
# U1: unemployment rate of urban males
# W: median family goods
data <- data[, c('R', 'Age', 'Ed', 'Ex0', 'N', 'U1', 'W')]
n <- nrow(data)
print(n)
myfit <- lm(R ~ ., data)
# 'h_ii', or 'leverage' for each observation:
print(hatvalues(myfit))
# Studentized residuals:
rst <- rstudent(myfit)
print(rst)
# Make a normal probability plot.
pdf(file = 'part13-rstud-qq.pdf', width = 5, height = 5)
qqnorm(rst, main = 'Norm QQ plot of studentized residuals')
qqline(rst)
dev.off()
# Cook's distance of each case:
print(cooks.distance(myfit))
|
library('testthat')
test_dir('~/R_wd/visa-gwas/scripts/VISA-shiny/tests', reporter = 'Summary') | /scripts/Interactive_VISA_model/run_test.R | no_license | CoolEvilgenius/phyc | R | false | false | 98 | r | library('testthat')
test_dir('~/R_wd/visa-gwas/scripts/VISA-shiny/tests', reporter = 'Summary') |
# Description: ----------------------------------------------
# .
# The concept of the analysis is in ../Doc/mtg_190402.pdf.
#
# 19/ 04/ 21-
# Settings: -------------------------------------------------
set.seed(22)
dir.sub <- "../sub"
fn.require_packages.R <- "require_libraries.R"
dir.data <- "../../Data"
fn.data <- "AnalysisDataSet_v4.0.RData"
load(sprintf("%s/%s", dir.data, fn.data))
data$Ro52_log10 <- log(
data$Ro52,
base = 10
)
data$Ro60_log10 <- log(
data$anti_SS_A_ab,
base = 10
)
dir.output <- "../../Output/Final"
# Load subroutines: ----------------------------------------------
Bibtex <- FALSE
source(
sprintf(
"%s/%s", dir.sub, fn.require_packages.R
)
)
# Dichotomyze outcomes for Lip_biopsy and ESSDAI -----------------------------------------------------
data_Dichotomyze <- data %>%
mutate(
Ro52_ext.High = # lately (within this pipe) converted to factor
ifelse(Ro52 > 500, 1, 0),
Lip_biopsy_Dimyze_by_0 =
ifelse(
is.na(Lip_biopsy_tri), NA,
ifelse(
Lip_biopsy_tri ==
0, 0, 1
)
),
Lip_biopsy_Dimyze_by_3 =
ifelse(
is.na(Lip_biopsy_tri), NA,
ifelse(
Lip_biopsy_tri %in%
c(0, 3), 0, 1
)
),
ESSDAI_Dimyze_by_2 =
ifelse(
is.na(ESSDAI), NA,
ifelse(
ESSDAI <= 2, 0, 1
)
),
ESSDAI_Dimyze_by_5 =
ifelse(
is.na(ESSDAI), NA,
ifelse(
ESSDAI <= 5, 0, 1
)
),
FS_Dimyze_by_0 =
ifelse(
is.na(FS), NA,
ifelse(
FS < 1, 0, 1
)
),
FS_Dimyze_by_2 =
ifelse(
is.na(FS), NA,
ifelse(
FS < 3, 0, 1
)
),
FS_Dimyze_by_4 =
ifelse(
is.na(FS), NA,
ifelse(
FS < 5, 0, 1
)
)
) %>%
mutate(Ro52_ext.High=factor(Ro52_ext.High, levels = c(0,1), labels = c("Normal", "High"))) %>%
filter(SS==1)
AECG_component.num <- c(
"FS",
"anti_SS_B_ab"
)
AECG_component <- c(
"Dry_mouth", #component
"Dry_eye", #component
"FS", #component
"Lip_biopsy_tri", #component
"anti_SS_B_ab", #component
"anti_SS_B_ab_pn", #component
"ACA_np",
"Raynaud_np",
"RF_pn",
"IgG_pn",
"Saxon_test_np",
"Schirmer_test_np",
"nijisei"
)
# Data shaping ----------------------------------------------------------
data_tidy <- data_Dichotomyze %>%
filter(
disease_group=="SS"
) %>%
mutate(
Ro60 = anti_SS_A_ab
) %>%
mutate_if(is.factor, as.numeric) %>%
gather(
var, val,
-SubjID, -Ro52, -Ro52_log10,-Ro60, -Age,
-disease_group, -nijisei
) %>%
mutate(
val = as.numeric(val),
Ro60_log10 = log(Ro60,base = 10)
) %>%
filter(
var %in%
c(AECG_component,"Ro52_ext.High")
) %>%
gather(
var_y, val_y,
-SubjID, -Age, -var, -val,
-disease_group,- nijisei
)
# ANOVA -------------------------------------------------------------------
df.ANOVA <- data_Dichotomyze %>%
dplyr::select(
SubjID,
Ro52_log10,
Ro60_log10,
AECG_component
)
# make list of covariates
list.formula <- dlply(
data.frame(
"id" = 1:length(AECG_component[c(1,2,4,6,7,8,9,10,11,12,13)]),
"var"= AECG_component[c(1,2,4,6,7,8,9,10,11,12,13)]
),
.(id),
function(D){
fmr = sprintf(
"%s~%s",
"Ro52_log10",
D$var
)
return(fmr)
}
)
res.lmrob.Ro52_log10 <-
llply(
list.formula,
function(L){
robustbase::lmrob(
as.formula(L),
df.ANOVA,
method = 'MM',
# setting="KS2014",
control = lmrob.control(maxit.scale = 2000)
)
}
)
# make list of covariates
list.formula_Ro60 <- dlply(
data.frame(
"id" = 1:length(AECG_component[c(1,2,4,6,7,8,9,10,11,12)]),
"var"= AECG_component[c(1,2,4,6,7,8,9,10,11,12)]
),
.(id),
function(D){
fmr = sprintf(
"%s~%s",
"Ro60_log10",
D$var
)
return(fmr)
}
)
res.lmrob.Ro60_log10 <-
llply(
list.formula_Ro60,
function(L){
robustbase::lmrob(
as.formula(L),
df.ANOVA,
method = 'MM',
# setting="KS2014",
control = lmrob.control(maxit.scale = 2000)
)
}
)
# Extract estimated coefficients
coef.lmrob.Ro52_log10 <- llply(
res.lmrob.Ro52_log10,
summary
) %>%
ldply(
function(L){
out <- coef(L) %>%
as.data.frame() %>%
rownames_to_column("terms")
return(out)
}
)
coef.lmrob.Ro60_log10 <- llply(
res.lmrob.Ro60_log10,
summary
) %>%
ldply(
function(L){
out <- coef(L) %>%
as.data.frame() %>%
rownames_to_column("terms")
return(out)
}
)
# Boxplot -----------------------------------------------------------------
gg.data_tidy <- data_tidy %>%
filter(
var %in% c(
AECG_component[c(1,2,6,7,8,9,10,11,12)],
"Ro52_ext.High"
)
) %>%
mutate(
thre = ifelse(
var_y=="Ro52", 10,
ifelse(
var_y=="Ro52_log10", 1, NA
)
),
Alpha = ifelse(
var_y %in% c("Ro52", "Ro52_log10"), 0.8, 0
)
) %>%
ggplot(
aes(
x = as.factor(val),
y = val_y,
yintercept = thre
)
)
plot.boxplot <- plot(
gg.data_tidy + geom_boxplot(outlier.alpha = 0) +
geom_beeswarm(col="black", size=1, alpha=1) +
# geom_jitter(col="black", size=0.2, alpha=1, height = 0.01, width = 0.1) +
geom_hline(aes(yintercept = thre, alpha = Alpha), size=0.5, col="black") +
facet_grid(
var + var_y ~ disease_group,# + nijisei,
scales = "free") +
theme_bw()
)
# Scatterplot -----------------------------------------------------------------
gg.data_tidy.scatter <- data_tidy %>%
filter(
(var %in% c("FS", "anti_SS_B_ab"))
) %>%
# filter(!is.na(val)) %>%
ggplot(
aes(
x = as.numeric(val),
y = val_y
)
)
plot.scatterplot <- plot(
gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
geom_point(alpha=0.6) +
scale_x_continuous(trans = "log10") +
facet_grid( var + var_y ~., scales = "free") +
theme_bw()
)
gg.data_tidy.scatter <- data_tidy %>%
filter(
(var %in% c("FS", "anti_SS_B_ab"))
) %>%
# filter(!is.na(val)) %>%
ggplot(
aes(
x = as.numeric(val),
y = val_y
)
)
plot.scatterplot.Subgroup_pri_sec <- plot(
gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
geom_point(alpha=0.6) +
scale_x_continuous(trans = "log10") +
facet_wrap( var + var_y ~ disease_group + nijisei, scales = "free") +
theme_bw()
)
plot.scatterplot <- plot(
gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
geom_point(alpha=0.6) +
scale_x_continuous(trans = "log10") +
facet_wrap( ~ var + var_y, scales = "free", ncol=1) +
theme_bw()
)
# Missingness
gg.data_tidy.scatter_missing <- data_tidy %>%
filter(
(var %in% c("FS", "anti_SS_B_ab"))
) %>%
mutate(
flg.na = factor(
ifelse(is.na(val), "missing", "observed")
)
) %>%
ggplot(
aes(
x = as.numeric(flg.na),
y = val_y,
group = flg.na
)
)
plot.boxplot.scatter_missing.Subgroup_pri_sec <- plot(
gg.data_tidy.scatter_missing +
geom_boxplot(
color ="black",
outlier.alpha = 0
) +
geom_jitter(
color ="black",
width = 0.2, alpha=0.8, size=0.75
) +
facet_grid(
var + var_y ~ disease_group + nijisei, scales = "free"
) +
theme_bw() +
scale_x_discrete()
)
plot.boxplot.scatter_missing <- plot(
gg.data_tidy.scatter_missing +
geom_boxplot(color ="black", outlier.alpha = 0) +
geom_jitter(color ="black", width = 0.2, alpha=0.8, size=0.75) +
facet_wrap( ~ var + var_y, ncol = 1, scales = "free") +
theme_bw() +
scale_x_discrete()
)
# Mutual Information ------------------------------------------------------
MIPermute_Ro52_self <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro52_log10,
Y=data_Dichotomyze$Ro52_log10,
method = "shrink",
n.sim = 500
)[1,2]
MIPermute_Ro60_self <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro60_log10,
Y=data_Dichotomyze$Ro60_log10,
method = "shrink",
n.sim = 500
)[1,2]
AECG_component
pdf(
sprintf(
"%s/%s.pdf",
dir.output,
"hist.mutinfo_Ro52_SS.Primary"
)
)
for(i in 1:length(AECG_component.num)){
res.MIPermute <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro52_log10[data_Dichotomyze$nijisei=="Primary"],
Y=data_Dichotomyze[data_Dichotomyze$nijisei=="Primary",AECG_component.num[i]],
n.sim = 10000,
method="MIC",
disc.X = "none",
disc.Y = "none",
use = 'pairwise.complete.obs'
)
q.95 <- quantile(res.MIPermute$V1, 0.95)
assign(
sprintf(
"MIPermute_Ro52_%s", AECG_component.num[i]
),
res.MIPermute
)
hist(
res.MIPermute$V1,
breaks='FD',
main = AECG_component.num[i]
)
abline(
v=res.MIPermute[res.MIPermute$i==1, 'V1'],
col='red'
)
abline(
v=q.95,
col='red',
lty=2
)
print(AECG_component.num[i])
}
dev.off()
# Same permutation MIC analysis against the Ro60 log-titre.
# NOTE(review): unlike the Ro52 loop above, X and Y are NOT restricted to
# nijisei == "Primary" and disc.X/disc.Y are not passed, yet the output file
# name still says "SS.Primary" -- confirm this asymmetry is intended.
# Fix: iterate with seq_along() instead of 1:length().
pdf(
  sprintf(
    "%s/%s.pdf",
    dir.output,
    "hist.mutinfo_Ro60_SS.Primary"
  )
)
for (i in seq_along(AECG_component.num)) {
  res.MIPermute <- ExploratoryDataAnalysis::MIPermute(
    X = data_Dichotomyze$Ro60_log10,
    Y = data_Dichotomyze[, AECG_component.num[i]],
    n.sim = 10000,
    method = "MIC",
    use = "pairwise.complete.obs"
  )
  # 95th percentile of the permutation (null) distribution.
  q.95 <- quantile(res.MIPermute$V1, 0.95)
  # Stash the result under a component-specific global name for later use.
  assign(
    sprintf("MIPermute_Ro60_%s", AECG_component.num[i]),
    res.MIPermute
  )
  hist(
    res.MIPermute$V1,
    breaks = "FD",
    main = AECG_component.num[i]
  )
  # Observed statistic (permutation index i == 1) solid; 95% null quantile dashed.
  abline(v = res.MIPermute[res.MIPermute$i == 1, "V1"], col = "red")
  abline(v = q.95, col = "red", lty = 2)
  print(AECG_component.num[i])
}
dev.off()
# Tabulate the results from permutation test of MIC analysis --------------
# Turn each permutation distribution into an upper-tail p-value: the
# complement of each MIC value's rank within its own null distribution.
# Fix: the same transformation was copy-pasted four times; apply it once
# per result object, updating the same global names as before.
for (nm in c(
  "MIPermute_Ro52_anti_SS_B_ab",
  "MIPermute_Ro60_anti_SS_B_ab",
  "MIPermute_Ro52_FS",
  "MIPermute_Ro60_FS"
)) {
  d <- get(nm)
  d$pval <- 1 - rank(d$V1) / nrow(d)
  d$dataname <- nm
  assign(nm, d)
}
# Keep only the observed-statistic rows (permutation index i == 1),
# stacked in the same order as the original code.
MIPermute <- MIPermute_Ro52_anti_SS_B_ab %>%
  rbind(MIPermute_Ro52_FS) %>%
  rbind(MIPermute_Ro60_anti_SS_B_ab) %>%
  rbind(MIPermute_Ro60_FS) %>%
  filter(i == 1) %>%
  dplyr::select(pval, dataname)
# Output ------------------------------------------------------------------

# Render one already-built plot into a single PDF under dir.output.
# print() is required because ggplot objects do not auto-print inside a
# function body.
save_plot_pdf <- function(p, name, width, height) {
  pdf(
    file = sprintf("%s/%s.pdf", dir.output, name),
    width = width, height = height
  )
  print(p)
  dev.off()
}

# Write a data frame to CSV under dir.output (row names kept, as before).
save_csv <- function(x, name) {
  write.csv(x, file = sprintf("%s/%s.csv", dir.output, name))
}

# Side-by-side panel: scatterplots next to the missingness box plots.
g1 <- ggplotGrob(plot.scatterplot)
g2 <- ggplotGrob(plot.boxplot.scatter_missing)
pdf(
  sprintf("%s/%s.pdf", dir.output, "scatterplot_with_miss.box"),
  height = 56,
  width = 10
)
plot_grid(
  g1, g2,
  align = "h", axis = "l", ncol = 2, rel_widths = c(5/7, 2/7)
)
dev.off()

# Individual figures (same file names and page sizes as before).
save_plot_pdf(plot.boxplot, "boxplot", width = 10, height = 180)
save_plot_pdf(plot.scatterplot, "scatterplot", width = 70/8, height = 70)
save_plot_pdf(plot.scatterplot.Subgroup_pri_sec,
              "scatterplot.Subgroup_pri_sec", width = 70/4, height = 70)
save_plot_pdf(plot.boxplot.scatter_missing,
              "boxplot.scatter_missing", width = 7, height = 70)
save_plot_pdf(plot.boxplot.scatter_missing.Subgroup_pri_sec,
              "boxplot.scatter_missing.Subgroup_pri_sec", width = 5, height = 70)

# Result tables: MIC p-values and robust-regression coefficients
# (numeric columns rounded to 3 decimal places).
save_csv(MIPermute, "p_value.MIPermute_SS.Primary")
save_csv(
  coef.lmrob.Ro52_log10 %>%
    mutate_if(is.numeric, function(x) round(x, 3)),
  "coef_lmrob_Ro52.AECGcomponent.total_SS"
)
save_csv(
  coef.lmrob.Ro60_log10 %>%
    mutate_if(is.numeric, function(x) round(x, 3)),
  "coef_lmrob_Ro60.AECGcomponent.total_SS"
)
# Endrant -----------------------------------------------------------------
| /src/R/final_correlation_analysis_v2_nonNijisei.R | no_license | mrmtshmp/Ro52 | R | false | false | 13,279 | r | # Description: ----------------------------------------------
# .
# The concept of the analysis is in ../Doc/mtg_190402.pdf.
#
# 19/ 04/ 21-
# Settings: -------------------------------------------------
set.seed(22)
dir.sub <- "../sub"
fn.require_packages.R <- "require_libraries.R"
dir.data <- "../../Data"
fn.data <- "AnalysisDataSet_v4.0.RData"
load(sprintf("%s/%s", dir.data, fn.data))
data$Ro52_log10 <- log(
data$Ro52,
base = 10
)
data$Ro60_log10 <- log(
data$anti_SS_A_ab,
base = 10
)
dir.output <- "../../Output/Final"
# Load subroutines: ----------------------------------------------
Bibtex <- FALSE
source(
sprintf(
"%s/%s", dir.sub, fn.require_packages.R
)
)
# Dichotomyze outcomes for Lip_biopsy and ESSDAI -----------------------------------------------------
# Build the analysis data set: dichotomize ordinal/continuous outcomes
# (lip-biopsy grade, ESSDAI, focus score) at several cut-points, keeping NA
# where the source value is missing, then restrict to SS cases.
data_Dichotomyze <- data %>%
  mutate(
    Ro52_ext.High = # lately (within this pipe) converted to factor
      ifelse(Ro52 > 500, 1, 0),
    # Lip biopsy: 0 = grade 0, 1 = any other grade.
    Lip_biopsy_Dimyze_by_0 =
      ifelse(
        is.na(Lip_biopsy_tri), NA,
        ifelse(
          Lip_biopsy_tri ==
            0, 0, 1
        )
      ),
    # Lip biopsy: grades 0 and 3 together form the reference (0) group.
    Lip_biopsy_Dimyze_by_3 =
      ifelse(
        is.na(Lip_biopsy_tri), NA,
        ifelse(
          Lip_biopsy_tri %in%
            c(0, 3), 0, 1
        )
      ),
    # ESSDAI dichotomized at <= 2 and at <= 5.
    ESSDAI_Dimyze_by_2 =
      ifelse(
        is.na(ESSDAI), NA,
        ifelse(
          ESSDAI <= 2, 0, 1
        )
      ),
    ESSDAI_Dimyze_by_5 =
      ifelse(
        is.na(ESSDAI), NA,
        ifelse(
          ESSDAI <= 5, 0, 1
        )
      ),
    # Focus score dichotomized as < 1 / < 3 / < 5 (i.e. cut above 0, 2, 4).
    FS_Dimyze_by_0 =
      ifelse(
        is.na(FS), NA,
        ifelse(
          FS < 1, 0, 1
        )
      ),
    FS_Dimyze_by_2 =
      ifelse(
        is.na(FS), NA,
        ifelse(
          FS < 3, 0, 1
        )
      ),
    FS_Dimyze_by_4 =
      ifelse(
        is.na(FS), NA,
        ifelse(
          FS < 5, 0, 1
        )
      )
  ) %>%
  # Label the extreme-Ro52 flag and keep SS patients only.
  mutate(Ro52_ext.High=factor(Ro52_ext.High, levels = c(0,1), labels = c("Normal", "High"))) %>%
  filter(SS==1)
# Numeric AECG components used in the mutual-information analysis.
AECG_component.num <- c("FS", "anti_SS_B_ab")

# Full set of AECG classification components plus related clinical
# variables (same items and order as used throughout the script).
AECG_component <- c(
  "Dry_mouth",
  "Dry_eye",
  "FS",
  "Lip_biopsy_tri",
  "anti_SS_B_ab",
  "anti_SS_B_ab_pn",
  "ACA_np",
  "Raynaud_np",
  "RF_pn",
  "IgG_pn",
  "Saxon_test_np",
  "Schirmer_test_np",
  "nijisei"
)
# Data shaping ----------------------------------------------------------
# Reshape to long format twice: first melt the candidate x-variables into
# (var, val), then melt the remaining measurement columns into
# (var_y, val_y), giving one row per x-variable / y-variable pair.
data_tidy <- data_Dichotomyze %>%
  filter(
    disease_group=="SS"
  ) %>%
  mutate(
    Ro60 = anti_SS_A_ab  # Ro60 titre is recorded under anti_SS_A_ab
  ) %>%
  # Factors must become numeric before gather() mixes them into one column.
  mutate_if(is.factor, as.numeric) %>%
  gather(
    var, val,
    -SubjID, -Ro52, -Ro52_log10,-Ro60, -Age,
    -disease_group, -nijisei
  ) %>%
  mutate(
    val = as.numeric(val),
    Ro60_log10 = log(Ro60,base = 10)
  ) %>%
  # Keep only AECG components (plus the extreme-Ro52 flag) as x-variables.
  filter(
    var %in%
      c(AECG_component,"Ro52_ext.High")
  ) %>%
  gather(
    var_y, val_y,
    -SubjID, -Age, -var, -val,
    -disease_group,- nijisei
  )
# ANOVA -------------------------------------------------------------------
df.ANOVA <- data_Dichotomyze %>%
dplyr::select(
SubjID,
Ro52_log10,
Ro60_log10,
AECG_component
)
# make list of covariates
list.formula <- dlply(
data.frame(
"id" = 1:length(AECG_component[c(1,2,4,6,7,8,9,10,11,12,13)]),
"var"= AECG_component[c(1,2,4,6,7,8,9,10,11,12,13)]
),
.(id),
function(D){
fmr = sprintf(
"%s~%s",
"Ro52_log10",
D$var
)
return(fmr)
}
)
res.lmrob.Ro52_log10 <-
llply(
list.formula,
function(L){
robustbase::lmrob(
as.formula(L),
df.ANOVA,
method = 'MM',
# setting="KS2014",
control = lmrob.control(maxit.scale = 2000)
)
}
)
# make list of covariates
list.formula_Ro60 <- dlply(
data.frame(
"id" = 1:length(AECG_component[c(1,2,4,6,7,8,9,10,11,12)]),
"var"= AECG_component[c(1,2,4,6,7,8,9,10,11,12)]
),
.(id),
function(D){
fmr = sprintf(
"%s~%s",
"Ro60_log10",
D$var
)
return(fmr)
}
)
res.lmrob.Ro60_log10 <-
llply(
list.formula_Ro60,
function(L){
robustbase::lmrob(
as.formula(L),
df.ANOVA,
method = 'MM',
# setting="KS2014",
control = lmrob.control(maxit.scale = 2000)
)
}
)
# Extract estimated coefficients -------------------------------------------
# Flatten a list of lmrob fits into one data frame of coefficient tables,
# one row per model term (term names kept in a "terms" column; the list
# element id is carried into the .id column by ldply, as before).
# Fix: the identical pipeline was copy-pasted for Ro52 and Ro60; factor it
# into a helper applied to both result lists.
extract_lmrob_coef <- function(res_list) {
  llply(res_list, summary) %>%
    ldply(
      function(L) {
        coef(L) %>%
          as.data.frame() %>%
          rownames_to_column("terms")
      }
    )
}
coef.lmrob.Ro52_log10 <- extract_lmrob_coef(res.lmrob.Ro52_log10)
coef.lmrob.Ro60_log10 <- extract_lmrob_coef(res.lmrob.Ro60_log10)
# Boxplot -----------------------------------------------------------------
gg.data_tidy <- data_tidy %>%
filter(
var %in% c(
AECG_component[c(1,2,6,7,8,9,10,11,12)],
"Ro52_ext.High"
)
) %>%
mutate(
thre = ifelse(
var_y=="Ro52", 10,
ifelse(
var_y=="Ro52_log10", 1, NA
)
),
Alpha = ifelse(
var_y %in% c("Ro52", "Ro52_log10"), 0.8, 0
)
) %>%
ggplot(
aes(
x = as.factor(val),
y = val_y,
yintercept = thre
)
)
plot.boxplot <- plot(
gg.data_tidy + geom_boxplot(outlier.alpha = 0) +
geom_beeswarm(col="black", size=1, alpha=1) +
# geom_jitter(col="black", size=0.2, alpha=1, height = 0.01, width = 0.1) +
geom_hline(aes(yintercept = thre, alpha = Alpha), size=0.5, col="black") +
facet_grid(
var + var_y ~ disease_group,# + nijisei,
scales = "free") +
theme_bw()
)
# Scatterplot -----------------------------------------------------------------
# Base ggplot: each y-variable against log10-scaled FS / anti-SS-B values.
gg.data_tidy.scatter <- data_tidy %>%
  filter(
    (var %in% c("FS", "anti_SS_B_ab"))
  ) %>%
  # filter(!is.na(val)) %>%
  ggplot(
    aes(
      x = as.numeric(val),
      y = val_y
    )
  )
# NOTE(review): this plot.scatterplot is displayed here but then overwritten
# by the facet_wrap version at the end of this section; only the later
# assignment reaches the Output section.
plot.scatterplot <- plot(
  gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
    geom_point(alpha=0.6) +
    scale_x_continuous(trans = "log10") +
    facet_grid( var + var_y ~., scales = "free") +
    theme_bw()
)
# NOTE(review): this re-assignment of gg.data_tidy.scatter is identical to
# the one above and therefore redundant.
gg.data_tidy.scatter <- data_tidy %>%
  filter(
    (var %in% c("FS", "anti_SS_B_ab"))
  ) %>%
  # filter(!is.na(val)) %>%
  ggplot(
    aes(
      x = as.numeric(val),
      y = val_y
    )
  )
# Scatterplots faceted by disease group and primary/secondary status.
plot.scatterplot.Subgroup_pri_sec <- plot(
  gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
    geom_point(alpha=0.6) +
    scale_x_continuous(trans = "log10") +
    facet_wrap( var + var_y ~ disease_group + nijisei, scales = "free") +
    theme_bw()
)
# Final (kept) version: one column of facets; used in the Output section.
plot.scatterplot <- plot(
  gg.data_tidy.scatter + #geom_boxplot(outlier.alpha = 0) +
    geom_point(alpha=0.6) +
    scale_x_continuous(trans = "log10") +
    facet_wrap( ~ var + var_y, scales = "free", ncol=1) +
    theme_bw()
)
# Missingness
gg.data_tidy.scatter_missing <- data_tidy %>%
filter(
(var %in% c("FS", "anti_SS_B_ab"))
) %>%
mutate(
flg.na = factor(
ifelse(is.na(val), "missing", "observed")
)
) %>%
ggplot(
aes(
x = as.numeric(flg.na),
y = val_y,
group = flg.na
)
)
plot.boxplot.scatter_missing.Subgroup_pri_sec <- plot(
gg.data_tidy.scatter_missing +
geom_boxplot(
color ="black",
outlier.alpha = 0
) +
geom_jitter(
color ="black",
width = 0.2, alpha=0.8, size=0.75
) +
facet_grid(
var + var_y ~ disease_group + nijisei, scales = "free"
) +
theme_bw() +
scale_x_discrete()
)
plot.boxplot.scatter_missing <- plot(
gg.data_tidy.scatter_missing +
geom_boxplot(color ="black", outlier.alpha = 0) +
geom_jitter(color ="black", width = 0.2, alpha=0.8, size=0.75) +
facet_wrap( ~ var + var_y, ncol = 1, scales = "free") +
theme_bw() +
scale_x_discrete()
)
# Mutual Information ------------------------------------------------------
MIPermute_Ro52_self <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro52_log10,
Y=data_Dichotomyze$Ro52_log10,
method = "shrink",
n.sim = 500
)[1,2]
MIPermute_Ro60_self <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro60_log10,
Y=data_Dichotomyze$Ro60_log10,
method = "shrink",
n.sim = 500
)[1,2]
AECG_component
pdf(
sprintf(
"%s/%s.pdf",
dir.output,
"hist.mutinfo_Ro52_SS.Primary"
)
)
for(i in 1:length(AECG_component.num)){
res.MIPermute <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro52_log10[data_Dichotomyze$nijisei=="Primary"],
Y=data_Dichotomyze[data_Dichotomyze$nijisei=="Primary",AECG_component.num[i]],
n.sim = 10000,
method="MIC",
disc.X = "none",
disc.Y = "none",
use = 'pairwise.complete.obs'
)
q.95 <- quantile(res.MIPermute$V1, 0.95)
assign(
sprintf(
"MIPermute_Ro52_%s", AECG_component.num[i]
),
res.MIPermute
)
hist(
res.MIPermute$V1,
breaks='FD',
main = AECG_component.num[i]
)
abline(
v=res.MIPermute[res.MIPermute$i==1, 'V1'],
col='red'
)
abline(
v=q.95,
col='red',
lty=2
)
print(AECG_component.num[i])
}
dev.off()
pdf(
sprintf(
"%s/%s.pdf",
dir.output,
"hist.mutinfo_Ro60_SS.Primary"
)
)
for(i in 1:length(AECG_component.num)){
res.MIPermute <- ExploratoryDataAnalysis::MIPermute(
#mutinformation(
X=data_Dichotomyze$Ro60_log10,
Y=data_Dichotomyze[,AECG_component.num[i]],
n.sim = 10000,
method="MIC",
use = 'pairwise.complete.obs'
)
q.95 <- quantile(
res.MIPermute$V1, 0.95
)
assign(
sprintf(
"MIPermute_Ro60_%s", AECG_component.num[i]
),
res.MIPermute
)
hist(
res.MIPermute$V1,
breaks='FD',
main = AECG_component.num[i]
)
abline(
v=res.MIPermute[res.MIPermute$i==1, 'V1'],
col='red'
)
abline(
v=q.95,
col='red',
lty=2
)
print(AECG_component.num[i])
}
dev.off()
# Tabulate the results from permutation test of MIC analysis --------------
MIPermute_Ro52_anti_SS_B_ab$pval <-
1 - rank(MIPermute_Ro52_anti_SS_B_ab$V1)/
nrow(MIPermute_Ro52_anti_SS_B_ab)
MIPermute_Ro52_anti_SS_B_ab$dataname <-
"MIPermute_Ro52_anti_SS_B_ab"
MIPermute_Ro60_anti_SS_B_ab$pval <-
1- rank(MIPermute_Ro60_anti_SS_B_ab$V1)/
nrow(MIPermute_Ro60_anti_SS_B_ab)
MIPermute_Ro60_anti_SS_B_ab$dataname <-
"MIPermute_Ro60_anti_SS_B_ab"
MIPermute_Ro52_FS$pval <-
1 - rank(MIPermute_Ro52_FS$V1)/
nrow(MIPermute_Ro52_FS)
MIPermute_Ro52_FS$dataname <-
"MIPermute_Ro52_FS"
MIPermute_Ro60_FS$pval <-
1 - rank(MIPermute_Ro60_FS$V1)/
nrow(MIPermute_Ro60_FS)
MIPermute_Ro60_FS$dataname <-
"MIPermute_Ro60_FS"
MIPermute <- MIPermute_Ro52_anti_SS_B_ab %>%
rbind(MIPermute_Ro52_FS) %>%
rbind(MIPermute_Ro60_anti_SS_B_ab) %>%
rbind(MIPermute_Ro60_FS) %>%
filter(i==1) %>%
dplyr::select(pval, dataname)
# Output ------------------------------------------------------------------
g1 <- ggplotGrob(plot.scatterplot)
g2 <- ggplotGrob(plot.boxplot.scatter_missing)
pdf(
sprintf(
"%s/%s.pdf",
dir.output,
"scatterplot_with_miss.box"
),
height = 56,
width = 10
)
plot_grid(
g1,g2,
align = "h",axis = "l", ncol = 2, rel_widths = c(5/7, 2/7)#, 1/8)
)
dev.off()
pdf(
file=sprintf(
"%s/%s.pdf",
dir.output,
"boxplot"
),
# type = "pdf",
# device = dev.cur(),
# dpi = 300,
width = 10,
height= 180
)
plot.boxplot
dev.off()
pdf(
file=sprintf(
"%s/%s.pdf",
dir.output,
"scatterplot"
),
# type = "pdf",
# device = dev.cur(),
# dpi = 300,
width = 70/8,
height= 70
)
plot.scatterplot
dev.off()
pdf(
file=sprintf(
"%s/%s.pdf",
dir.output,
"scatterplot.Subgroup_pri_sec"
),
# type = "pdf",
# device = dev.cur(),
# dpi = 300,
width = 70/4,
height= 70
)
plot.scatterplot.Subgroup_pri_sec
dev.off()
pdf(
file=sprintf(
"%s/%s.pdf",
dir.output,
"boxplot.scatter_missing"
),
# type = "pdf",
# device = dev.cur(),
# dpi = 300,
width = 7,
height= 70
)
plot.boxplot.scatter_missing
dev.off()
pdf(
file=sprintf(
"%s/%s.pdf",
dir.output,
"boxplot.scatter_missing.Subgroup_pri_sec"
),
# type = "pdf",
# device = dev.cur(),
# dpi = 300,
width = 5,
height= 70
)
plot.boxplot.scatter_missing.Subgroup_pri_sec
dev.off()
write.csv(
file = sprintf(
"%s/%s.csv",
dir.output, "p_value.MIPermute_SS.Primary"
),
MIPermute
)
write.csv(
file = sprintf(
"%s/%s.csv",
dir.output, "coef_lmrob_Ro52.AECGcomponent.total_SS"
),
coef.lmrob.Ro52_log10 %>%
mutate_if(
is.numeric, function(x)round(x, 3)
)
)
write.csv(
file = sprintf(
"%s/%s.csv",
dir.output, "coef_lmrob_Ro60.AECGcomponent.total_SS"
),
coef.lmrob.Ro60_log10 %>%
mutate_if(
is.numeric, function(x)round(x, 3)
)
)
# Endrant -----------------------------------------------------------------
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crayon-package.r, R/machinery.r
\docType{package}
\name{crayon}
\alias{crayon}
\alias{crayon-package}
\alias{crayon}
\alias{reset}
\alias{bold}
\alias{blurred}
\alias{italic}
\alias{underline}
\alias{inverse}
\alias{hidden}
\alias{strikethrough}
\alias{black}
\alias{red}
\alias{green}
\alias{yellow}
\alias{blue}
\alias{magenta}
\alias{cyan}
\alias{white}
\alias{silver}
\alias{bgBlack}
\alias{bgRed}
\alias{bgGreen}
\alias{bgYellow}
\alias{bgBlue}
\alias{bgMagenta}
\alias{bgCyan}
\alias{bgWhite}
\title{Colored terminal output}
\usage{
## Simple styles
red(...)
bold(...)
...
## See more styling below
}
\arguments{
\item{...}{Strings to style.}
}
\description{
With crayon it is easy to add color to terminal output, create styles
for notes, warnings, errors; and combine styles.
}
\details{
ANSI color support is automatically detected and used. Crayon was largely
inspired by chalk \url{https://github.com/sindresorhus/chalk}.
Crayon defines several styles, that can be combined. Each style in the list
has a corresponding function with the same name.
}
\section{General styles}{
\itemize{
\item reset
\item bold
\item blurred (usually called \sQuote{dim}, renamed to avoid name clash)
\item italic (not widely supported)
\item underline
\item inverse
\item hidden
\item strikethrough (not widely supported)
}
}
\section{Text colors}{
\itemize{
\item black
\item red
\item green
\item yellow
\item blue
\item magenta
\item cyan
\item white
\item silver (usually called \sQuote{gray}, renamed to avoid name clash)
}
}
\section{Background colors}{
\itemize{
\item bgBlack
\item bgRed
\item bgGreen
\item bgYellow
\item bgBlue
\item bgMagenta
\item bgCyan
\item bgWhite
}
}
\section{Styling}{
The styling functions take any number of character vectors as arguments,
and they concatenate and style them: \preformatted{ library(crayon)
cat(blue("Hello", "world!\n"))
}
Crayon defines the \code{\%+\%} string concatenation operator, to make it easy
to assemble strings with different styles. \preformatted{ cat("... to highlight the " \%+\% red("search term") \%+\%
" in a block of text\n")
}
Styles can be combined using the \code{$} operator: \preformatted{ cat(yellow$bgMagenta$bold('Hello world!\n'))
} See also \code{\link{combine_styles}}.
Styles can also be nested, and then the inner style takes
precedence: \preformatted{ cat(green(
'I am a green line ' \%+\%
blue$underline$bold('with a blue substring') \%+\%
' that becomes green again!\n'
))
}
It is easy to define your own themes: \preformatted{ error <- red $ bold
warn <- magenta $ underline
note <- cyan
cat(error("Error: subscript out of bounds!\n"))
cat(warn("Warning: shorter argument was recycled.\n"))
cat(note("Note: no such directory.\n"))
}
}
\examples{
cat(blue("Hello", "world!"))
cat("... to highlight the " \%+\% red("search term") \%+\%
" in a block of text")
cat(yellow$bgMagenta$bold('Hello world!'))
cat(green(
'I am a green line ' \%+\%
blue$underline$bold('with a blue substring') \%+\%
' that becomes green again!'
))
error <- red $ bold
warn <- magenta $ underline
note <- cyan
cat(error("Error: subscript out of bounds!\\n"))
cat(warn("Warning: shorter argument was recycled.\\n"))
cat(note("Note: no such directory.\\n"))
}
\seealso{
\code{\link{make_style}} for using the 256 ANSI colors.
}
| /man/crayon.Rd | no_license | jmpasmoi/crayon | R | false | true | 3,470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crayon-package.r, R/machinery.r
\docType{package}
\name{crayon}
\alias{crayon}
\alias{crayon-package}
\alias{crayon}
\alias{reset}
\alias{bold}
\alias{blurred}
\alias{italic}
\alias{underline}
\alias{inverse}
\alias{hidden}
\alias{strikethrough}
\alias{black}
\alias{red}
\alias{green}
\alias{yellow}
\alias{blue}
\alias{magenta}
\alias{cyan}
\alias{white}
\alias{silver}
\alias{bgBlack}
\alias{bgRed}
\alias{bgGreen}
\alias{bgYellow}
\alias{bgBlue}
\alias{bgMagenta}
\alias{bgCyan}
\alias{bgWhite}
\title{Colored terminal output}
\usage{
## Simple styles
red(...)
bold(...)
...
## See more styling below
}
\arguments{
\item{...}{Strings to style.}
}
\description{
With crayon it is easy to add color to terminal output, create styles
for notes, warnings, errors; and combine styles.
}
\details{
ANSI color support is automatically detected and used. Crayon was largely
inspired by chalk \url{https://github.com/sindresorhus/chalk}.
Crayon defines several styles, that can be combined. Each style in the list
has a corresponding function with the same name.
}
\section{General styles}{
\itemize{
\item reset
\item bold
\item blurred (usually called \sQuote{dim}, renamed to avoid name clash)
\item italic (not widely supported)
\item underline
\item inverse
\item hidden
\item strikethrough (not widely supported)
}
}
\section{Text colors}{
\itemize{
\item black
\item red
\item green
\item yellow
\item blue
\item magenta
\item cyan
\item white
\item silver (usually called \sQuote{gray}, renamed to avoid name clash)
}
}
\section{Background colors}{
\itemize{
\item bgBlack
\item bgRed
\item bgGreen
\item bgYellow
\item bgBlue
\item bgMagenta
\item bgCyan
\item bgWhite
}
}
\section{Styling}{
The styling functions take any number of character vectors as arguments,
and they concatenate and style them: \preformatted{ library(crayon)
cat(blue("Hello", "world!\n"))
}
Crayon defines the \code{\%+\%} string concatenation operator, to make it easy
to assemble strings with different styles. \preformatted{ cat("... to highlight the " \%+\% red("search term") \%+\%
" in a block of text\n")
}
Styles can be combined using the \code{$} operator: \preformatted{ cat(yellow$bgMagenta$bold('Hello world!\n'))
} See also \code{\link{combine_styles}}.
Styles can also be nested, and then the inner style takes
precedence: \preformatted{ cat(green(
'I am a green line ' \%+\%
blue$underline$bold('with a blue substring') \%+\%
' that becomes green again!\n'
))
}
It is easy to define your own themes: \preformatted{ error <- red $ bold
warn <- magenta $ underline
note <- cyan
cat(error("Error: subscript out of bounds!\n"))
cat(warn("Warning: shorter argument was recycled.\n"))
cat(note("Note: no such directory.\n"))
}
}
\examples{
cat(blue("Hello", "world!"))
cat("... to highlight the " \%+\% red("search term") \%+\%
" in a block of text")
cat(yellow$bgMagenta$bold('Hello world!'))
cat(green(
'I am a green line ' \%+\%
blue$underline$bold('with a blue substring') \%+\%
' that becomes green again!'
))
error <- red $ bold
warn <- magenta $ underline
note <- cyan
cat(error("Error: subscript out of bounds!\\n"))
cat(warn("Warning: shorter argument was recycled.\\n"))
cat(note("Note: no such directory.\\n"))
}
\seealso{
\code{\link{make_style}} for using the 256 ANSI colors.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uninstall.R
\name{uninstall}
\alias{uninstall}
\title{Uninstall a local development package.}
\usage{
uninstall(pkg = ".", unload = TRUE, quiet = FALSE, ...)
}
\arguments{
\item{pkg}{package description, can be path or package name. See
\code{\link[=as.package]{as.package()}} for more information}
\item{unload}{if \code{TRUE} (the default), will automatically unload the
package prior to uninstalling.}
\item{quiet}{if \code{TRUE} suppresses output from this function.}
\item{...}{additional arguments passed to \code{\link[=remove.packages]{remove.packages()}}.}
}
\description{
Uses \code{remove.packages} to uninstall the package.
To uninstall a package from a non-default library,
use \code{\link[withr:with_libpaths]{withr::with_libpaths()}}.
}
\seealso{
\code{\link[=with_debug]{with_debug()}} to install packages with debugging flags
set.
Other package installation: \code{\link{install}}
}
\concept{package installation}
| /man/uninstall.Rd | no_license | 2954722256/devtools | R | false | true | 1,013 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uninstall.R
\name{uninstall}
\alias{uninstall}
\title{Uninstall a local development package.}
\usage{
uninstall(pkg = ".", unload = TRUE, quiet = FALSE, ...)
}
\arguments{
\item{pkg}{package description, can be path or package name. See
\code{\link[=as.package]{as.package()}} for more information}
\item{unload}{if \code{TRUE} (the default), will automatically unload the
package prior to uninstalling.}
\item{quiet}{if \code{TRUE} suppresses output from this function.}
\item{...}{additional arguments passed to \code{\link[=remove.packages]{remove.packages()}}.}
}
\description{
Uses \code{remove.packages} to uninstall the package.
To uninstall a package from a non-default library,
use \code{\link[withr:with_libpaths]{withr::with_libpaths()}}.
}
\seealso{
\code{\link[=with_debug]{with_debug()}} to install packages with debugging flags
set.
Other package installation: \code{\link{install}}
}
\concept{package installation}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Feature_Selection.R
\name{DaMiR.FSelect}
\alias{DaMiR.FSelect}
\title{Feature selection for classification}
\usage{
DaMiR.FSelect(
data,
df,
th.corr = 0.6,
type = c("spearman", "pearson"),
th.VIP = 3,
nPlsIter = 1
)
}
\arguments{
\item{data}{A transposed data frame or a matrix of normalized expression
data.
Rows and Cols should be,
respectively, observations and features}
\item{df}{A data frame with known variables; at least one column
with
'class' label must be included}
\item{th.corr}{Minimum threshold of correlation between class and
PCs; default is 0.6. Note. If df$class has more than two levels,
this option is disabled and the number of PCs is set to 3.}
\item{type}{Type of correlation metric; default is "spearman"}
\item{th.VIP}{Threshold for \code{bve_pls} function, to remove
non-important variables; default is 3}
\item{nPlsIter}{Number of times that \link{bve_pls} has to run.
Each iteration produces a set of selected features, usually similar
to each other but not exactly the same! When nPlsIter is > 1, the
intersection between each set of selected features is performed;
so that, only the most robust features are selected. Default is 1}
}
\value{
A list containing:
\itemize{
\item An expression matrix with only informative features.
\item A data frame with class and optional variables information.
}
}
\description{
This function identifies the class-correlated principal
components (PCs)
which are then used to implement a backward variable elimination
procedure for the removal of non informative features.
}
\details{
The function aims to reduce the number of features to obtain
the most informative variables for classification purpose. First,
PCs obtained by principal component analysis (PCA) are correlated
with "class". The correlation threshold is defined by the user
in \code{th.corr} argument. The higher is the correlation, the
lower is the number of PCs returned. Importantly, if df$class has
more than two levels, the number of PCs is automatically set to 3.
In a binary experimental setting, users should pay attention to
appropriately set the \code{th.corr} argument because it will also
affect the total number of selected features that ultimately
depend on the number of PCs. The \code{\link{bve_pls}} function
of \code{plsVarSel} package is, then, applied.
This function exploits a backward variable elimination procedure
coupled to a partial least squares approach to remove those variables
which are less informative with respect to class. The returned
vector of variables is further reduced by the following
\code{\link{DaMiR.FReduct}} function in order to obtain a subset of
non correlated putative predictors.
}
\examples{
# use example data:
data(data_norm)
data(df)
# extract expression data from SummarizedExperiment object
# and transpose the matrix:
t_data<-t(assay(data_norm))
t_data <- t_data[,seq_len(100)]
# select class-related features
data_reduced <- DaMiR.FSelect(t_data, df,
th.corr = 0.7, type = "spearman", th.VIP = 1)
}
\references{
Tahir Mehmood, Kristian Hovde Liland, Lars Snipen and
Solve Saebo (2011).
A review of variable selection methods in Partial Least Squares
Regression. Chemometrics and Intelligent Laboratory Systems
118, pp. 62-69.
}
\seealso{
\itemize{
\item \code{\link{bve_pls}}
\item \code{\link{DaMiR.FReduct}}
}
}
\author{
Mattia Chiesa, Luca Piacentini
}
| /man/DaMiR.FSelect.Rd | no_license | BioinfoMonzino/DaMiRseq | R | false | true | 3,451 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Feature_Selection.R
\name{DaMiR.FSelect}
\alias{DaMiR.FSelect}
\title{Feature selection for classification}
\usage{
DaMiR.FSelect(
data,
df,
th.corr = 0.6,
type = c("spearman", "pearson"),
th.VIP = 3,
nPlsIter = 1
)
}
\arguments{
\item{data}{A transposed data frame or a matrix of normalized expression
data.
Rows and Cols should be,
respectively, observations and features}
\item{df}{A data frame with known variables; at least one column
with
'class' label must be included}
\item{th.corr}{Minimum threshold of correlation between class and
PCs; default is 0.6. Note. If df$class has more than two levels,
this option is disabled and the number of PCs is set to 3.}
\item{type}{Type of correlation metric; default is "spearman"}
\item{th.VIP}{Threshold for \code{bve_pls} function, to remove
non-important variables; default is 3}
\item{nPlsIter}{Number of times that \link{bve_pls} has to run.
Each iteration produces a set of selected features, usually similar
to each other but not exactly the same! When nPlsIter is > 1, the
intersection between each set of selected features is performed;
so that, only the most robust features are selected. Default is 1}
}
\value{
A list containing:
\itemize{
\item An expression matrix with only informative features.
\item A data frame with class and optional variables information.
}
}
\description{
This function identifies the class-correlated principal
components (PCs)
which are then used to implement a backward variable elimination
procedure for the removal of non informative features.
}
\details{
The function aims to reduce the number of features to obtain
the most informative variables for classification purpose. First,
PCs obtained by principal component analysis (PCA) are correlated
with "class". The correlation threshold is defined by the user
in \code{th.corr} argument. The higher is the correlation, the
lower is the number of PCs returned. Importantly, if df$class has
more than two levels, the number of PCs is automatically set to 3.
In a binary experimental setting, users should pay attention to
appropriately set the \code{th.corr} argument because it will also
affect the total number of selected features that ultimately
depend on the number of PCs. The \code{\link{bve_pls}} function
of \code{plsVarSel} package is, then, applied.
This function exploits a backward variable elimination procedure
coupled to a partial least squares approach to remove those variables
which are less informative with respect to class. The returned
vector of variables is further reduced by the following
\code{\link{DaMiR.FReduct}} function in order to obtain a subset of
non correlated putative predictors.
}
\examples{
# use example data:
data(data_norm)
data(df)
# extract expression data from SummarizedExperiment object
# and transpose the matrix:
t_data<-t(assay(data_norm))
t_data <- t_data[,seq_len(100)]
# select class-related features
data_reduced <- DaMiR.FSelect(t_data, df,
th.corr = 0.7, type = "spearman", th.VIP = 1)
}
\references{
Tahir Mehmood, Kristian Hovde Liland, Lars Snipen and
Solve Saebo (2011).
A review of variable selection methods in Partial Least Squares
Regression. Chemometrics and Intelligent Laboratory Systems
118, pp. 62-69.
}
\seealso{
\itemize{
\item \code{\link{bve_pls}}
\item \code{\link{DaMiR.FReduct}}
}
}
\author{
Mattia Chiesa, Luca Piacentini
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/accesorMethods.r
\name{rec_month<-}
\alias{rec_month<-}
\title{rec_month}
\usage{
rec_month(object, ...) <- value
}
\description{
rec_month
}
| /man/rec_month-set.Rd | no_license | lauratboyer/FLR4MFCL | R | false | false | 229 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/accesorMethods.r
\name{rec_month<-}
\alias{rec_month<-}
\title{rec_month}
\usage{
rec_month(object, ...) <- value
}
\description{
rec_month
}
|
#' Take a variable bounded above/below/both and return an unbounded (normalized) variable.
#'
#' This transforms bounded variables so that they are not bounded.
#' First, variables are coerced away from the boundaries by a distance of \code{tol}.
#' The natural log is used for variables bounded either above or below but not both.
#' The inverse of the standard normal cumulative distribution function
#' (the quantile function) is used for variables bounded above and below.
#'
#' @param x A vector, matrix, array, or dataframe with value to be
#' coerced into a range or set.
#' @param constraints A list of constraints. See the examples below
#' for formatting details.
#' @param tol Variables will be forced to be at least this far away
#' from the boundaries.
#' @param trim If TRUE values in x < lower and values in x > upper
#' will be set to lower and upper, respectively, before normalizing.
#' @return An object of the same class as \code{x} with the values
#' transformed so that they spread out over any part of the real
#' line.
#'
#' A variable \code{x} that is bounded below by \code{lower} is
#' transformed to \code{log(x - lower)}.
#'
#' A variable \code{x} that is bounded above by \code{upper} is
#' transformed to \code{log(upper - x)}.
#'
#' A variable \code{x} that is bounded below by \code{lower} and
#' above by \code{upper} is transformed to
#' \code{qnorm((x-lower)/(upper - lower))}.
#' @export
#' @examples
#' constraints=list(lower=5) # lower bound when constraining to an interval
#' constraints=list(upper=10) # upper bound when constraining to an interval
#' constraints=list(lower=5, upper=10) # both lower and upper bounds
#' @author Stephen R. Haptonstahl \email{srh@@haptonstahl.org}
NormalizeBoundedVariable <-
  function(x,
           constraints,
           tol=stats::pnorm(-5),
           trim=TRUE
  ) {
    # Transform a bounded variable to an unbounded one:
    #   bounded below  -> log(x - lower)
    #   bounded above  -> log(upper - x)
    #   bounded both   -> qnorm((x - lower) / (upper - lower))
    # Missing constraints default to an unbounded side.
    if (is.null(constraints$lower)) constraints$lower <- -Inf
    if (is.null(constraints$upper)) constraints$upper <- Inf
    if (constraints$upper < constraints$lower) stop("'upper' must be greater than 'lower.'")
    if (trim) {
      # clamp out-of-range values onto the boundaries
      x <- pmax(x, constraints$lower)
      x <- pmin(x, constraints$upper)
    } else {
      # NA-safe range checks: NA entries propagate through the transform
      # instead of making min()/max() raise an obscure error
      if (any(x < constraints$lower, na.rm = TRUE)) stop("All values in x must be greater than or equal to the lower bound.")
      if (any(x > constraints$upper, na.rm = TRUE)) stop("All values in x must be less than or equal to the upper bound.")
    }
    # '&&' short-circuits, so the interval-width test only runs when both
    # bounds are finite (the original '&' was non-idiomatic scalar logic)
    if (is.finite(constraints$lower) && is.finite(constraints$upper) &&
        tol > (constraints$upper - constraints$lower) / 2) {
      stop("'tol' must be less than half the distance between upper and lower bounds.")
    }
    # force values away from boundaries so log()/qnorm() stay finite
    if (is.finite(constraints$lower)) x <- pmax(constraints$lower + tol, x)
    if (is.finite(constraints$upper)) x <- pmin(constraints$upper - tol, x)
    if (is.infinite(constraints$lower) && is.infinite(constraints$upper)) {
      # not bounded; degenerate case: return unchanged
      return(x)
    } else if (is.infinite(constraints$lower)) {
      # only bounded above
      return(log(constraints$upper - x))
    } else if (is.infinite(constraints$upper)) {
      # only bounded below
      return(log(x - constraints$lower))
    } else {
      # bounded above and below: probit transform of the rescaled value
      return(stats::qnorm((x - constraints$lower) / (constraints$upper - constraints$lower)))
    }
  }
| /dev/FastImputation/R/NormalizeBoundedVariable.R | no_license | shaptonstahl/FastImputation | R | false | false | 3,374 | r | #' Take a variable bounded above/below/both and return an unbounded (normalized) variable.
#'
#' This transforms bounded variables so that they are not bounded.
#' First, variables are coerced away from the boundaries by a distance of \code{tol}.
#' The natural log is used for variables bounded either above or below but not both.
#' The inverse of the standard normal cumulative distribution function
#' (the quantile function) is used for variables bounded above and below.
#'
#' @param x A vector, matrix, array, or dataframe with value to be
#' coerced into a range or set.
#' @param constraints A list of constraints. See the examples below
#' for formatting details.
#' @param tol Variables will be forced to be at least this far away
#' from the boundaries.
#' @param trim If TRUE values in x < lower and values in x > upper
#' will be set to lower and upper, respectively, before normalizing.
#' @return An object of the same class as \code{x} with the values
#' transformed so that they spread out over any part of the real
#' line.
#'
#' A variable \code{x} that is bounded below by \code{lower} is
#' transformed to \code{log(x - lower)}.
#'
#' A variable \code{x} that is bounded above by \code{upper} is
#' transformed to \code{log(upper - x)}.
#'
#' A variable \code{x} that is bounded below by \code{lower} and
#' above by \code{upper} is transformed to
#' \code{qnorm((x-lower)/(upper - lower))}.
#' @export
#' @examples
#' constraints=list(lower=5) # lower bound when constraining to an interval
#' constraints=list(upper=10) # upper bound when constraining to an interval
#' constraints=list(lower=5, upper=10) # both lower and upper bounds
#' @author Stephen R. Haptonstahl \email{srh@@haptonstahl.org}
NormalizeBoundedVariable <-
  function(x,
           constraints,
           tol=stats::pnorm(-5),
           trim=TRUE
  ) {
    # Transform a bounded variable to an unbounded one:
    #   bounded below  -> log(x - lower)
    #   bounded above  -> log(upper - x)
    #   bounded both   -> qnorm((x - lower) / (upper - lower))
    # Missing constraints default to an unbounded side.
    if (is.null(constraints$lower)) constraints$lower <- -Inf
    if (is.null(constraints$upper)) constraints$upper <- Inf
    if (constraints$upper < constraints$lower) stop("'upper' must be greater than 'lower.'")
    if (trim) {
      # clamp out-of-range values onto the boundaries
      x <- pmax(x, constraints$lower)
      x <- pmin(x, constraints$upper)
    } else {
      # NA-safe range checks: NA entries propagate through the transform
      # instead of making min()/max() raise an obscure error
      if (any(x < constraints$lower, na.rm = TRUE)) stop("All values in x must be greater than or equal to the lower bound.")
      if (any(x > constraints$upper, na.rm = TRUE)) stop("All values in x must be less than or equal to the upper bound.")
    }
    # '&&' short-circuits, so the interval-width test only runs when both
    # bounds are finite (the original '&' was non-idiomatic scalar logic)
    if (is.finite(constraints$lower) && is.finite(constraints$upper) &&
        tol > (constraints$upper - constraints$lower) / 2) {
      stop("'tol' must be less than half the distance between upper and lower bounds.")
    }
    # force values away from boundaries so log()/qnorm() stay finite
    if (is.finite(constraints$lower)) x <- pmax(constraints$lower + tol, x)
    if (is.finite(constraints$upper)) x <- pmin(constraints$upper - tol, x)
    if (is.infinite(constraints$lower) && is.infinite(constraints$upper)) {
      # not bounded; degenerate case: return unchanged
      return(x)
    } else if (is.infinite(constraints$lower)) {
      # only bounded above
      return(log(constraints$upper - x))
    } else if (is.infinite(constraints$upper)) {
      # only bounded below
      return(log(x - constraints$lower))
    } else {
      # bounded above and below: probit transform of the rescaled value
      return(stats::qnorm((x - constraints$lower) / (constraints$upper - constraints$lower)))
    }
  }
|
# Name: long_all_data_test.R
# Auth: Umar Niazi u.niazi@imperial.ac.uk
# Date: 11/05/15
# Desc: analysis of all combined tb ma data
# NOTE(review): the sourced header presumably provides p.old, f_Plot3DPCA
# and the modelling libraries (randomForest, MASS, caret, leaps, ROCR)
# used below - confirm against tb_biomarker_ma_header.R
source('tb_biomarker_ma_header.R')
## data loading
# load the data, clean and create factors
# file.choose() is interactive: the user picks the expression matrix CSV,
# then the sample annotation CSV
dfExp = read.csv(file.choose(), header=T, row.names=1)
# load the sample annotation
dfSamples = read.csv(file.choose(), header=T)
# sort both the samples and expression data in same order
rownames(dfSamples) = as.character(dfSamples$Sample_ID)
dfSamples = dfSamples[colnames(dfExp),]
# create factors
fGroups = factor(dfSamples$Illness1)
# create a second factor with only 2 levels
# keep ptb at 1 for downstream predictions
# level order c('OD', 'ATB') makes 'ATB' the second level, i.e. the
# positive class for the classifiers fitted later in this script
fGroups.2 = as.character(dfSamples$Illness)
fGroups.2 = factor(fGroups.2, levels = c('OD', 'ATB'))
dfSamples$fGroups.2 = fGroups.2
## data quality checks
# work on a copy of the expression matrix
m = dfExp
# pca on samples i.e. covariance matrix of m
# samples become rows after t(), so the PCs summarise sample-level structure
pr.out = prcomp(t(m), scale=T)
fSamples = dfSamples$Illness1
col.p = rainbow(length(unique(fSamples)))
col = col.p[as.numeric(fSamples)]
# plot the pca components
plot.new()
legend('center', legend = unique(fSamples), fill=col.p[as.numeric(unique(fSamples))])
par(mfrow=c(2,2))
plot(pr.out$x[,1:2], col=col, pch=19, xlab='Z1', ylab='Z2',
main='PCA comp 1 and 2')
plot(pr.out$x[,c(1,3)], col=col, pch=19, xlab='Z1', ylab='Z3',
main='PCA comp 1 and 3')
plot(pr.out$x[,c(2,3)], col=col, pch=19, xlab='Z2', ylab='Z3',
main='PCA comp 2 and 3')
par(p.old)
f_Plot3DPCA(pr.out$x[,1:3], col, pch=19, xlab='Z1', ylab='Z2', zlab='Z3',
main='Plot of first 3 components')
par(p.old)
# remove the outlier groups from the data
# these can be seen on the pc2 and pc3 plots
# NOTE(review): the PC cut-offs below are hard-coded for this particular
# dataset and must be re-tuned if the input data changes
m = pr.out$x[,1:3]
m = data.frame(m, fSamples)
i = which(m$PC1 > 130 & m$PC2 > 90)
i = unique(c(i, which(m$PC2 > 120 & m$PC3 > 0)))
i = unique(c(i, which(m$PC2 > 0 & m$PC3 < -100)))
# NOTE(review): 'c' shadows base::c as a variable name here; function
# calls like c(1,3) still resolve to base::c, but renaming would be safer
c = col
c[i] = 'black'
## plot outlier groups
par(mfrow=c(2,2))
plot(pr.out$x[,1:2], col=c, pch=19, xlab='Z1', ylab='Z2',
main='PCA comp 1 and 2')
plot(pr.out$x[,c(1,3)], col=c, pch=19, xlab='Z1', ylab='Z3',
main='PCA comp 1 and 3')
plot(pr.out$x[,c(2,3)], col=c, pch=19, xlab='Z2', ylab='Z3',
main='PCA comp 2 and 3')
par(p.old)
f_Plot3DPCA(pr.out$x[,1:3], c, pch=19, xlab='Z1', ylab='Z2', zlab='Z3',
main='Plot of first 3 components')
par(p.old)
# remove the outliers
# drop outlier samples from both the expression matrix and the annotation
cvOutliers = rownames(m)[i]
m = match(cvOutliers, colnames(dfExp))
dfExp = dfExp[,-m]
m = match(cvOutliers, dfSamples$Sample_ID)
dfSamples = dfSamples[-m,]
gc()
### analysis
## extract data
# count matrix
mDat = as.matrix(dfExp)
# phenotypic data
fGroups.2 = dfSamples$fGroups.2
## data cleaning and formatting before statistical analysis
# remove any rows with NAN data
f = is.finite(rowSums(mDat))
table(f)
mDat = mDat[f,]
# scale across samples i.e. along columns
mDat = scale(mDat)
# select a subset of genes based on coefficient of variation.
# transpose so rows = samples, cols = genes for the per-gene apply calls
mDat = t(mDat)
# calculate the coef of var for each gene
cv = apply(mDat, 2, function(x) sd(x)/abs(mean(x)))
# check cv
summary(cv)
# cut data into groups based on quantiles of cv
cut.pts = quantile(cv, probs = 0:10/10)
groups = cut(cv, breaks = cut.pts, include.lowest = T, labels = 0:9)
iMean = apply(mDat, 2, mean)
iVar = apply(mDat, 2, var)
coplot((cv) ~ iMean | groups)
coplot(iVar ~ iMean | groups)
# choose genes with small cv
# f = cv <= 0.2
# choosing groups from quantile 0 to 40
# i.e. keep the 40% of genes with the lowest coefficient of variation
mDat = mDat[,groups %in% c(0, 1, 2, 3)]
# select a subset of genes that show differential expression
# a gene is kept only if significant (BH-adjusted p < 0.1) under BOTH
# the t-test and the wilcoxon test
p.t = apply(mDat, 2, function(x) t.test(x ~ fGroups.2)$p.value)
p.w = apply(mDat, 2, function(x) wilcox.test(x ~ fGroups.2)$p.value)
p.t.adj = p.adjust(p.t, 'BH')
p.w.adj = p.adjust(p.w, 'BH')
# NOTE(review): 't' shadows base::t below; the transpose function is not
# called again in this script so this is harmless but fragile
t = names(p.t.adj[p.t.adj < 0.1])
w = names(p.w.adj[p.w.adj < 0.1])
n = unique(c(w, t))
f1 = n %in% t
f2 = n %in% w
f = f1 & f2
n2 = n[f]
mDat.sub = mDat[, colnames(mDat) %in% n2]
# keep 20% of data as test set
# NOTE(review): no set.seed() before this sample(), so the train/test
# split is not reproducible between runs
test = sample(1:nrow(dfSamples), size = nrow(dfSamples) * 0.2, replace = F)
dfSamples.train = dfSamples[-test,]
dfSamples.test = dfSamples[test,]
mDat.sub.train = mDat.sub[-test,]
mDat.sub.test = mDat.sub[test,]
lData = list(test=test, sample=dfSamples, expression=mDat.sub)
lData$desc = 'Longs data set for including test set vector'
# NOTE(review): 'Objects/' is only created further down in the script
# (dir.create); this save() fails on a fresh checkout unless the folder
# already exists - consider moving dir.create above this line
save(lData, file='Objects/long_data_set.rds')
### model fitting and variable selection
## use random forest on training data for variable selection
dfData = as.data.frame(mDat.sub.train)
dfData$fGroups.2 = dfSamples.train$fGroups.2
set.seed(1)
rf.fit.1 = randomForest(fGroups.2 ~., data=dfData, importance = TRUE)
# save the results to save time for next time
dir.create('Objects', showWarnings = F)
save(rf.fit.1, file='Objects/long.rf.fit.1.rds')
# variables of importance
varImpPlot(rf.fit.1)
dfImportance = as.data.frame(importance(rf.fit.1))
dfImportance = dfImportance[order(dfImportance$MeanDecreaseAccuracy, decreasing = T),]
hist(dfImportance$MeanDecreaseAccuracy)
dfImportance.ATB = dfImportance[order(dfImportance$ATB, decreasing = T),]
# select the top few genes looking at the distribution of error rates
### TAG 1
# choose the top proteins for ATB
# the >= 2 cut-off on the class-specific importance is chosen by eye
# from the histogram above
hist(dfImportance.ATB$ATB)
f = which(dfImportance.ATB$ATB >= 2)
length(f)
cvTopGenes = rownames(dfImportance.ATB)[f]
# subset the data based on these selected genes from training dataset
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
dfData$fGroups = dfSamples.train$fGroups.2
### Further variable classification check
### using CV and ROC
dfData.full = dfData
iBoot = 20
## as the 2 class proportions are not equal, fit random forest multiple times on random samples
## containing equal proportions of both classes and check variable importance measures
# fit random forests multiple times
# store results
lVarImp = vector('list', iBoot)
for (i in 1:iBoot) {
# get indices of the particular factors in data table
ind.o = which(dfData.full$fGroups == 'OD')
ind.p = which(dfData.full$fGroups == 'ATB')
# take sample of equal in size from group OD and ATB
ind.o.s = sample(ind.o, size = length(ind.p), replace = F)
# sample of ATB groups, i.e. take everything as it is smaller group
ind.p.s = sample(ind.p, size=length(ind.p), replace=F)
# join the sample indices together
ind = sample(c(ind.o.s, ind.p.s), replace=F)
# take sample from the full dataset
dfData = dfData.full[ind,]
# fit model
fit.rf = randomForest(fGroups ~., data=dfData, importance = TRUE, ntree = 500)
# get variables importance
df = importance(fit.rf)
df = df[order(df[,'MeanDecreaseAccuracy'], decreasing = T),]
# put in list
lVarImp[[i]] = df
} # for
## put data for each boot of each variable together in a dataframe
# stacking the per-bootstrap importance matrices gives iBoot rows per
# gene (matrices allow duplicate rownames), which tapply groups below
df = NULL
for (i in 1:iBoot) df = rbind(df, lVarImp[[i]])
# convert rownames i.e. gene names to factors
f = as.factor(rownames(df))
# calculate mean and sd for each gene
ivMean = tapply(df[,'MeanDecreaseAccuracy'], f, mean)
ivSD = tapply(df[,'MeanDecreaseAccuracy'], f, sd)
df = as.data.frame(df)
df$Symbol = rownames(df)
dfRF.boot = df
# boxplots
par(mar=c(6,4,3,2)+0.1)
boxplot(df$MeanDecreaseAccuracy ~ df$Symbol, las=2)
# calculate coefficient of variation
cv = ivSD/abs(ivMean)
# split data into groups based on cv
g = cut(cv, breaks = quantile(cv, 0:10/10), include.lowest = T)
coplot(ivSD ~ ivMean | g)
gl = cut(cv, breaks = quantile(cv, 0:10/10), include.lowest = T, labels = 0:9)
rm(dfData)
rm(dfData.full)
par(p.old)
dfRF.boot.stats = data.frame(ivMean, ivSD, cv, groups=g, group.lab=gl)
## Decide on a cutoff here
## based on coefficient of variation
# keep only genes in the two lowest cv deciles, i.e. those whose
# importance is most stable across the bootstrap refits
cvTopGenes.step.1 = cvTopGenes
f = cv[gl %in% c(0, 1)]
cvTopGenes = names(f)
## look at the correlation of the genes to remove collinear genes
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
#dfData$fGroups = dfSamples.train$fGroups.2
mCor = cor(dfData)
# findCorrelation (caret, loaded via the sourced header) flags features
# whose pairwise absolute correlation exceeds the cutoff
i = findCorrelation(mCor, cutoff = 0.7)
n = colnames(mCor)[i]
# remove these correlated features
cvTopGenes.step.2 = cvTopGenes
# BUGFIX: the original cvTopGenes[-which(cvTopGenes %in% n)] returns an
# EMPTY vector when no feature is flagged, because negative indexing
# with integer(0) selects nothing; logical indexing handles that case
cvTopGenes = cvTopGenes[!(cvTopGenes %in% n)]
rm(dfData)
## check for the minimum sized model using test and training sets
## use variable selection method
dfData.train = as.data.frame(mDat.sub.train)
dfData.train = dfData.train[,colnames(dfData.train) %in% cvTopGenes]
dfData.train$fGroups = dfSamples.train$fGroups.2
# create a test set on half the data
# (random TRUE/FALSE mask, so the split is roughly 50/50, not exact)
test = sample(c(T,F), size =nrow(dfData.train), replace = T)
dfData.test = dfData.train[test,]
dfData.train = dfData.train[!test,]
# exhaustive best-subset search (leaps::regsubsets) over the candidates
reg = regsubsets(fGroups ~ ., data=dfData.train, nvmax = length(cvTopGenes), method='exhaustive')
plot(reg, scale='bic')
# test for validation errors in the test set
# for each subset size i, fit an LDA on the selected genes and record
# the misclassification rate on both halves
ivCV.train = rep(NA, length=length(cvTopGenes))
ivCV.test = rep(NA, length=length(cvTopGenes))
for (i in 1:length(cvTopGenes)){
# get the genes in each subset
n = names(coef(reg, i))[-1]
n = c(n, 'fGroups')
dfDat.train = dfData.train[,colnames(dfData.train) %in% n]
dfDat.test = dfData.test[,colnames(dfData.test) %in% n]
# fit the lda model on training dataset
fit.lda = lda(fGroups ~ ., data=dfDat.train)
# test error rate on test dataset
p = predict(fit.lda, newdata=dfDat.test)
# calculate test error
ivCV.test[i] = mean(p$class != dfDat.test$fGroups)
# calculate training error
p = predict(fit.lda, newdata=dfDat.train)
# calculate error
ivCV.train[i] = mean(p$class != dfDat.train$fGroups)
}
# test error rate
m = cbind(test=ivCV.test, train=ivCV.train)
matplot(1:nrow(m), m, type='l', lty=1, main='test/training error rate', xlab='number of var', ylab='error')
legend('topright', legend = colnames(m), lty=1, col=1:2)
## choose the best model after refitting on the full training data set
## choose which model is the best?
# pick the subset size with the smallest validation error
i = which.min(ivCV.test)[1]
# refit subset using i number of genes on all data
dfData = rbind(dfData.test, dfData.train)
reg = regsubsets(fGroups ~ ., data=dfData, nvmax = length(cvTopGenes), method='exhaustive')
# choose these variables
cvTopGenes.step.3 = cvTopGenes
cvTopGenes = names(coef(reg, i))[-1]
rm(list = c('dfData', 'dfData.train', 'dfData.test'))
### cross validation with ROC
#### CV with ROC
# choose all data together for nested 10 fold cv
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
dfData$fGroups = dfSamples.train$fGroups.2
### Further variable classification check
### using CV and ROC
dfData.full = dfData
set.seed(1)
# 50 outer repeats, each on a class-balanced resample, each running a
# 10-fold CV of LDA; posterior probabilities and labels are collected
# per repeat for ROCR
lPred = vector(mode = 'list', length = 50)
lLab = vector(mode = 'list', length=50)
iCv.error = NULL
for (oo in 1:50){
t.lPred = NULL
t.lLab = NULL
# select a subset of equal numbers for others and SA
ind.o = which(dfData.full$fGroups == 'OD')
ind.p = which(dfData.full$fGroups == 'ATB')
ind.o.s = sample(ind.o, size = length(ind.p), replace = F)
ind.p.s = sample(ind.p, size=length(ind.p), replace=F)
ind = sample(c(ind.o.s, ind.p.s), replace=F)
dfData = dfData.full[ind,]
# inner loop runs once; kept as a loop presumably for easy extension
for (o in 1:1){
# perform 10 fold cross validation
k = 10
folds = sample(1:k, nrow(dfData), replace = T, prob = rep(1/k, times=k))
# choose the fold to fit and test the model
for (i in 1:k){
# check if selected fold leads to 0 for a class
if ((length(unique(dfData$fGroups[folds != i])) < 2) || (length(unique(dfData$fGroups[folds == i])) < 2)) next
# check if fold too small to fit model
if (nrow(dfData[folds != i,]) < 3) next
# fit model on data not in fold
fit = lda(fGroups ~ ., data=dfData[folds != i,])
# predict on data in fold
pred = predict(fit, newdata = dfData[folds == i,])$posterior[,'ATB']
name = paste('pred',oo, o, i,sep='.' )
t.lPred[[name]] = pred
name = paste('label',oo,o, i,sep='.' )
t.lLab[[name]] = dfData$fGroups[folds == i] == 'ATB'
pred = predict(fit, newdata = dfData[folds == i,])$class
iCv.error = append(iCv.error, mean(pred != dfData$fGroups[folds == i]))
}
}
t.lPred = unlist(t.lPred)
t.lLab = unlist(t.lLab)
lPred[[oo]] = t.lPred
lLab[[oo]] = t.lLab
}
# ROCR averages the 50 repeats vertically and shows +/- 2 sd spread
pred = prediction(lPred, lLab)
perf = performance(pred, 'tpr', 'fpr')
auc = performance(pred, 'auc')
plot(perf, main=paste('ROC Prediction of for', 'ATB'),
spread.estimate='stddev', avg='vertical', spread.scale=2)
auc.cv = paste('auc=', signif(mean(as.numeric(auc@y.values)), digits = 3))
cv.err = paste('CV Error=', signif(mean(iCv.error), 3))
#legend('bottomright', legend = c(auc, cv))
abline(0, 1, lty=2)
## fit model and roc without cross validation, just on test and training data
# final check: one LDA trained on the whole training split, evaluated on
# the 20% hold-out test split made earlier
dfData.train = as.data.frame(mDat.sub.train)
dfData.train = dfData.train[,colnames(dfData.train) %in% cvTopGenes]
dfData.train$fGroups = dfSamples.train$fGroups.2
dfData.test = as.data.frame(mDat.sub.test)
dfData.test = dfData.test[,colnames(dfData.test) %in% cvTopGenes]
dfData.test$fGroups = dfSamples.test$fGroups.2
fit = lda(fGroups ~ ., data=dfData.train)
# predict on hold-out test data
pred = predict(fit, newdata = dfData.test)$posterior[,'ATB']
ivPred = pred
ivLab = dfData.test$fGroups == 'ATB'
pred = predict(fit, newdata = dfData.test)$class
iCv.error = mean(pred != dfData.test$fGroups)
pred = prediction(ivPred, ivLab)
perf = performance(pred, 'tpr', 'fpr')
auc = performance(pred, 'auc')
plot(perf, add=T, lty=3, lwd=2, col=2)#main=paste('ROC Prediction of for', 'SA'))
auc.t = paste('t.auc=', signif(mean(as.numeric(auc@y.values)), digits = 3))
err.t = paste('t Error=', signif(mean(iCv.error), 3))
legend('bottomright', legend = c(auc.cv, cv.err, auc.t, err.t))
abline(0, 1, lty=2)
## plot these expression values for these genes
# side-by-side boxplots of the final gene panel, split by class, for the
# training and test partitions
par(mfrow=c(1,2))
x = stack(dfData.train)
x$f = dfData.train$fGroups
boxplot(values ~ f+ind, data=x, las=2, par=par(mar=c(8, 4, 2, 2)+0.1), main='Training Data')
x = stack(dfData.test)
x$f = dfData.test$fGroups
boxplot(values ~ f+ind, data=x, las=2, par=par(mar=c(8, 4, 2, 2)+0.1), main='Test Data')
| /long_all_data_test.R | permissive | uhkniazi/TB_biomarker_ma | R | false | false | 13,804 | r | # Name: long_all_data_test.R
# Auth: Umar Niazi u.niazi@imperial.ac.uk
# Date: 11/05/15
# Desc: analysis of all combined tb ma data
source('tb_biomarker_ma_header.R')
## data loading
# load the data, clean and create factors
dfExp = read.csv(file.choose(), header=T, row.names=1)
# load the sample annotation
dfSamples = read.csv(file.choose(), header=T)
# sort both the samples and expression data in same order
rownames(dfSamples) = as.character(dfSamples$Sample_ID)
dfSamples = dfSamples[colnames(dfExp),]
# create factors
fGroups = factor(dfSamples$Illness1)
# create a second factor with only 2 levels
# keep ptb at 1 for downstream predictions
fGroups.2 = as.character(dfSamples$Illness)
fGroups.2 = factor(fGroups.2, levels = c('OD', 'ATB'))
dfSamples$fGroups.2 = fGroups.2
## data quality checks
m = dfExp
# pca on samples i.e. covariance matrix of m
pr.out = prcomp(t(m), scale=T)
fSamples = dfSamples$Illness1
col.p = rainbow(length(unique(fSamples)))
col = col.p[as.numeric(fSamples)]
# plot the pca components
plot.new()
legend('center', legend = unique(fSamples), fill=col.p[as.numeric(unique(fSamples))])
par(mfrow=c(2,2))
plot(pr.out$x[,1:2], col=col, pch=19, xlab='Z1', ylab='Z2',
main='PCA comp 1 and 2')
plot(pr.out$x[,c(1,3)], col=col, pch=19, xlab='Z1', ylab='Z3',
main='PCA comp 1 and 3')
plot(pr.out$x[,c(2,3)], col=col, pch=19, xlab='Z2', ylab='Z3',
main='PCA comp 2 and 3')
par(p.old)
f_Plot3DPCA(pr.out$x[,1:3], col, pch=19, xlab='Z1', ylab='Z2', zlab='Z3',
main='Plot of first 3 components')
par(p.old)
# remove the outlier groups from the data
# these can be seen on the pc2 and pc3 plots
m = pr.out$x[,1:3]
m = data.frame(m, fSamples)
i = which(m$PC1 > 130 & m$PC2 > 90)
i = unique(c(i, which(m$PC2 > 120 & m$PC3 > 0)))
i = unique(c(i, which(m$PC2 > 0 & m$PC3 < -100)))
c = col
c[i] = 'black'
## plot outlier groups
par(mfrow=c(2,2))
plot(pr.out$x[,1:2], col=c, pch=19, xlab='Z1', ylab='Z2',
main='PCA comp 1 and 2')
plot(pr.out$x[,c(1,3)], col=c, pch=19, xlab='Z1', ylab='Z3',
main='PCA comp 1 and 3')
plot(pr.out$x[,c(2,3)], col=c, pch=19, xlab='Z2', ylab='Z3',
main='PCA comp 2 and 3')
par(p.old)
f_Plot3DPCA(pr.out$x[,1:3], c, pch=19, xlab='Z1', ylab='Z2', zlab='Z3',
main='Plot of first 3 components')
par(p.old)
# remove the outliers
cvOutliers = rownames(m)[i]
m = match(cvOutliers, colnames(dfExp))
dfExp = dfExp[,-m]
m = match(cvOutliers, dfSamples$Sample_ID)
dfSamples = dfSamples[-m,]
gc()
### analysis
## extract data
# count matrix
mDat = as.matrix(dfExp)
# phenotypic data
fGroups.2 = dfSamples$fGroups.2
## data cleaning and formatting before statistical analysis
# remove any rows with NAN data
f = is.finite(rowSums(mDat))
table(f)
mDat = mDat[f,]
# scale across samples i.e. along columns
mDat = scale(mDat)
# select a subset of genes based on coefficient of variation.
mDat = t(mDat)
# calculate the coef of var for each gene
cv = apply(mDat, 2, function(x) sd(x)/abs(mean(x)))
# check cv
summary(cv)
# cut data into groups based on quantiles of cv
cut.pts = quantile(cv, probs = 0:10/10)
groups = cut(cv, breaks = cut.pts, include.lowest = T, labels = 0:9)
iMean = apply(mDat, 2, mean)
iVar = apply(mDat, 2, var)
coplot((cv) ~ iMean | groups)
coplot(iVar ~ iMean | groups)
# choose genes with small cv
# f = cv <= 0.2
# choosing groups from quantile 0 to 40
mDat = mDat[,groups %in% c(0, 1, 2, 3)]
# select a subset of genes that show differential expression
p.t = apply(mDat, 2, function(x) t.test(x ~ fGroups.2)$p.value)
p.w = apply(mDat, 2, function(x) wilcox.test(x ~ fGroups.2)$p.value)
p.t.adj = p.adjust(p.t, 'BH')
p.w.adj = p.adjust(p.w, 'BH')
t = names(p.t.adj[p.t.adj < 0.1])
w = names(p.w.adj[p.w.adj < 0.1])
n = unique(c(w, t))
f1 = n %in% t
f2 = n %in% w
f = f1 & f2
n2 = n[f]
mDat.sub = mDat[, colnames(mDat) %in% n2]
# keep 20% of data as test set
test = sample(1:nrow(dfSamples), size = nrow(dfSamples) * 0.2, replace = F)
dfSamples.train = dfSamples[-test,]
dfSamples.test = dfSamples[test,]
mDat.sub.train = mDat.sub[-test,]
mDat.sub.test = mDat.sub[test,]
lData = list(test=test, sample=dfSamples, expression=mDat.sub)
lData$desc = 'Longs data set for including test set vector'
save(lData, file='Objects/long_data_set.rds')
### model fitting and variable selection
## use random forest on training data for variable selection
dfData = as.data.frame(mDat.sub.train)
dfData$fGroups.2 = dfSamples.train$fGroups.2
set.seed(1)
rf.fit.1 = randomForest(fGroups.2 ~., data=dfData, importance = TRUE)
# save the results to save time for next time
dir.create('Objects', showWarnings = F)
save(rf.fit.1, file='Objects/long.rf.fit.1.rds')
# variables of importance
varImpPlot(rf.fit.1)
dfImportance = as.data.frame(importance(rf.fit.1))
dfImportance = dfImportance[order(dfImportance$MeanDecreaseAccuracy, decreasing = T),]
hist(dfImportance$MeanDecreaseAccuracy)
dfImportance.ATB = dfImportance[order(dfImportance$ATB, decreasing = T),]
# select the top few genes looking at the distribution of error rates
### TAG 1
# choose the top proteins for ATB
hist(dfImportance.ATB$ATB)
f = which(dfImportance.ATB$ATB >= 2)
length(f)
cvTopGenes = rownames(dfImportance.ATB)[f]
# subset the data based on these selected genes from training dataset
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
dfData$fGroups = dfSamples.train$fGroups.2
### Further variable classification check
### using CV and ROC
dfData.full = dfData
iBoot = 20
## as the 2 class proportions are not equal, fit random forest multiple times on random samples
## containing equal proportions of both classes and check variable importance measures
# fit random forests multiple times
# store results
lVarImp = vector('list', iBoot)
for (i in 1:iBoot) {
# get indices of the particular factors in data table
ind.o = which(dfData.full$fGroups == 'OD')
ind.p = which(dfData.full$fGroups == 'ATB')
# take sample of equal in size from group OD and ATB
ind.o.s = sample(ind.o, size = length(ind.p), replace = F)
# sample of ATB groups, i.e. take everything as it is smaller group
ind.p.s = sample(ind.p, size=length(ind.p), replace=F)
# join the sample indices together
ind = sample(c(ind.o.s, ind.p.s), replace=F)
# take sample from the full dataset
dfData = dfData.full[ind,]
# fit model
fit.rf = randomForest(fGroups ~., data=dfData, importance = TRUE, ntree = 500)
# get variables importance
df = importance(fit.rf)
df = df[order(df[,'MeanDecreaseAccuracy'], decreasing = T),]
# put in list
lVarImp[[i]] = df
} # for
## put data for each boot of each variable together in a dataframe
df = NULL
for (i in 1:iBoot) df = rbind(df, lVarImp[[i]])
# convert rownames i.e. gene names to factors
f = as.factor(rownames(df))
# calculate mean and sd for each gene
ivMean = tapply(df[,'MeanDecreaseAccuracy'], f, mean)
ivSD = tapply(df[,'MeanDecreaseAccuracy'], f, sd)
df = as.data.frame(df)
df$Symbol = rownames(df)
dfRF.boot = df
# boxplots
par(mar=c(6,4,3,2)+0.1)
boxplot(df$MeanDecreaseAccuracy ~ df$Symbol, las=2)
# calculate coefficient of variation
cv = ivSD/abs(ivMean)
# split data into groups based on cv
g = cut(cv, breaks = quantile(cv, 0:10/10), include.lowest = T)
coplot(ivSD ~ ivMean | g)
gl = cut(cv, breaks = quantile(cv, 0:10/10), include.lowest = T, labels = 0:9)
rm(dfData)
rm(dfData.full)
par(p.old)
dfRF.boot.stats = data.frame(ivMean, ivSD, cv, groups=g, group.lab=gl)
## Decide on a cutoff here
## based on coefficient of variation
cvTopGenes.step.1 = cvTopGenes
f = cv[gl %in% c(0, 1)]
cvTopGenes = names(f)
## Remove collinear genes: findCorrelation() (caret) flags columns so that
## no remaining pair has |r| above the cutoff; those genes are dropped from
## cvTopGenes (a snapshot of the previous set is kept in cvTopGenes.step.2).
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
#dfData$fGroups = dfSamples.train$fGroups.2
mCor = cor(dfData)
i = findCorrelation(mCor, cutoff = 0.7)
n = colnames(mCor)[i]
# remove these correlated features
cvTopGenes.step.2 = cvTopGenes
i = which(cvTopGenes %in% n)
# Guard the empty case: x[-integer(0)] selects *nothing*, so the previous
# unconditional cvTopGenes[-i] emptied the whole gene list whenever no
# correlated features were found.
if (length(i) > 0) cvTopGenes = cvTopGenes[-i]
rm(dfData)
## check for the minimum sized model using test and training sets
## use variable selection method (exhaustive best-subset via leaps::regsubsets),
## scoring each subset size by LDA training/test error on a random half split.
dfData.train = as.data.frame(mDat.sub.train)
dfData.train = dfData.train[,colnames(dfData.train) %in% cvTopGenes]
dfData.train$fGroups = dfSamples.train$fGroups.2
# create a test set on half the data (random ~50/50 split)
test = sample(c(T,F), size =nrow(dfData.train), replace = T)
dfData.test = dfData.train[test,]
dfData.train = dfData.train[!test,]
# NOTE(review): regsubsets is applied to a factor response here (linear-model
# hack for subset screening); confirm this is intentional.
reg = regsubsets(fGroups ~ ., data=dfData.train, nvmax = length(cvTopGenes), method='exhaustive')
plot(reg, scale='bic')
# test for validation errors in the test set, one entry per subset size
ivCV.train = rep(NA, length=length(cvTopGenes))
ivCV.test = rep(NA, length=length(cvTopGenes))
for (i in 1:length(cvTopGenes)){
# get the genes in each subset (drop the intercept name)
n = names(coef(reg, i))[-1]
n = c(n, 'fGroups')
dfDat.train = dfData.train[,colnames(dfData.train) %in% n]
dfDat.test = dfData.test[,colnames(dfData.test) %in% n]
# fit the lda model on training dataset
fit.lda = lda(fGroups ~ ., data=dfDat.train)
# test error rate on test dataset
p = predict(fit.lda, newdata=dfDat.test)
# calculate test error (misclassification rate)
ivCV.test[i] = mean(p$class != dfDat.test$fGroups)
# calculate training error
p = predict(fit.lda, newdata=dfDat.train)
# calculate error
ivCV.train[i] = mean(p$class != dfDat.train$fGroups)
}
# plot test vs training error rate against subset size
m = cbind(test=ivCV.test, train=ivCV.train)
matplot(1:nrow(m), m, type='l', lty=1, main='test/training error rate', xlab='number of var', ylab='error')
legend('topright', legend = colnames(m), lty=1, col=1:2)
## choose the best model after refitting on the full training data set
## best = subset size with the lowest test error
i = which.min(ivCV.test)[1]
# refit subset using i number of genes on all data
dfData = rbind(dfData.test, dfData.train)
reg = regsubsets(fGroups ~ ., data=dfData, nvmax = length(cvTopGenes), method='exhaustive')
# choose these variables (keep a snapshot of the previous set first)
cvTopGenes.step.3 = cvTopGenes
cvTopGenes = names(coef(reg, i))[-1]
rm(list = c('dfData', 'dfData.train', 'dfData.test'))
### cross validation with ROC
#### CV with ROC: 50 outer repeats of class-balanced subsampling, each with a
#### 10-fold inner CV of an LDA classifier; pooled posterior scores feed an
#### averaged ROC curve (ROCR prediction/performance).
# choose all data together for nested 10 fold cv
dfData = as.data.frame(mDat.sub.train)
dfData = dfData[,colnames(dfData) %in% cvTopGenes]
dfData$fGroups = dfSamples.train$fGroups.2
### Further variable classification check
### using CV and ROC
dfData.full = dfData
set.seed(1)
lPred = vector(mode = 'list', length = 50)
lLab = vector(mode = 'list', length=50)
iCv.error = NULL
for (oo in 1:50){
t.lPred = NULL
t.lLab = NULL
# select a subset of equal numbers for others and SA
# (balance classes by downsampling OD to the ATB group size)
ind.o = which(dfData.full$fGroups == 'OD')
ind.p = which(dfData.full$fGroups == 'ATB')
ind.o.s = sample(ind.o, size = length(ind.p), replace = F)
ind.p.s = sample(ind.p, size=length(ind.p), replace=F)
ind = sample(c(ind.o.s, ind.p.s), replace=F)
dfData = dfData.full[ind,]
for (o in 1:1){
# perform 10 fold cross validation
k = 10
folds = sample(1:k, nrow(dfData), replace = T, prob = rep(1/k, times=k))
# choose the fold to fit and test the model
for (i in 1:k){
# check if selected fold leads to 0 for a class (skip degenerate folds)
if ((length(unique(dfData$fGroups[folds != i])) < 2) || (length(unique(dfData$fGroups[folds == i])) < 2)) next
# check if fold too small to fit model
if (nrow(dfData[folds != i,]) < 3) next
# fit model on data not in fold
fit = lda(fGroups ~ ., data=dfData[folds != i,])
# predict on data in fold: posterior probability of the ATB class
pred = predict(fit, newdata = dfData[folds == i,])$posterior[,'ATB']
name = paste('pred',oo, o, i,sep='.' )
t.lPred[[name]] = pred
name = paste('label',oo,o, i,sep='.' )
t.lLab[[name]] = dfData$fGroups[folds == i] == 'ATB'
# hard class predictions for the misclassification-rate estimate
pred = predict(fit, newdata = dfData[folds == i,])$class
iCv.error = append(iCv.error, mean(pred != dfData$fGroups[folds == i]))
}
}
# pool all folds of this repeat into one score/label vector
t.lPred = unlist(t.lPred)
t.lLab = unlist(t.lLab)
lPred[[oo]] = t.lPred
lLab[[oo]] = t.lLab
}
# ROCR accepts lists of per-repeat vectors; the plot averages vertically
pred = prediction(lPred, lLab)
perf = performance(pred, 'tpr', 'fpr')
auc = performance(pred, 'auc')
plot(perf, main=paste('ROC Prediction of for', 'ATB'),
spread.estimate='stddev', avg='vertical', spread.scale=2)
auc.cv = paste('auc=', signif(mean(as.numeric(auc@y.values)), digits = 3))
cv.err = paste('CV Error=', signif(mean(iCv.error), 3))
#legend('bottomright', legend = c(auc, cv))
# chance diagonal
abline(0, 1, lty=2)
## fit model and roc without cross validation, just on test and training data:
## a single LDA fit on the full training set, evaluated once on the held-out
## test set, overlaid on the CV-averaged ROC curve drawn above.
dfData.train = as.data.frame(mDat.sub.train)
dfData.train = dfData.train[,colnames(dfData.train) %in% cvTopGenes]
dfData.train$fGroups = dfSamples.train$fGroups.2
dfData.test = as.data.frame(mDat.sub.test)
dfData.test = dfData.test[,colnames(dfData.test) %in% cvTopGenes]
dfData.test$fGroups = dfSamples.test$fGroups.2
fit = lda(fGroups ~ ., data=dfData.train)
# predict on the held-out test set: posterior probability of ATB
pred = predict(fit, newdata = dfData.test)$posterior[,'ATB']
ivPred = pred
ivLab = dfData.test$fGroups == 'ATB'
pred = predict(fit, newdata = dfData.test)$class
# test-set misclassification rate
iCv.error = mean(pred != dfData.test$fGroups)
pred = prediction(ivPred, ivLab)
perf = performance(pred, 'tpr', 'fpr')
auc = performance(pred, 'auc')
# overlay the single train/test ROC on the existing CV plot
plot(perf, add=T, lty=3, lwd=2, col=2)#main=paste('ROC Prediction of for', 'SA'))
auc.t = paste('t.auc=', signif(mean(as.numeric(auc@y.values)), digits = 3))
err.t = paste('t Error=', signif(mean(iCv.error), 3))
# legend combines CV and single-split AUC/error summaries
legend('bottomright', legend = c(auc.cv, cv.err, auc.t, err.t))
abline(0, 1, lty=2)
## plot these expression values for these genes
## (stack() reshapes wide -> long; the fGroups factor is re-attached by
## recycling — assumes stack drops the non-numeric column; verify)
par(mfrow=c(1,2))
x = stack(dfData.train)
x$f = dfData.train$fGroups
boxplot(values ~ f+ind, data=x, las=2, par=par(mar=c(8, 4, 2, 2)+0.1), main='Training Data')
x = stack(dfData.test)
x$f = dfData.test$fGroups
boxplot(values ~ f+ind, data=x, las=2, par=par(mar=c(8, 4, 2, 2)+0.1), main='Test Data')
|
# Configuration for the citation-rank plotting script.
# Requires the gplots library to be installed and loaded beforehand.
#print("Need to issue library(gplots)", quote=FALSE)

# Plot colours; entry 1 is reserved, datasets use entries 2..n.
colourList <- c("black", "darkred", "darkblue", "darkgreen", "magenta", "brown")

# Input datasets: one "<root>.dat" file per entry, with a matching legend label
# and a root name for the output image files. Alternative sets kept for reference:
#rootName="TSEgscholar111117short"
#rootName="TSEWoSresearcherid"
#rootName="PendryWoSresearcherid"
#rootNameList=c("TSEWoSresearcherid","TSEgscholar111117short")
#dataLabel=c("WoS","gScholar")
#outputRootName="TSEWoSgScholar"
rootNameList <- c("PendryWoSresearcherid")
dataLabel <- c("WoS")
outputRootName <- "PendryWoS"

# Output/behaviour switches.
plotsOn <- FALSE
OSWindows <- TRUE      # TRUE = windows() screen device, FALSE = quartz()
screenOn <- TRUE       # draw on screen
pdfOn <- TRUE          # write <root>bar.pdf
epsOn <- TRUE          # write <root>bar.eps
pngOn <- TRUE          # write <root>bar.png
squareAxesOn <- TRUE   # force equal x/y axis limits
hOtherValuesOn <- FALSE  # also draw the h(1:2)/h(2:1) reference lines
readBibData <- function(rootName){
# Read "<rootName>.dat", a tab-separated table with Rank and Citations
# columns, and compute three Hirsch-type indices:
#   h   : largest rank r with Citations[r] >= r
#   h12 : largest rank r with 2*Citations[r] >= r
#   h21 : largest rank r with Citations[r] >= 2*r
# Prints a summary line and returns
# list(Citations, Rank, hvalue, h12value, h21value).
fileName <- paste(rootName,".dat",sep="")
df <- read.table(fileName, header=TRUE, sep="\t", fill=TRUE)
# Vectorised index computation replaces the old scalar 1:length() loop,
# which failed on a zero-row table (1:0 iterates over c(1, 0)).
# max(0, ..., na.rm=TRUE) keeps the result 0 for empty input and ignores
# NA rows produced by fill=TRUE.
hvalue <- max(0, df$Rank[df$Citations >= df$Rank], na.rm=TRUE)
h21value <- max(0, df$Rank[df$Citations >= 2*df$Rank], na.rm=TRUE)
h12value <- max(0, df$Rank[2*df$Citations >= df$Rank], na.rm=TRUE)
print(paste(rootName,"has h=",hvalue,", h12=",h12value,", h21=",h21value),quote=FALSE)
list(Citations=df$Citations, Rank=df$Rank, hvalue=hvalue, h12value=h12value, h21value=h21value)
}
citeList <- list()
refList <- list()
outputList <- list()
# Read every dataset and derive shared axis limits:
#   cmax = next power of ten above the largest citation count, +5% headroom
#   rmax = largest number of ranked papers
# Maxima are accumulated across datasets; the previous code overwrote
# cmax/rmax on every loop pass, so with several datasets only the *last*
# one sized the axes.
cmax <- 0
rmax <- 0
for (iii in seq_along(rootNameList)) {
outputList[[iii]] <- readBibData(rootNameList[iii])
#cmax <- (trunc(max(outputList[[iii]]$Citations)/10)+1)*10
cmax <- max(cmax, 10^(trunc(log10(max(outputList[[iii]]$Citations)))+1)*1.05)
rmax <- max(rmax, length(outputList[[iii]]$Rank))
}
if (squareAxesOn) {
# force a square plotting window so the c = r diagonal sits at 45 degrees
vmax <- max(cmax, rmax)
cmax <- vmax
rmax <- vmax
}
# Axis labels shared by all output devices.
xlabel="Rank"
ylabel="Citations"
# Earlier barplot-based rendering, kept for reference (superseded by mainPlot):
#if (OSWindows) windows() else quartz()
#barplot(outputList[[1]]$Citations, beside=TRUE, xlim=c(0,rmax), ylim=c(0,cmax), names.arg=outputList[[1]]$Rank, xaxs = "i", yaxs = "i",
# xlab=xlabel, ylab=ylabel )
# lines(1:rmax,1:rmax,lty=1)
# outputList[[1]]$hvalue=11
# lines(c(0,outputList[[1]]$hvalue),c(outputList[[1]]$hvalue,outputList[[1]]$hvalue),lty=2)
# lines(c(outputList[[1]]$hvalue+0.5,outputList[[1]]$hvalue+0.5),c(0,outputList[[1]]$hvalue),lty=2)
# generic plot function
mainPlot <- function(){
# Draw a log-log rank/citation scatter for every dataset in outputList,
# mark each dataset's h-index with an L-shaped guide meeting at (h, h),
# overlay the c = r diagonal and (optionally) the h(1:2)/h(2:1) reference
# lines, then add a legend. Reads the globals outputList, rmax, cmax,
# xlabel, ylabel, colourList, dataLabel and hOtherValuesOn.
cexValue <- 1.5
plot(x=NULL, y=NULL, log="xy", xlim=c(0.9,rmax), ylim=c(0.9,cmax),
     xaxs = "i", yaxs = "i", xlab=xlabel, ylab=ylabel, cex=cexValue)
for (k in seq_along(outputList)) {
  dat <- outputList[[k]]
  h <- dat$hvalue
  clr <- colourList[1+k]
  points(dat$Rank, dat$Citations, cex=1.5, col=clr, pch=k)
  # horizontal and vertical arms of the h-index guide
  lines(c(1, h), c(h, h), lty=1+k, col=clr)
  lines(c(h, h), c(1, h), lty=1+k, col=clr)
}
lines(1:rmax, 1:cmax, lty=1)  # citations == rank diagonal
text(0.505*rmax, 0.5*cmax, "h", pos=1, cex=cexValue)
if (hOtherValuesOn) {
  lines(1:rmax/2, 1:cmax, lty=3)  # citations == 2*rank reference
  text(0.7*rmax/2, 0.7*cmax, expression(paste("h"["1:2"])), pos=2, cex=cexValue)
  text(rmax*0.72, 0.7*cmax/2, expression(paste("h"["2:1"])), pos=1, cex=cexValue)
}
#legend (x=rmax*0.6,y=cmax/10,
legend(x="bottomright", y=NULL, dataLabel[1:length(outputList)],
       col=colourList[1+1:length(outputList)],
       lty=1+1:length(outputList),
       pch=1:length(outputList), cex=cexValue)
}
# end of generic plot function
# Render the same plot on each enabled device: screen, EPS, PDF and PNG.
# Each vector device opens, calls mainPlot(), then closes the current device.
if (screenOn){
if (OSWindows) windows() else quartz()
#print(paste(graphName,"on screen"), quote=FALSE)
mainPlot()
#abline(v=outputList[[1]]$hvalue, lty=2)
}
# EPS plot, for iGraph and fonts see see http://lists.gnu.org/archive/html/igraph-help/2007-07/msg00010.html
if (epsOn){
epsFileName<- paste(outputRootName,"bar.eps",sep="")
print(paste("eps plotting",epsFileName), quote=FALSE)
postscript(epsFileName, horizontal=FALSE, onefile=FALSE, height=6, width=6, pointsize=16, fonts=c("serif", "Palatino"))
#postscript(epsFileName, fonts=c("serif", "Palatino"))
mainPlot()
dev.off(which = dev.cur())
}
# PDF plot, for iGraph and fonts see see http://lists.gnu.org/archive/html/igraph-help/2007-07/msg00010.html
if (pdfOn){
pdfFileName<- paste(outputRootName,"bar.pdf",sep="")
print(paste("pdf plotting",pdfFileName), quote=FALSE)
pdf(pdfFileName, onefile=FALSE, height=6, width=6, pointsize=16, fonts=c("serif", "Palatino"))
mainPlot()
dev.off(which = dev.cur())
}
# PNG plot (raster; fixed pixel size rather than inches)
if (pngOn){
pngFileName<- paste(outputRootName,"bar.png",sep="")
print(paste("png plotting",pngFileName), quote=FALSE)
png(pngFileName, height=480, width=480, pointsize=12)
mainPlot()
dev.off(which = dev.cur())
}
| /R/ImperialPapers/ICcitations/biblog2.r | no_license | xuzhikethinker/PRG | R | false | false | 4,786 | r | # Need to install then load gplots library
# NOTE(review): from here the citation-plot script above is repeated verbatim
# (dataset residue); it redefines the same configuration and readBibData().
#print("Need to issue library(gplots)", quote=FALSE)
colourList=c("black", "darkred", "darkblue", "darkgreen", "magenta", "brown");
#Root name for all files
#rootName="TSEgscholar111117short"
#rootName="TSEWoSresearcherid"
#rootName="PendryWoSresearcherid"
#rootNameList=c("TSEWoSresearcherid","TSEgscholar111117short")
#dataLabel=c("WoS","gScholar")
#outputRootName="TSEWoSgScholar"
rootNameList=c("PendryWoSresearcherid")
dataLabel=c("WoS")
outputRootName="PendryWoS"
plotsOn=FALSE
OSWindows=TRUE
screenOn=TRUE
pdfOn=TRUE
epsOn=TRUE
pngOn=TRUE
squareAxesOn=TRUE
hOtherValuesOn=FALSE
# Read "<rootName>.dat" and compute h, h(1:2) and h(2:1) indices.
readBibData <- function(rootName){
fileName <- paste(rootName,".dat",sep="")
df <- read.table(fileName, header=TRUE, sep="\t", fill=TRUE);
hvalue=0;
h12value=0;
h21value=0;
for (ppp in 1:length(df$Rank)){
ccc=df$Citations[ppp]
rrr=df$Rank[ppp]
if (ccc>=rrr) hvalue=max(rrr,hvalue)
if (ccc>=2*rrr) h21value=max(rrr,h21value)
if (2*ccc>=rrr) h12value=max(rrr,h12value)
}
print(paste(rootName,"has h=",hvalue,", h12=",h12value,", h21=",h21value),quote=FALSE)
outputList <-list(Citations=df$Citations,Rank=df$Rank, hvalue=hvalue, h12value=h12value, h21value=h21value)
}
# NOTE(review): verbatim repeat of the dataset-reading loop (dataset residue).
# cmax/rmax are recomputed each iteration, so only the last dataset sizes the axes.
citeList <- list()
refList <- list()
outputList <-list()
for (iii in 1:length(rootNameList)) {
outputList[[iii]]<-readBibData(rootNameList[iii])
#cmax <- (trunc(max(outputList[[iii]]$Citations)/10)+1)*10
#rmax <- length(outputList[[iii]]$Rank)
cmax <- 10^(trunc(log10(max(outputList[[iii]]$Citations)))+1)*1.05
rmax <- length(outputList[[iii]]$Rank)
if (squareAxesOn) {
vmax=max(cmax,rmax)
cmax=vmax
rmax=vmax
}
}
xlabel="Rank"
ylabel="Citations"
#if (OSWindows) windows() else quartz()
#barplot(outputList[[1]]$Citations, beside=TRUE, xlim=c(0,rmax), ylim=c(0,cmax), names.arg=outputList[[1]]$Rank, xaxs = "i", yaxs = "i",
# xlab=xlabel, ylab=ylabel )
# lines(1:rmax,1:rmax,lty=1)
# outputList[[1]]$hvalue=11
# lines(c(0,outputList[[1]]$hvalue),c(outputList[[1]]$hvalue,outputList[[1]]$hvalue),lty=2)
# lines(c(outputList[[1]]$hvalue+0.5,outputList[[1]]$hvalue+0.5),c(0,outputList[[1]]$hvalue),lty=2)
# generic plot function
# NOTE(review): verbatim repeat of mainPlot() and the device-output sections
# (dataset residue).
# Draws a log-log rank/citation scatter with h-index guides and a legend.
mainPlot <- function(){
cexValue=1.5
plot(x=NULL, y=NULL, log="xy", xlim=c(0.9,rmax), ylim=c(0.9,cmax), xaxs = "i", yaxs = "i",
xlab=xlabel, ylab=ylabel, cex=cexValue )
for (iii in 1:length(outputList)){
points(outputList[[iii]]$Rank, outputList[[iii]]$Citations, cex=1.5, col=colourList[1+iii], pch=iii )
lines(c(1,outputList[[iii]]$hvalue),c(outputList[[iii]]$hvalue,outputList[[iii]]$hvalue),lty=1+iii, col=colourList[1+iii])
lines(c(outputList[[iii]]$hvalue,outputList[[iii]]$hvalue),c(1,outputList[[iii]]$hvalue),lty=1+iii, col=colourList[1+iii])
}
lines(1:rmax,1:cmax,lty=1)
text(0.505*rmax,0.5*cmax,"h", pos=1, cex=cexValue )
if (hOtherValuesOn) {
lines(1:rmax/2,1:cmax,lty=3)
h12label=expression(paste("h"["1:2"]))
text(0.7*rmax/2,0.7*cmax,h12label, pos=2, cex=cexValue )
h21label=expression(paste("h"["2:1"]))
text(rmax*0.72,0.7*cmax/2,h21label, pos=1, cex=cexValue )
}
#legend (x=rmax*0.6,y=cmax/10,
legend (x="bottomright",y=NULL, dataLabel[1:length(outputList)], col=colourList[1+1:length(outputList)],lty=1+1:length(outputList),pch=1:length(outputList), cex=cexValue);
}
# end of generic plot function
if (screenOn){
if (OSWindows) windows() else quartz()
#print(paste(graphName,"on screen"), quote=FALSE)
mainPlot()
#abline(v=outputList[[1]]$hvalue, lty=2)
}
# EPS plot, for iGraph and fonts see see http://lists.gnu.org/archive/html/igraph-help/2007-07/msg00010.html
if (epsOn){
epsFileName<- paste(outputRootName,"bar.eps",sep="")
print(paste("eps plotting",epsFileName), quote=FALSE)
postscript(epsFileName, horizontal=FALSE, onefile=FALSE, height=6, width=6, pointsize=16, fonts=c("serif", "Palatino"))
#postscript(epsFileName, fonts=c("serif", "Palatino"))
mainPlot()
dev.off(which = dev.cur())
}
# PDF plot, for iGraph and fonts see see http://lists.gnu.org/archive/html/igraph-help/2007-07/msg00010.html
if (pdfOn){
pdfFileName<- paste(outputRootName,"bar.pdf",sep="")
print(paste("pdf plotting",pdfFileName), quote=FALSE)
pdf(pdfFileName, onefile=FALSE, height=6, width=6, pointsize=16, fonts=c("serif", "Palatino"))
mainPlot()
dev.off(which = dev.cur())
}
# PNG plot
if (pngOn){
pngFileName<- paste(outputRootName,"bar.png",sep="")
print(paste("png plotting",pngFileName), quote=FALSE)
png(pngFileName, height=480, width=480, pointsize=12)
mainPlot()
dev.off(which = dev.cur())
}
|
# Regression test for gridGraphics: reproduce a base-graphics
# arrows()/segments() example so its grid conversion can be diffed.
library(gridGraphics)
segments1 <- function() {
# reproducible scatter with arrows between consecutive points and
# pink segments that skip every other point
set.seed(1)
x <- stats::runif(12); y <- stats::rnorm(12)
i <- order(x, y); x <- x[i]; y <- y[i]
plot(x, y, main = "arrows(.) and segments(.)")
## draw arrows from point to point :
s <- seq(length(x)-1) # one shorter than data
arrows(x[s], y[s], x[s+1], y[s+1], col= 1:3)
s <- s[-length(s)]
segments(x[s], y[s], x[s+2], y[s+2], col= 'pink')
}
# plotdiff()/plotdiffResult() are provided by the surrounding test harness —
# presumably they render and compare against a stored reference image; verify.
plotdiff(expression(segments1()), "segments-1")
plotdiffResult()
| /gridGraphics/test-scripts/test-segments.R | permissive | solgenomics/R_libs | R | false | false | 496 | r |
# NOTE(review): verbatim repeat of the gridGraphics segments test above
# (dataset residue).
library(gridGraphics)
segments1 <- function() {
set.seed(1)
x <- stats::runif(12); y <- stats::rnorm(12)
i <- order(x, y); x <- x[i]; y <- y[i]
plot(x, y, main = "arrows(.) and segments(.)")
## draw arrows from point to point :
s <- seq(length(x)-1) # one shorter than data
arrows(x[s], y[s], x[s+1], y[s+1], col= 1:3)
s <- s[-length(s)]
segments(x[s], y[s], x[s+2], y[s+2], col= 'pink')
}
plotdiff(expression(segments1()), "segments-1")
plotdiffResult()
|
## Note that the data starts at 2006-12-16 17:24:00 and increments one minute per row
## there are 60 seconds per minute. The analysis is to cover only two dates, 2/1/2007 and 2/2/2007
## there are 1440 minutes in day, 2880 minutes in two days
## to save time reading the data, this program only reads in the data that is needed
bdate <- strptime("16/12/2006 17:24:00", "%d/%m/%Y %T")
dstart <- strptime("1/02/2007 00:00:00", "%d/%m/%Y %T")
# rows to skip = whole minutes between the file start and 1 Feb 2007
skipcount <- as.integer((as.numeric(dstart)-as.numeric(bdate))/60)
readrows <- 2*1440
file <- "household_power_consumption.txt"
## get data set, separator character is a semicolon, use the first row as the column names, treat '?' as NA
## only get the 'readrows'+'skipcount' number of rows, as they contain up through the rows that are to be assessed
dat <- read.table(file, sep = ";", header = TRUE, nrows = (skipcount + readrows), na.strings = "?")
## subset the data to only get the rows for the dates that are to be assessed
subdat <- dat[(skipcount+1):(skipcount+readrows),]
## combine Date and Time into a full timestamp, stored back in the Time column (column 2)
subdat$Time <- strptime(paste(subdat$Date, subdat$Time),"%d/%m/%Y %T")
## rename the *Time* column to reflect that it now holds date + time;
## the previous code renamed column 1, mislabelling the Date column as "Date_Time"
colnames(subdat)[2] <- "Date_Time"
## note - leave the Date column as is
## 2
png("plot2.png", width = 480, height = 480)
plot(subdat[,2], subdat$Global_active_power, ylab = "Global Active Power - (kilowatts)", xlab ="", type="l")
dev.off()
| /plot2.R | no_license | devanssjc/ExData_Plotting1 | R | false | false | 1,494 | r | ## Note that the data starts at 2006-12-16 17:24:00 and increments one minute per row
## there are 60 seconds per minute. The analysis is to cover only two dates, 2/1/2007 and 2/2/2007
## there are 1440 minutes in day, 2880 minutes in two days
## to save time reading the data, this program only reads in the data that is needed
bdate <- strptime("16/12/2006 17:24:00", "%d/%m/%Y %T")
dstart <- strptime("1/02/2007 00:00:00", "%d/%m/%Y %T")
skipcount <- as.integer((as.numeric(dstart)-as.numeric(bdate))/60)
readrows <- 2*1440
file <- "household_power_consumption.txt"
## get data set, seperate character is a semicolon, use the first row as the column names, treat '?' as NA
## only get the'readrows'+'skipcount number of rows, as they contain up through the rows that are to be assessed
dat <- read.table(file, sep = ";", header = TRUE, nrows = (skipcount + readrows), na.strings = "?")
## subset the data to only get the date for the dates that are to be assessed
subdat <- dat[(skipcount+1):(skipcount+readrows),]
## convert the Date and Time columns from a factor to POSITlt and store it back in the Time column
subdat$Time <- strptime(paste(subdat$Date, subdat$Time),"%d/%m/%Y %T")
## fix the Time column to show that it now has the Date and Time
colnames(subdat)[1] <- "Date_Time"
## note - leave the Date column as is
## 2
png("plot2.png", width = 480, height = 480)
plot(subdat[,2], subdat$Global_active_power, ylab = "Global Active Power - (kilowatts)", xlab ="", type="l")
dev.off()
|
# Package example script: sample-size calculations (number of replicates c)
# for three-way ANOVA designs from the OPDOE package. Each function is run
# with alpha = 0.05, beta = 0.1, delta/sigma = 0.5 under both the "maximin"
# and "minimin" criteria.
library(OPDOE)
### Name: size_c.three_way
### Title: Three-way analysis of variance - several cross-, nested and
###   mixed classifications.
### Aliases: 'size_c.three_way_cross.model_3_a '
###   'size_c.three_way_cross.model_3_axb '
###   'size_c.three_way_mixed_ab_in_c.model_5_a '
###   'size_c.three_way_mixed_ab_in_c.model_5_axb '
###   'size_c.three_way_mixed_ab_in_c.model_5_b '
###   'size_c.three_way_mixed_ab_in_c.model_6_b '
###   'size_c.three_way_mixed_cxbina.model_5_a '
###   'size_c.three_way_mixed_cxbina.model_5_b '
###   'size_c.three_way_mixed_cxbina.model_7_b '
###   'size_c.three_way_nested.model_5_a '
###   'size_c.three_way_nested.model_5_b '
###   'size_c.three_way_nested.model_7_b '
### Keywords: anova
### ** Examples
size_c.three_way_cross.model_3_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_cross.model_3_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_cross.model_3_axb(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_cross.model_3_axb(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_a(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_a(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_axb(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_axb(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_b(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_b(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_6_b(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_6_b(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_cxbina.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_cxbina.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_cxbina.model_7_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_7_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_nested.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_nested.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_7_b(0.05, 0.1, 0.5, 6, 4, 1, "maximin")
size_c.three_way_nested.model_7_b(0.05, 0.1, 0.5, 6, 4, 1, "minimin")
| /data/genthat_extracted_code/OPDOE/examples/size_c.three_way.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,544 | r | library(OPDOE)
### Name: size_c.three_way
### Title: Three-way analysis of variance - several cross-, nested and
### mixed classifications.
### Aliases: 'size_c.three_way_cross.model_3_a '
### 'size_c.three_way_cross.model_3_axb '
### 'size_c.three_way_mixed_ab_in_c.model_5_a '
### 'size_c.three_way_mixed_ab_in_c.model_5_axb '
### 'size_c.three_way_mixed_ab_in_c.model_5_b '
### 'size_c.three_way_mixed_ab_in_c.model_6_b '
### 'size_c.three_way_mixed_cxbina.model_5_a '
### 'size_c.three_way_mixed_cxbina.model_5_b '
### 'size_c.three_way_mixed_cxbina.model_7_b '
### 'size_c.three_way_nested.model_5_a '
### 'size_c.three_way_nested.model_5_b '
### 'size_c.three_way_nested.model_7_b '
### Keywords: anova
### ** Examples
size_c.three_way_cross.model_3_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_cross.model_3_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_cross.model_3_axb(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_cross.model_3_axb(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_a(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_a(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_axb(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_axb(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_5_b(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_5_b(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_ab_in_c.model_6_b(0.05, 0.1, 0.5, 6, 5, 1, "maximin")
size_c.three_way_mixed_ab_in_c.model_6_b(0.05, 0.1, 0.5, 6, 5, 1, "minimin")
size_c.three_way_mixed_cxbina.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_cxbina.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_mixed_cxbina.model_7_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_mixed_cxbina.model_7_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_nested.model_5_a(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "maximin")
size_c.three_way_nested.model_5_b(0.05, 0.1, 0.5, 6, 5, 2, "minimin")
size_c.three_way_nested.model_7_b(0.05, 0.1, 0.5, 6, 4, 1, "maximin")
size_c.three_way_nested.model_7_b(0.05, 0.1, 0.5, 6, 4, 1, "minimin")
|
# Fuzzer-derived regression input (libFuzzer) for the internal
# COMPoissonReg:::pcmp_cpp routine: empty lambda/nu vectors plus NaN and
# subnormal values in x exercise edge-case handling of the C++ code.
testlist <- list(lambda = numeric(0), nu = numeric(0), tol = 0, x = c(NaN, NaN, 2.11218004253591e-319, NaN, NaN, 3.23785921002061e-319, 0), ymax = 0)
result <- do.call(COMPoissonReg:::pcmp_cpp,testlist)
str(result) | /COMPoissonReg/inst/testfiles/pcmp_cpp/libFuzzer_pcmp_cpp/pcmp_cpp_valgrind_files/1612728389-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 216 | r | testlist <- list(lambda = numeric(0), nu = numeric(0), tol = 0, x = c(NaN, NaN, 2.11218004253591e-319, NaN, NaN, 3.23785921002061e-319, 0), ymax = 0)
result <- do.call(COMPoissonReg:::pcmp_cpp,testlist)
str(result) |
#Farmer Problem in LP — teaching script using lpSolveAPI.
#A farmer plans to plant two crops, A and B. The cost of cultivating crop A is $40/acre, whereas the cost of crop B is $60/acre. The farmer has a maximum of $7400 available for land cultivation. Each acre of crop A requires 20 labor-hours and each acre of crop B requires 25 labor-hours. The farmer has a maximum of 3300 labor-hours available. If she expects to make a profit of $150/acre on crop A and $200/acre on crop B, how many acres of each crop should she plant in order to maximize her profit?
library(lpSolveAPI)
#First we create an empty model x.
?make.lp
#two variables ie. crops A & B: find which crop to be grown how much to max profit
lprecF1 <- make.lp(0, 2)
lprecF1
#Profit :: 150A + 200B (objective coefficients)
set.objfn(lprecF1, c(150, 200))
lprecF1
#Change from min to max problem
lp.control(lprecF1, sense="max")
lprecF1
#answer required in integer or real no for A & B: default Real
lprecF1
#1st Constraint : Budget Avl
#40x + 60y <= 7400
add.constraint(lprecF1, c(40,60), "<=", 7400)
lprecF1
#2nd constraint : Labour Hours Avl
#20x + 25y <= 3300
add.constraint(lprecF1, c(20,25), "<=", 3300)
lprecF1
#set lower limits : A & B > 0 (non-negativity)
set.bounds(lprecF1, lower = c(0, 0), columns = c(1, 2))
lprecF1
#upper bounds can also be set only for 1 or more columns
#set.bounds(lprec, upper = c(200), columns = 2)
# label rows/columns for readable model printouts
ColNames <- c("CropA", "CropB")
RowNames <- c("Budget", "Labor")
dimnames(lprecF1) <- list(RowNames, ColNames)
lprecF1
solve(lprecF1) #if 0 then solution found
#get.dual.solution(lprec)
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
# manual check of the expected optimum: 150*65 + 200*80
150* 65 + 200 * 80
get.constraints(lprecF1) #constraints of budget & labor used
plot(lprecF1) # print graphical output : only when type is real
#if type is integer, the plot will not work
print(lprecF1) #see the model
#add more constraints like water (demonstration: add, then remove again)
#35x + 40y <= 10000
add.constraint(lprecF1, c(5,10), "<=", 1000)
lprecF1
delete.constraint(lprecF1, 3)
solve(lprecF1) #if 0 then solution found
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
#setting integer value (switch both decision variables to integer type)
set.type(lprecF1, c(1,2), type = c("integer"))
lprecF1
solve(lprecF1) #if 0 then solution found
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
#http://lpsolve.sourceforge.net/5.5/R.htm
?lp
?lp.assign
?lp.object
?lp.transport
?print.lp
| /03-wksp1/5e5-LP-farmer1.R | no_license | bakul86/analytics | R | false | false | 2,416 | r | #Farmer Problem in LP
#A farmer plans to plant two crops, A and B. The cost of cultivating crop A is $40/acre, whereas the cost of crop B is $60/acre. The farmer has a maximum of $7400 available for land cultivation. Each acre of crop A requires 20 labor-hours and each acre of crop B requires 25 labor-hours. The farmer has a maximum of 3300 labor-hours available. If she expects to make a profit of $150/acre on crop A and $200/acre on crop B, how many acres of each crop should she plant in order to maximize her profit?
library(lpSolveAPI)
#First we create an empty model x.
?make.lp
#two variables ie. crops A & B: find which crop to be grown how much to max profit
lprecF1 <- make.lp(0, 2)
lprecF1
#Profit :: 150A + 200B
set.objfn(lprecF1, c(150, 200))
lprecF1
#Change from min to max problem
lp.control(lprecF1, sense="max")
lprecF1
#answer required in integer or real no for A & B: default Real
lprecF1
#1st Constraint : Budget Avl
#40x + 60y <= 7400
add.constraint(lprecF1, c(40,60), "<=", 7400)
lprecF1
#2nd constraint : Labour Hours Avl
#20x + 25y <= 3300
add.constraint(lprecF1, c(20,25), "<=", 3300)
lprecF1
#set lower limits : A & B > 0
set.bounds(lprecF1, lower = c(0, 0), columns = c(1, 2))
lprecF1
#upper bounds can also be set only for 1 or more columns
#set.bounds(lprec, upper = c(200), columns = 2)
ColNames <- c("CropA", "CropB")
RowNames <- c("Budget", "Labor")
dimnames(lprecF1) <- list(RowNames, ColNames)
lprecF1
solve(lprecF1) #if 0 then solution found
#get.dual.solution(lprec)
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
150* 65 + 200 * 80
get.constraints(lprecF1) #constraints of budget & labor used
plot(lprecF1) # print graphical output : only when type is real
#if type is integer, the plot will not work
print(lprecF1) #see the model
#add more constraints like water
#35x + 40y <= 10000
add.constraint(lprecF1, c(5,10), "<=", 1000)
lprecF1
delete.constraint(lprecF1, 3)
solve(lprecF1) #if 0 then solution found
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
#setting integer value
set.type(lprecF1, c(1,2), type = c("integer"))
lprecF1
solve(lprecF1) #if 0 then solution found
get.objective(lprecF1) # profit achieved
get.variables(lprecF1) #how much of each crop A & B
#http://lpsolve.sourceforge.net/5.5/R.htm
?lp
?lp.assign
?lp.object
?lp.transport
?print.lp
|
# Numbers Ruby test
puts 1+2 | /numbers.rd | no_license | crisfrulla/learn_ruby | R | false | false | 28 | rd | # Numbers Ruby test
puts 1+2 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weekday.R
\name{weekday-arithmetic}
\alias{weekday-arithmetic}
\alias{add_days.clock_weekday}
\title{Arithmetic: weekday}
\usage{
\method{add_days}{clock_weekday}(x, n, ...)
}
\arguments{
\item{x}{\verb{[clock_weekday]}
A weekday vector.}
\item{n}{\verb{[integer / clock_duration]}
An integer vector to be converted to a duration, or a duration
corresponding to the arithmetic function being used. This corresponds
to the number of duration units to add. \code{n} may be negative to subtract
units of duration.}
\item{...}{These dots are for future extensions and must be empty.}
}
\value{
\code{x} after performing the arithmetic.
}
\description{
These are weekday methods for the
\link[=clock-arithmetic]{arithmetic generics}.
\itemize{
\item \code{add_days()}
}
Also check out the examples on the \code{\link[=weekday]{weekday()}} page for more advanced
usage.
}
\details{
\code{x} and \code{n} are recycled against each other.
}
\examples{
saturday <- weekday(clock_weekdays$saturday)
saturday
add_days(saturday, 1)
add_days(saturday, 2)
}
| /man/weekday-arithmetic.Rd | permissive | isabella232/clock-2 | R | false | true | 1,128 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weekday.R
\name{weekday-arithmetic}
\alias{weekday-arithmetic}
\alias{add_days.clock_weekday}
\title{Arithmetic: weekday}
\usage{
\method{add_days}{clock_weekday}(x, n, ...)
}
\arguments{
\item{x}{\verb{[clock_weekday]}
A weekday vector.}
\item{n}{\verb{[integer / clock_duration]}
An integer vector to be converted to a duration, or a duration
corresponding to the arithmetic function being used. This corresponds
to the number of duration units to add. \code{n} may be negative to subtract
units of duration.}
\item{...}{These dots are for future extensions and must be empty.}
}
\value{
\code{x} after performing the arithmetic.
}
\description{
These are weekday methods for the
\link[=clock-arithmetic]{arithmetic generics}.
\itemize{
\item \code{add_days()}
}
Also check out the examples on the \code{\link[=weekday]{weekday()}} page for more advanced
usage.
}
\details{
\code{x} and \code{n} are recycled against each other.
}
\examples{
saturday <- weekday(clock_weekdays$saturday)
saturday
add_days(saturday, 1)
add_days(saturday, 2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsl.r
\name{splash_add_lua}
\alias{splash_add_lua}
\title{Add raw lua code into DSL call chain}
\usage{
splash_add_lua(splash_obj, lua_code)
}
\arguments{
\item{splash_obj}{splashr object}
\item{lua_code}{length 1 character vector of raw \code{lua} code}
}
\description{
The \code{splashr} \code{lua} DSL (domain specific language) wrapper wraps what the package
author believes to be the most common/useful \code{lua} functions. Users of the package
may have need to insert some custom \code{lua} code within a DSL call chain they are
building. You can insert any Splash \code{lua} code you like with this function call.
}
\details{
The code is inserted at the position where \code{splash_add_lua()} is called in the chain,
which will be within the main "splash" function, which is defined as:\preformatted{function main(splash)
...
end
}
If you need more flexibility, use the \code{\link[=execute_lua]{execute_lua()}} function.
}
\seealso{
Other splash_dsl_functions: \code{\link{splash_click}},
\code{\link{splash_focus}}, \code{\link{splash_go}},
\code{\link{splash_har_reset}}, \code{\link{splash_har}},
\code{\link{splash_html}}, \code{\link{splash_png}},
\code{\link{splash_press}}, \code{\link{splash_release}},
\code{\link{splash_send_keys}},
\code{\link{splash_send_text}}, \code{\link{splash_wait}}
}
| /man/splash_add_lua.Rd | no_license | nikolayvoronchikhin/splashr | R | false | true | 1,402 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsl.r
\name{splash_add_lua}
\alias{splash_add_lua}
\title{Add raw lua code into DSL call chain}
\usage{
splash_add_lua(splash_obj, lua_code)
}
\arguments{
\item{splash_obj}{splashr object}
\item{lua_code}{length 1 character vector of raw \code{lua} code}
}
\description{
The \code{splashr} \code{lua} DSL (domain specific language) wrapper wraps what the package
author believes to be the most common/useful \code{lua} functions. Users of the package
may have need to insert some custom \code{lua} code within a DSL call chain they are
building. You can insert any Splash \code{lua} code you like with this function call.
}
\details{
The code is inserted at the position where \code{splash_add_lua()} is called in the chain,
which will be within the main "splash" function, which is defined as:\preformatted{function main(splash)
...
end
}
If you need more flexibility, use the \code{\link[=execute_lua]{execute_lua()}} function.
}
\seealso{
Other splash_dsl_functions: \code{\link{splash_click}},
\code{\link{splash_focus}}, \code{\link{splash_go}},
\code{\link{splash_har_reset}}, \code{\link{splash_har}},
\code{\link{splash_html}}, \code{\link{splash_png}},
\code{\link{splash_press}}, \code{\link{splash_release}},
\code{\link{splash_send_keys}},
\code{\link{splash_send_text}}, \code{\link{splash_wait}}
}
|
# Package-level NAMESPACE import directives (roxygen2 tags attached to the
# trailing NULL, which gives roxygen2 an object to hang the block on).
# Fixed: "@import SummarizedExperiment SummarizedExperiment" listed the same
# package twice; a single occurrence is sufficient.
#' @import methods
#' @import SharedObject
#' @import SummarizedExperiment
#' @importClassesFrom S4Vectors SimpleList Rle LLint
#' @importClassesFrom SummarizedExperiment SummarizedExperiment Assays SimpleAssays
#' @importClassesFrom IRanges IRanges CompressedAtomicList
#' @importClassesFrom GenomicRanges GRanges
# @useDynLib SharedObjectUtility, .registration = TRUE
# @importFrom Rcpp sourceCpp
NULL
| /R/zzz.R | no_license | Jiefei-Wang/SharedObjectUtility | R | false | false | 425 | r | #' @import methods
#' @import SharedObject
#' @import SummarizedExperiment
#' @importClassesFrom S4Vectors SimpleList Rle LLint
#' @importClassesFrom SummarizedExperiment SummarizedExperiment Assays SimpleAssays
#' @importClassesFrom IRanges IRanges CompressedAtomicList
#' @importClassesFrom GenomicRanges GRanges
# @useDynLib SharedObjectUtility, .registration = TRUE
# @importFrom Rcpp sourceCpp
NULL
|
#process bionano scaffolds
# Inputs: mashmap alignments of the Bionano assemblies against the cleaned
# sire/dam assemblies, plus the Bionano hybrid-scaffold AGP files.
# NOTE: header=F uses the reassignable F shorthand; prefer header=FALSE.
angusmashmap=read.delim("bostaurus_angus_vs_sire_cleaned_assembly.mashmap",sep=" ",header=F)
brahmanmashmap=read.delim("bostaurus_brahma_vs_dam_cleaned_assembly.mashmap",sep=" ",header=F)
angusbionano=read.table("EXP_REFINEFINAL1_bppAdjust_cmap_bostaurus_angus_fasta_NGScontigs_HYBRID_SCAFFOLD.agp")
brahmanbionano=read.table("EXP_REFINEFINAL1_bppAdjust_cmap_bostaurus_brahma_fasta_NGScontigs_HYBRID_SCAFFOLD.agp")
# These .RData files presumably provide sire_scaffolds_FINAL_agp_modi and
# dam_scaffolds_FINAL_agp_modi used below -- TODO confirm their contents.
load("Downloads/sire_agp_clean_assembly_to_salsa.RData")
load("Downloads/dam_agp_clean_assembly_to_salsa.RData")
#########################################################################################
#X
# Map the SALSA X-chromosome contig ordering onto Bionano hybrid scaffolds (dam/brahman).
#########################################################################################
# NOTE(review): "se" relies on partial argument matching for "sep" -- spell it out.
x=read.table("contig_order_v4.txt",se="\t")
# Strip the ".fa" suffix from contig file names.
# NOTE(review): strsplit() treats ".fa" as a regex ("." matches any character);
# confirm contig names cannot match it earlier in the string.
newx=apply(as.matrix(x[,1]),1,function(x){strsplit(x,".fa")[[1]][1]})
X=cbind(newx,x[,2])
# Keep only mashmap rows for the X contigs, reordered to match the X ordering.
brahmanmashmapX=brahmanmashmap[brahmanmashmap[,6]%in%X[,1],]
brahmanmashmapX=brahmanmashmapX[match(X[,1],brahmanmashmapX[,6]),]
# Tig id = part of the query name before the first "|".
brahmanmashmapXtig=apply(as.matrix(brahmanmashmapX[,1]),1,function(x){strsplit(x,"\\|")[[1]][1]})
brahmanmashmapXnew=cbind(brahmanmashmapX,brahmanmashmapXtig)
# Same tig-id extraction for the Bionano AGP component ids.
brahmanbionanotig=apply(as.matrix(brahmanbionano[,6]),1,function(x){strsplit(x,"\\|")[[1]][1]})
brahmanbionanotignew=cbind(brahmanbionano,brahmanbionanotig)
# Join mashmap alignments to Bionano AGP rows on the shared tig id.
mergedbrahman=merge(brahmanmashmapXnew,brahmanbionanotignew,by.x="brahmanmashmapXtig",by.y="brahmanbionanotig")
# NOTE(review): supermergedbrahman is taken BEFORE the reordering below, so it is
# written out in merge() order, not in X contig order -- confirm this is intended.
supermergedbrahman=mergedbrahman[,c(1,3,4,5,6,7,12,13,14,15)]
m=match(X[,1],mergedbrahman[,7])
mergedbrahman=mergedbrahman[m,]
# NOTE(review): "mm" and "isna" are never defined in this script; these lines
# presumably depend on objects left over from an interactive session -- verify.
checking=brahmanbionano[!is.na(mm),]
checking1=as.matrix(checking[checking[,5]=="W",])
write.table(dam_scaffolds_FINAL_agp_modi[match(X[,1],dam_scaffolds_FINAL_agp_modi[,6]),][isna,] ,file="missingtable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
write.table(dam_scaffolds_FINAL_agp_modi[match(X[,1],dam_scaffolds_FINAL_agp_modi[,6]),],file="finaltable-Xsalsa.txt",row.names=F,col.names=F,sep="\t")
write.table(supermergedbrahman,file="finaltable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
# Dump Bionano AGP rows for individual contigs that were missed above.
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011795",],file="missedtig00011795.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00003246",],file="missedtig00003246.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011886",],file="missedtig00011886.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011907",],file="missedtig00011907.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00479033",],file="missedtig00479033.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011826",],file="missedtig00011826.csv")
#########################################################################################
#dam full
# Full dam assembly: join ALL mashmap rows (not just X) to the SALSA AGP, then
# to the Bionano AGP.
#########################################################################################
colnames(brahmanmashmap)[6]="contig"
brahmanmashmapXnew2=merge(brahmanmashmap,dam_scaffolds_FINAL_agp_modi,by.x="contig",by.y="component_id")
colnames(brahmanmashmapXnew2)[2]="tig"
# NOTE(review): here the tig id is taken before the first "_" rather than "|"
# as in the X section above -- confirm the AGP naming convention differs.
brahmanbionanotig=apply(as.matrix(brahmanbionano[,6]),1,function(x){strsplit(x,"_")[[1]][1]})
brahmanbionanotignew=cbind(brahmanbionano,brahmanbionanotig)
# NOTE: this overwrites the mergedbrahman built in the X section.
mergedbrahman=merge(brahmanmashmapXnew2,brahmanbionanotignew,by.x="tig",by.y="brahmanbionanotig")
#########################################################################################
#Y
# Y chromosome: build the ordered sire Y contig list, then map it onto the
# Bionano hybrid scaffolds (angus/sire).
#########################################################################################
y=read.csv("sireY_scaffold_info.csv",header=F)
# Keep only rows flagged "A" in column 5 (meaning not documented here -- verify).
y=y[y[,5]=="A",]
newYcontig=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[,6],]
# NOTE(review): the loop bound uses nrow(Y), but Y is only (re)defined AFTER the
# loop; as written this relies on a stale Y from an earlier interactive session.
# The intended bound is presumably nrow(y). Also grows newYcontigbase via rbind
# inside the loop (O(n^2)); preallocate or lapply+do.call(rbind, ...) instead.
for(i in 1:nrow(Y)){
if(i==1){
newYcontigbase=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[1,6],]
oriation=rep(as.character(y[i,9]),nrow(newYcontigbase))
}else{
newYcontigbase2=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[i,6],]
oriation2=rep(as.character(y[i,9]),nrow(newYcontigbase2))
newYcontigbase=rbind(newYcontigbase,newYcontigbase2)
oriation=c(oriation,oriation2)
}
}
# Y = contig id (col 6) plus its orientation from sireY_scaffold_info col 9.
Y=cbind(newYcontigbase[,6],oriation)
# Mirror of the X-section logic, using the angus/sire data.
angusmashmapY=angusmashmap[angusmashmap[,6]%in%Y[,1],]
angusmashmapY=angusmashmapY[match(Y[,1],angusmashmapY[,6]),]
angusmashmapYtig=apply(as.matrix(angusmashmapY[,1]),1,function(x){strsplit(x,"\\|")[[1]][1]})
angusmashmapYnew=cbind(angusmashmapY,angusmashmapYtig)
angusbionanotig=apply(as.matrix(angusbionano[,6]),1,function(x){strsplit(x,"\\|")[[1]][1]})
angusbionanotignew=cbind(angusbionano,angusbionanotig)
mergedangus=merge(angusmashmapYnew,angusbionanotignew,by.x="angusmashmapYtig",by.y="angusbionanotig")
supermergedangus=mergedangus[,c(1,3,4,5,6,7,12,13,14,15)]
m=match(Y[,1],supermergedangus[,6])
supermergedangus=supermergedangus[m,]
# Count Y contigs per hybrid scaffold; "special" = first 12 scaffold names.
table(as.matrix(supermergedangus[,7]))
special=rownames(table(as.matrix(supermergedangus[,7])))[1:12]
# Pasted console output (value of `special`), commented out so the script parses:
# > special
# [1] "Super-Scaffold_100035" "Super-Scaffold_100072" "Super-Scaffold_100129" "Super-Scaffold_100133" "Super-Scaffold_100160" "Super-Scaffold_100217" "Super-Scaffold_100308" "Super-Scaffold_100367"
# [9] "Super-Scaffold_100404" "Super-Scaffold_100406" "Super-Scaffold_100477" "Super-Scaffold_100494"
# NOTE(review): this block reuses "mm", "isna" (undefined in this script), the
# dam-side brahmanbionano table, and the X ordering inside the Y section --
# likely copy-paste from the X block; confirm angusbionano/Y were intended.
checking=brahmanbionano[!is.na(mm),]
checking1=as.matrix(checking[checking[,5]=="W",])
write.table(sire_scaffolds_FINAL_agp_modi[match(X[,1],sire_scaffolds_FINAL_agp_modi[,6]),][isna,] ,file="missingtable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
write.table(newYcontigbase,file="finaltable-Ysalsa.txt",row.names=F,col.names=F,sep="\t")
write.table(supermergedangus,file="finaltable-Ymatchedbionano.txt",row.names=F,col.names=F,sep="\t")
# Inspect the contigs making up the first 11 "special" hybrid scaffolds:
# W rows are sequence components; take the tig id before the first "_".
superscaffold=angusbionano[angusbionano[,1]%in%rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11],]
specialtig=superscaffold[superscaffold[,5]=="W",6]
specialtig=apply(as.matrix(specialtig),1,function(x){strsplit(x,"_")[[1]][1]})
specialtig=rownames(table(specialtig))
# Look the special tigs back up in the SALSA sire AGP via the mashmap table.
sire_scaffolds_FINAL_agp_modi[match(angusmashmap[match(specialtig,angusmashmap[,1]),6],sire_scaffolds_FINAL_agp_modi[,6]),]
rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11]
angusbionano[angusbionano[,1]%in%rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11],6]
| /processopticalmap-comparetoHiC.R | no_license | cynthialiu/CattleSexChromosomesAssembly | R | false | false | 6,484 | r | #process bionano scaffolds
angusmashmap=read.delim("bostaurus_angus_vs_sire_cleaned_assembly.mashmap",sep=" ",header=F)
brahmanmashmap=read.delim("bostaurus_brahma_vs_dam_cleaned_assembly.mashmap",sep=" ",header=F)
angusbionano=read.table("EXP_REFINEFINAL1_bppAdjust_cmap_bostaurus_angus_fasta_NGScontigs_HYBRID_SCAFFOLD.agp")
brahmanbionano=read.table("EXP_REFINEFINAL1_bppAdjust_cmap_bostaurus_brahma_fasta_NGScontigs_HYBRID_SCAFFOLD.agp")
load("Downloads/sire_agp_clean_assembly_to_salsa.RData")
load("Downloads/dam_agp_clean_assembly_to_salsa.RData")
#########################################################################################
#X
#########################################################################################
x=read.table("contig_order_v4.txt",se="\t")
newx=apply(as.matrix(x[,1]),1,function(x){strsplit(x,".fa")[[1]][1]})
X=cbind(newx,x[,2])
brahmanmashmapX=brahmanmashmap[brahmanmashmap[,6]%in%X[,1],]
brahmanmashmapX=brahmanmashmapX[match(X[,1],brahmanmashmapX[,6]),]
brahmanmashmapXtig=apply(as.matrix(brahmanmashmapX[,1]),1,function(x){strsplit(x,"\\|")[[1]][1]})
brahmanmashmapXnew=cbind(brahmanmashmapX,brahmanmashmapXtig)
brahmanbionanotig=apply(as.matrix(brahmanbionano[,6]),1,function(x){strsplit(x,"\\|")[[1]][1]})
brahmanbionanotignew=cbind(brahmanbionano,brahmanbionanotig)
mergedbrahman=merge(brahmanmashmapXnew,brahmanbionanotignew,by.x="brahmanmashmapXtig",by.y="brahmanbionanotig")
supermergedbrahman=mergedbrahman[,c(1,3,4,5,6,7,12,13,14,15)]
m=match(X[,1],mergedbrahman[,7])
mergedbrahman=mergedbrahman[m,]
checking=brahmanbionano[!is.na(mm),]
checking1=as.matrix(checking[checking[,5]=="W",])
write.table(dam_scaffolds_FINAL_agp_modi[match(X[,1],dam_scaffolds_FINAL_agp_modi[,6]),][isna,] ,file="missingtable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
write.table(dam_scaffolds_FINAL_agp_modi[match(X[,1],dam_scaffolds_FINAL_agp_modi[,6]),],file="finaltable-Xsalsa.txt",row.names=F,col.names=F,sep="\t")
write.table(supermergedbrahman,file="finaltable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011795",],file="missedtig00011795.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00003246",],file="missedtig00003246.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011886",],file="missedtig00011886.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011907",],file="missedtig00011907.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00479033",],file="missedtig00479033.csv")
write.csv(brahmanbionanotignew[brahmanbionanotignew[,10]=="tig00011826",],file="missedtig00011826.csv")
#########################################################################################
#dam full
#########################################################################################
colnames(brahmanmashmap)[6]="contig"
brahmanmashmapXnew2=merge(brahmanmashmap,dam_scaffolds_FINAL_agp_modi,by.x="contig",by.y="component_id")
colnames(brahmanmashmapXnew2)[2]="tig"
brahmanbionanotig=apply(as.matrix(brahmanbionano[,6]),1,function(x){strsplit(x,"_")[[1]][1]})
brahmanbionanotignew=cbind(brahmanbionano,brahmanbionanotig)
mergedbrahman=merge(brahmanmashmapXnew2,brahmanbionanotignew,by.x="tig",by.y="brahmanbionanotig")
#########################################################################################
#Y
#########################################################################################
y=read.csv("sireY_scaffold_info.csv",header=F)
y=y[y[,5]=="A",]
newYcontig=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[,6],]
for(i in 1:nrow(Y)){
if(i==1){
newYcontigbase=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[1,6],]
oriation=rep(as.character(y[i,9]),nrow(newYcontigbase))
}else{
newYcontigbase2=sire_scaffolds_FINAL_agp_modi[sire_scaffolds_FINAL_agp_modi[,1]%in%y[i,6],]
oriation2=rep(as.character(y[i,9]),nrow(newYcontigbase2))
newYcontigbase=rbind(newYcontigbase,newYcontigbase2)
oriation=c(oriation,oriation2)
}
}
Y=cbind(newYcontigbase[,6],oriation)
angusmashmapY=angusmashmap[angusmashmap[,6]%in%Y[,1],]
angusmashmapY=angusmashmapY[match(Y[,1],angusmashmapY[,6]),]
angusmashmapYtig=apply(as.matrix(angusmashmapY[,1]),1,function(x){strsplit(x,"\\|")[[1]][1]})
angusmashmapYnew=cbind(angusmashmapY,angusmashmapYtig)
angusbionanotig=apply(as.matrix(angusbionano[,6]),1,function(x){strsplit(x,"\\|")[[1]][1]})
angusbionanotignew=cbind(angusbionano,angusbionanotig)
mergedangus=merge(angusmashmapYnew,angusbionanotignew,by.x="angusmashmapYtig",by.y="angusbionanotig")
supermergedangus=mergedangus[,c(1,3,4,5,6,7,12,13,14,15)]
m=match(Y[,1],supermergedangus[,6])
supermergedangus=supermergedangus[m,]
table(as.matrix(supermergedangus[,7]))
special=rownames(table(as.matrix(supermergedangus[,7])))[1:12]
# Pasted console output (value of `special`), commented out so the script parses:
# > special
# [1] "Super-Scaffold_100035" "Super-Scaffold_100072" "Super-Scaffold_100129" "Super-Scaffold_100133" "Super-Scaffold_100160" "Super-Scaffold_100217" "Super-Scaffold_100308" "Super-Scaffold_100367"
# [9] "Super-Scaffold_100404" "Super-Scaffold_100406" "Super-Scaffold_100477" "Super-Scaffold_100494"
checking=brahmanbionano[!is.na(mm),]
checking1=as.matrix(checking[checking[,5]=="W",])
write.table(sire_scaffolds_FINAL_agp_modi[match(X[,1],sire_scaffolds_FINAL_agp_modi[,6]),][isna,] ,file="missingtable-Xmatchedbionano.txt",row.names=F,col.names=F,sep="\t")
write.table(newYcontigbase,file="finaltable-Ysalsa.txt",row.names=F,col.names=F,sep="\t")
write.table(supermergedangus,file="finaltable-Ymatchedbionano.txt",row.names=F,col.names=F,sep="\t")
superscaffold=angusbionano[angusbionano[,1]%in%rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11],]
specialtig=superscaffold[superscaffold[,5]=="W",6]
specialtig=apply(as.matrix(specialtig),1,function(x){strsplit(x,"_")[[1]][1]})
specialtig=rownames(table(specialtig))
sire_scaffolds_FINAL_agp_modi[match(angusmashmap[match(specialtig,angusmashmap[,1]),6],sire_scaffolds_FINAL_agp_modi[,6]),]
rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11]
angusbionano[angusbionano[,1]%in%rownames(table(as.character(as.matrix(supermergedangus[,7]))))[1:11],6]
|
#annotate putative hemolymph proteins detected in DDA
# Left-join the DDA protein list to BLASTp UniProt hits, then to UniProt
# protein names, and write the annotated table.
# NOTE: setwd() in a script is fragile; prefer absolute paths or here::here().
setwd('~/Documents/genome_sciences_postdoc/geoduck/hemolymph')
hem.prot<-read.csv('Putative hemolymph proteins.csv', header=T)
setwd('~/Documents/genome_sciences_postdoc/geoduck/transcriptome/uniprot protein annotations')
annot<-read.csv('geoduck_blastp_uniprot2.csv', header=T)
# Rename the join keys so merge() can match on them.
names(annot)[names(annot)=='Query']<-'protein'
prot.name<-read.csv('uniprot protein names.csv', header=T)
names(prot.name)[names(prot.name)=='Entry.name']<-'Hit'
# all.x=T keeps every putative hemolymph protein even without a UniProt hit.
hem.annot<-merge(x=hem.prot, y=annot, by='protein', all.x=T)
hem.name<-merge(x=hem.annot, y=prot.name, by='Hit', all.x=T)
write.csv(hem.name, file='annotated putative hemolymph proteins.csv')
#SRM Skyline data
#read in file and subset by raw file number
sky.srm<-read.csv('Skyline output SRM hemolymph.csv', header=T, na.strings='#N/A')
EF18.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo2.raw'))
EF29.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo3.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo4.raw'))
EF30.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo5.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo6.raw'))
MF25.1<-rbind(subset(sky.srm, File.Name=='2016_September_20_geohemo7.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo8.raw'))
MF35.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo9.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo10.raw'))
LF51.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo13.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo14.raw'))
LF69.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo15.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo16.raw'))
LF70.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo17.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo18.raw'))
EM17.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo19.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo20.raw'))
EM20.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo21.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo22.raw'))
EM28.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo23.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo24.raw'))
MM42.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo25.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo26.raw'))
MM46.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo27.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo28.raw'))
LM65.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo29.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo30.raw'))
LM67.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo31.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo32.raw'))
LM68.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo33.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo34.raw'))
EF30.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo47.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo48.raw'))
EF18.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo49.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo50.raw'))
EF29.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo51.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo52.raw'))
MF25.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo37.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo38.raw'))
MF35.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo39.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo40.raw'))
LF51.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo43.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo44.raw'))
LF69.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo45.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo46.raw'))
LF70.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo41.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo42.raw'))
EM17.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo65.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo66.raw'))
EM20.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo67.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo68.raw'))
EM28.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo63.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo64.raw'))
MM42.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo55.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo56.raw'))
MM46.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo53.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo254.raw'))
LM65.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo59.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo60.raw'))
LM67.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo61.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo62.raw'))
LM68.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo57.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo58.raw'))
#subset transition ID and area
EF18.1.sub<-subset(EF18.1, select=c(Transition.ID, Area))
EF18.2.sub<-subset(EF18.2, select=c(Transition.ID, Area))
EF29.1.sub<-subset(EF29.1, select=c(Transition.ID, Area))
EF29.2.sub<-subset(EF29.2, select=c(Transition.ID, Area))
EF30.1.sub<-subset(EF30.1, select=c(Transition.ID, Area))
EF30.2.sub<-subset(EF30.2, select=c(Transition.ID, Area))
MF25.1.sub<-subset(MF25.1, select=c(Transition.ID, Area))
MF25.2.sub<-subset(MF25.2, select=c(Transition.ID, Area))
MF35.1.sub<-subset(MF35.1, select=c(Transition.ID, Area))
MF35.2.sub<-subset(MF35.2, select=c(Transition.ID, Area))
LF51.1.sub<-subset(LF51.1, select=c(Transition.ID, Area))
LF51.2.sub<-subset(LF51.2, select=c(Transition.ID, Area))
LF69.1.sub<-subset(LF69.1, select=c(Transition.ID, Area))
LF69.2.sub<-subset(LF69.2, select=c(Transition.ID, Area))
LF70.1.sub<-subset(LF70.1, select=c(Transition.ID, Area))
LF70.2.sub<-subset(LF70.2, select=c(Transition.ID, Area))
EM17.1.sub<-subset(EM17.1, select=c(Transition.ID, Area))
EM17.2.sub<-subset(EM17.2, select=c(Transition.ID, Area))
EM20.1.sub<-subset(EM20.1, select=c(Transition.ID, Area))
EM20.2.sub<-subset(EM20.2, select=c(Transition.ID, Area))
EM28.1.sub<-subset(EM28.1, select=c(Transition.ID, Area))
EM28.2.sub<-subset(EM28.2, select=c(Transition.ID, Area))
MM42.1.sub<-subset(MM42.1, select=c(Transition.ID, Area))
MM42.2.sub<-subset(MM42.2, select=c(Transition.ID, Area))
MM46.1.sub<-subset(MM46.1, select=c(Transition.ID, Area))
MM46.2.sub<-subset(MM46.2, select=c(Transition.ID, Area))
LM65.1.sub<-subset(LM65.1, select=c(Transition.ID, Area))
LM65.2.sub<-subset(LM65.2, select=c(Transition.ID, Area))
LM67.1.sub<-subset(LM67.1, select=c(Transition.ID, Area))
LM67.2.sub<-subset(LM67.2, select=c(Transition.ID, Area))
LM68.1.sub<-subset(LM68.1, select=c(Transition.ID, Area))
LM68.2.sub<-subset(LM68.2, select=c(Transition.ID, Area))
#rename area columns
names(EF18.1.sub)[names(EF18.1.sub)=='Area']<-'EF18.1'
names(EF18.2.sub)[names(EF18.2.sub)=='Area']<-'EF18.2'
names(EF29.1.sub)[names(EF29.1.sub)=='Area']<-'EF29.1'
names(EF29.2.sub)[names(EF29.2.sub)=='Area']<-'EF29.2'
names(EF30.1.sub)[names(EF30.1.sub)=='Area']<-'EF30.1'
names(EF30.2.sub)[names(EF30.2.sub)=='Area']<-'EF30.2'
names(MF25.1.sub)[names(MF25.1.sub)=='Area']<-'MF25.1'
names(MF25.2.sub)[names(MF25.2.sub)=='Area']<-'MF25.2'
names(MF35.1.sub)[names(MF35.1.sub)=='Area']<-'MF35.1'
names(MF35.2.sub)[names(MF35.2.sub)=='Area']<-'MF35.2'
names(LF51.1.sub)[names(LF51.1.sub)=='Area']<-'LF51.1'
names(LF51.2.sub)[names(LF51.2.sub)=='Area']<-'LF51.2'
names(LF69.1.sub)[names(LF69.1.sub)=='Area']<-'LF69.1'
names(LF69.2.sub)[names(LF69.2.sub)=='Area']<-'LF69.2'
names(LF70.1.sub)[names(LF70.1.sub)=='Area']<-'LF70.1'
names(LF70.2.sub)[names(LF70.2.sub)=='Area']<-'LF70.2'
names(EM17.1.sub)[names(EM17.1.sub)=='Area']<-'EM17.1'
names(EM17.2.sub)[names(EM17.2.sub)=='Area']<-'EM17.2'
names(EM20.1.sub)[names(EM20.1.sub)=='Area']<-'EM20.1'
names(EM20.2.sub)[names(EM20.2.sub)=='Area']<-'EM20.2'
names(EM28.1.sub)[names(EM28.1.sub)=='Area']<-'EM28.1'
names(EM28.2.sub)[names(EM28.2.sub)=='Area']<-'EM28.2'
names(MM42.1.sub)[names(MM42.1.sub)=='Area']<-'MM42.1'
names(MM42.2.sub)[names(MM42.2.sub)=='Area']<-'MM42.2'
names(MM46.1.sub)[names(MM46.1.sub)=='Area']<-'MM46.1'
names(MM46.2.sub)[names(MM46.2.sub)=='Area']<-'MM46.2'
names(LM65.1.sub)[names(LM65.1.sub)=='Area']<-'LM65.1'
names(LM65.2.sub)[names(LM65.2.sub)=='Area']<-'LM65.2'
names(LM67.1.sub)[names(LM67.1.sub)=='Area']<-'LM67.1'
names(LM67.2.sub)[names(LM67.2.sub)=='Area']<-'LM67.2'
names(LM68.1.sub)[names(LM68.1.sub)=='Area']<-'LM68.1'
names(LM68.2.sub)[names(LM68.2.sub)=='Area']<-'LM68.2'
#merge all columns together
transitionIDs<-subset(EF18.1, select=Transition.ID)
merge1<-merge(x=transitionIDs, y=EF18.1.sub, by='Transition.ID', all.x=T)
merge2<-merge(x=merge1, y=EF18.2.sub, by='Transition.ID', all.x=T)
merge3<-merge(x=merge2, y=EF29.1.sub, by='Transition.ID', all.x=T)
merge4<-merge(x=merge3, y=EF29.2.sub, by='Transition.ID', all.x=T)
merge5<-merge(x=merge4, y=EF30.1.sub, by='Transition.ID', all.x=T)
merge6<-merge(x=merge5, y=EF30.2.sub, by='Transition.ID', all.x=T)
merge7<-merge(x=merge6, y=MF25.1.sub, by='Transition.ID', all.x=T)
merge8<-merge(x=merge7, y=MF25.2.sub, by='Transition.ID', all.x=T)
merge9<-merge(x=merge8, y=MF35.1.sub, by='Transition.ID', all.x=T)
merge10<-merge(x=merge9, y=MF35.2.sub, by='Transition.ID', all.x=T)
merge11<-merge(x=merge10, y=LF51.1.sub, by='Transition.ID', all.x=T)
merge12<-merge(x=merge11, y=LF51.2.sub, by='Transition.ID', all.x=T)
merge13<-merge(x=merge12, y=LF69.1.sub, by='Transition.ID', all.x=T)
merge14<-merge(x=merge13, y=LF69.2.sub, by='Transition.ID', all.x=T)
merge15<-merge(x=merge14, y=LF70.1.sub, by='Transition.ID', all.x=T)
merge16<-merge(x=merge15, y=LF70.2.sub, by='Transition.ID', all.x=T)
merge17<-merge(x=merge16, y=EM17.1.sub, by='Transition.ID', all.x=T)
merge18<-merge(x=merge17, y=EM17.2.sub, by='Transition.ID', all.x=T)
merge19<-merge(x=merge18, y=EM20.1.sub, by='Transition.ID', all.x=T)
merge20<-merge(x=merge19, y=EM20.2.sub, by='Transition.ID', all.x=T)
merge21<-merge(x=merge20, y=EM28.1.sub, by='Transition.ID', all.x=T)
merge22<-merge(x=merge21, y=EM28.2.sub, by='Transition.ID', all.x=T)
merge23<-merge(x=merge22, y=MM42.1.sub, by='Transition.ID', all.x=T)
merge24<-merge(x=merge23, y=MM42.2.sub, by='Transition.ID', all.x=T)
merge25<-merge(x=merge24, y=MM46.1.sub, by='Transition.ID', all.x=T)
merge26<-merge(x=merge25, y=MM46.2.sub, by='Transition.ID', all.x=T)
merge27<-merge(x=merge26, y=LM65.1.sub, by='Transition.ID', all.x=T)
merge28<-merge(x=merge27, y=LM65.2.sub, by='Transition.ID', all.x=T)
merge29<-merge(x=merge28, y=LM67.1.sub, by='Transition.ID', all.x=T)
merge30<-merge(x=merge29, y=LM67.2.sub, by='Transition.ID', all.x=T)
merge31<-merge(x=merge30, y=LM68.1.sub, by='Transition.ID', all.x=T)
merge32<-merge(x=merge31, y=LM68.2.sub, by='Transition.ID', all.x=T)
merge32[is.na(merge32)]<-0
#determine which PRTC intensities are stable across replicates
#calculate the slopes of intensities. want slope ~0
#first 33 rows are prtc
prtc<-subset(merge32, grepl(paste('PRTC', collapse="|"), merge32$Transition.ID))
prtc2<-prtc[,-1]
prtc.t<-t(prtc2)
prtc.df<-data.frame(prtc.t)
#find peptides with lowest cv across reps
library(raster)
prtc.cv<-apply(prtc.df, 2, cv)
# Pasted console output (per-peptide CV values from prtc.cv), commented out so
# the script parses:
# X219 X220 X221 X222 X223 X224 X225 X226 X227 X228 X229
# 20.367026 18.949752 14.707365 15.967449 11.979733 15.185522 30.605430 31.749470 30.805100 20.221511 17.892436
# X230 X231 X232 X233 X234 X235 X236 X237 X238 X239 X240
# 20.787204 13.512491 11.806373 10.338973 14.145489 12.561924 9.436204 19.552139 18.872567 16.198849 10.370778
# X241 X242 X243 X244 X245 X246 X247 X248 X249 X250 X251
# 9.497709 9.336932 17.619883 17.042945 14.057066 9.764776 6.787763 4.239475 21.012342 21.752992 21.617016
#CVs < 10 are in columns 18, 23, 24, 28, 29, 30
prtc.lowcv<-subset(prtc.df, select=c(X236, X241, X242, X246, X247, X248))
prtc.lowcv.t<-t(prtc.lowcv)
prtc.avg<-apply(prtc.lowcv.t, 2, mean)
hemo.unnorm<-merge32[,-1]
rownames(hemo.unnorm)<-merge32[,1]
hemo.norm<-hemo.unnorm/prtc.avg
write.csv(hemo.norm, "Normalized SRM Hemolymph.csv")
#NMDS all reps
library(vegan)
hemo.t<-t(hemo.norm[1:218,])
hemo.tra<-(hemo.t+1)
hemo.tra<-data.trans(hemo.tra, method='log', plot=F)
hemo.nmds<-metaMDS(hemo.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
fig.hemo<-ordiplot(hemo.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig.hemo, 'sites', col=c(rep('#DEEBF7',6), rep('#9ECAE1',4), rep('#3182BD',6), rep('#FEE6CE',6),rep('#FDAE6B',4),rep('#E6550D',6)), pch=c(rep(19,6), rep(15,4), rep(17,6), rep(19,6), rep(15,4), rep(17,6)), cex=1.5)
legend(-0.00021, 0.00017, legend=c('Male', "Female", "Early", "Mid", "Late"), pch=c(19,19,19,15,17), col=c('orange', 'blue', rep('black',3)))
#remove peptides with suspect transition times
hemo.RT<-read.csv('Normalized SRM Hemolymph good RT.csv', header=T, row.names=1)
hemo2.t<-t(hemo.RT)
hemo2.tra<-(hemo2.t+1)
hemo2.tra<-data.trans(hemo2.tra, method='log', plot=F)
hemo2.nmds<-metaMDS(hemo2.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo2.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
#looks very similar to NMDS with all transitions
fig2.hemo<-ordiplot(hemo2.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig2.hemo, 'sites', col=c(rep('#DEEBF7',6), rep('#9ECAE1',4), rep('#3182BD',6), rep('#FEE6CE',6),rep('#FDAE6B',4),rep('#E6550D',6)), pch=c(rep(19,6), rep(15,4), rep(17,6), rep(19,6), rep(15,4), rep(17,6)), cex=1.5)
legend(-0.00013, 0.0001, legend=c('Male', "Female", "Early-Stage", "Mid-Stage", "Late-Stage"), pch=c(19,19,19,15,17), col=c('#E6550D', '#3182BD', rep('black',3)))
par(new=T)
par(fig=c(0.49, 0.99,0.01, 0.51))
fig.avg.hemo<-ordiplot(hemo.avg.nmds, choices=c(1,2), type='none', display='sites', xlab='', ylab='', xaxt='n', yaxt='n', fg='grey33')
points(fig.avg.hemo, 'sites', col=c(rep('#DEEBF7',3), rep('#9ECAE1',2), rep('#3182BD',3), rep('#FEE6CE',3),rep('#FDAE6B',2),rep('#E6550D',3)), pch=c(rep(19,3), rep(15,2), rep(17,3), rep(19,3), rep(15,2), rep(17,3)))
#eigenvectors
vec.nsaf<-envfit(hemo2.nmds$points, hemo2.t, perm=1000)
write.csv(vec.nsaf, 'Eigenvectors for good RT hemolymph.csv')
#avg tech reps
EF18.avg<-apply(hemo.RT[1:2], 1, mean)
EF29.avg<-apply(hemo.RT[3:4], 1, mean)
EF30.avg<-apply(hemo.RT[5:6], 1, mean)
MF25.avg<-apply(hemo.RT[7:8], 1, mean)
MF35.avg<-apply(hemo.RT[9:10], 1, mean)
LF51.avg<-apply(hemo.RT[11:12], 1, mean)
LF69.avg<-apply(hemo.RT[13:14], 1, mean)
LF70.avg<-apply(hemo.RT[15:16], 1, mean)
EM17.avg<-apply(hemo.RT[17:18], 1, mean)
EM20.avg<-apply(hemo.RT[19:20], 1, mean)
EM28.avg<-apply(hemo.RT[21:22], 1, mean)
MM42.avg<-apply(hemo.RT[23:24], 1, mean)
MM46.avg<-apply(hemo.RT[25:26], 1, mean)
LM65.avg<-apply(hemo.RT[27:28], 1, mean)
LM67.avg<-apply(hemo.RT[29:30], 1, mean)
LM68.avg<-apply(hemo.RT[31:32], 1, mean)
all.avg<-cbind(EF18.avg, EF29.avg, EF30.avg, MF25.avg, MF35.avg, LF51.avg, LF69.avg, LF70.avg, EM17.avg, EM20.avg, EM28.avg, MM42.avg, MM46.avg, LM65.avg, LM67.avg, LM68.avg)
rownames(all.avg)<-rownames(hemo.RT)
write.csv(all.avg, 'hemolymph transitions averaged tech reps.csv')
#NMDS avg tech reps
hemoavg.t<-t(all.avg)
hemoavg.tra<-(hemoavg.t+1)
hemoavg.tra<-data.trans(hemoavg.tra, method='log', plot=F)
hemo.avg.nmds<-metaMDS(hemoavg.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo.avg.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
fig.avg.hemo<-ordiplot(hemo.avg.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig.avg.hemo, 'sites', col=c(rep('#DEEBF7',3), rep('#9ECAE1',2), rep('#3182BD',3), rep('#FEE6CE',3),rep('#FDAE6B',2),rep('#E6550D',3)), pch=c(rep(19,3), rep(15,2), rep(17,3), rep(19,3), rep(15,2), rep(17,3)), cex=1.5)
legend(-0.00002, 6e-5, legend=c('Male', "Female", "Early", "Mid", "Late"), pch=c(19,19,19,15,17), col=c('orange', 'blue', rep('black',3)))
#heat map avg tech reps
library(pheatmap)
library(RColorBrewer)
hm.col<-brewer.pal(9,'YlOrRd')
hemoRT.tra<-data.trans(all.avg, method='log', plot=F)
pheatmap(hemoRT.tra, cluster_rows=T, cluster_cols=T, clustering_distance_rows='euclidean', clustering_distance_cols='euclidean', clustering_method='average', show_rownames=F, color=hm.col)
#heat map of top significant transitions
sig.hemo<-read.csv('hemolymph sig transitions.csv', header=T, row.names=1)
sighemo.tra<-data.trans(sig.hemo, method='log', plot=F)
hm2.col<-brewer.pal(9,'Greens')
pheatmap(sighemo.tra, cluster_rows=T, cluster_cols=T, clustering_distance_rows='euclidean', clustering_distance_cols='euclidean', clustering_method='average', show_rownames=F, color=hm.col)
#ANOSIM
sex.stage<-c(rep("EF",3), rep("MF", 2), rep("LF", 3), rep("EM",3), rep("MM",2), rep("LM",3))
hemo.row<-data.stand(hemoavg.t, method='total', margin='row', plot=F)
hemo.d<-vegdist(hemo.row, 'bray')
hemo.anos<-anosim(hemo.d, grouping=sex.stage)
summary(hemo.anos)
ANOSIM statistic R: 0.4892
Significance: 0.001
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.133 0.198 0.256 0.327
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.875 65.5 92.75 120 106
EF 17 19.500 22.0 40.00 58 3
EM 60 68.500 77.0 86.00 95 3
LF 3 23.000 43.0 53.50 64 3
LM 2 9.000 16.0 17.00 18 3
MF 1 1.000 1.0 1.00 1 1
MM 8 8.000 8.0 8.00 8 1
sex<-c(rep("F", 8), rep('M', 8))
sex.anos<-anosim(hemo.d, grouping=sex)
summary(sex.anos)
ANOSIM statistic R: 0.1384
Significance: 0.043
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.0943 0.1211 0.1597 0.1993
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.50 68.25 93.25 120 64
F 1 20.75 43.75 58.75 84 28
M 2 42.00 87.50 102.75 119 28
stage<-c(rep('early', 3), rep('mid', 2), rep('late', 3), rep('early', 3), rep('mid', 2), rep('late', 3))
stage.anos<-anosim(hemo.d, grouping=stage)
summary(stage.anos)
ANOSIM statistic R: 0.1435
Significance: 0.065
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.111 0.163 0.212 0.271
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.500 58.25 92.25 120.0 84
early 10 41.000 60.00 74.50 103.0 15
late 2 11.500 43.00 70.50 85.0 15
mid 1 30.875 99.50 112.25 116.5 6
#CV for all reps
EF18.cv<-apply(hemo.RT[1:2], 1, cv)
EF29.cv<-apply(hemo.RT[3:4], 1, cv)
EF30.cv<-apply(hemo.RT[5:6], 1, cv)
MF25.cv<-apply(hemo.RT[7:8], 1, cv)
MF35.cv<-apply(hemo.RT[9:10], 1, cv)
LF51.cv<-apply(hemo.RT[11:12], 1, cv)
LF69.cv<-apply(hemo.RT[13:14], 1, cv)
LF70.cv<-apply(hemo.RT[15:16], 1, cv)
EM17.cv<-apply(hemo.RT[17:18], 1, cv)
EM20.cv<-apply(hemo.RT[19:20], 1, cv)
EM28.cv<-apply(hemo.RT[21:22], 1, cv)
MM42.cv<-apply(hemo.RT[23:24], 1, cv)
MM46.cv<-apply(hemo.RT[25:26], 1, cv)
LM65.cv<-apply(hemo.RT[27:28], 1, cv)
LM67.cv<-apply(hemo.RT[29:30], 1, cv)
LM68.cv<-apply(hemo.RT[31:32], 1, cv)
geoduck.cv<-cbind(EF18.cv, EF29.cv, EF30.cv, MF25.cv, MF35.cv, LF51.cv, LF69.cv, LF70.cv, EM17.cv, EM20.cv, EM28.cv, MM42.cv, MM46.cv, LM65.cv, LM67.cv, LM68.cv)
#cvs across biological reps
EF.cv<-apply(hemo.RT[1:6], 1, cv)
MF.cv<-apply(hemo.RT[7:10], 1, cv)
LF.cv<-apply(hemo.RT[11:16], 1, cv)
EM.cv<-apply(hemo.RT[17:22], 1, cv)
MM.cv<-apply(hemo.RT[23:26], 1, cv)
LM.cv<-apply(hemo.RT[27:32], 1, cv)
biorep.cv<-cbind(EF.cv, MF.cv, LF.cv, EM.cv, MM.cv, LM.cv)
#boxplot of cvs for each dilution
boxplot(geoduck.cv, outline=T, names=c('EF18', 'EF29', 'EF30', 'MF25', 'MF35', 'LF51', 'LF69', 'LF70', 'EM17', 'EM20', 'EM28', 'MM42', 'MM46', 'LM65', 'LM67', 'LM68'), xlab='Geoduck Hemolymph Sample', ylab='Coefficient of Variation', las=2, ylim=c(0,300))
boxplot(biorep.cv, outline=T, names=c('EF', 'MF', 'LF', 'EM', 'MM', 'LM'), xlab='Geoduck Hemolymph Group', ylab='Coefficient of Variation', las=2, ylim=c(0,300)) | /R-code/hemolymph.R | no_license | emmats/supp-geoduck-proteomics | R | false | false | 20,880 | r | #annotate putative hemolymph proteins detected in DDA
setwd('~/Documents/genome_sciences_postdoc/geoduck/hemolymph')
hem.prot<-read.csv('Putative hemolymph proteins.csv', header=T)
setwd('~/Documents/genome_sciences_postdoc/geoduck/transcriptome/uniprot protein annotations')
annot<-read.csv('geoduck_blastp_uniprot2.csv', header=T)
names(annot)[names(annot)=='Query']<-'protein'
prot.name<-read.csv('uniprot protein names.csv', header=T)
names(prot.name)[names(prot.name)=='Entry.name']<-'Hit'
hem.annot<-merge(x=hem.prot, y=annot, by='protein', all.x=T)
hem.name<-merge(x=hem.annot, y=prot.name, by='Hit', all.x=T)
write.csv(hem.name, file='annotated putative hemolymph proteins.csv')
#SRM Skyline data
#read in file and subset by raw file number
sky.srm<-read.csv('Skyline output SRM hemolymph.csv', header=T, na.strings='#N/A')
EF18.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo2.raw'))
EF29.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo3.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo4.raw'))
EF30.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo5.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo6.raw'))
MF25.1<-rbind(subset(sky.srm, File.Name=='2016_September_20_geohemo7.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo8.raw'))
MF35.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo9.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo10.raw'))
LF51.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo13.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo14.raw'))
LF69.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo15.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo16.raw'))
LF70.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo17.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo18.raw'))
EM17.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo19.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo20.raw'))
EM20.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo21.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo22.raw'))
EM28.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo23.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo24.raw'))
MM42.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo25.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo26.raw'))
MM46.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo27.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo28.raw'))
LM65.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo29.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo30.raw'))
LM67.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo31.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo32.raw'))
LM68.1<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo33.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo34.raw'))
EF30.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo47.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo48.raw'))
EF18.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo49.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo50.raw'))
EF29.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geoheemo51.raw'), subset(sky.srm, File.Name=='2016_September_29_geoheemo52.raw'))
MF25.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo37.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo38.raw'))
MF35.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo39.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo40.raw'))
LF51.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo43.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo44.raw'))
LF69.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo45.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo46.raw'))
LF70.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo41.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo42.raw'))
EM17.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo65.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo66.raw'))
EM20.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo67.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo68.raw'))
EM28.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo63.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo64.raw'))
MM42.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo55.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo56.raw'))
MM46.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo53.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo254.raw'))
LM65.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo59.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo60.raw'))
LM67.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo61.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo62.raw'))
LM68.2<-rbind(subset(sky.srm, File.Name=='2016_September_29_geohemo57.raw'), subset(sky.srm, File.Name=='2016_September_29_geohemo58.raw'))
#subset transition ID and area
EF18.1.sub<-subset(EF18.1, select=c(Transition.ID, Area))
EF18.2.sub<-subset(EF18.2, select=c(Transition.ID, Area))
EF29.1.sub<-subset(EF29.1, select=c(Transition.ID, Area))
EF29.2.sub<-subset(EF29.2, select=c(Transition.ID, Area))
EF30.1.sub<-subset(EF30.1, select=c(Transition.ID, Area))
EF30.2.sub<-subset(EF30.2, select=c(Transition.ID, Area))
MF25.1.sub<-subset(MF25.1, select=c(Transition.ID, Area))
MF25.2.sub<-subset(MF25.2, select=c(Transition.ID, Area))
MF35.1.sub<-subset(MF35.1, select=c(Transition.ID, Area))
MF35.2.sub<-subset(MF35.2, select=c(Transition.ID, Area))
LF51.1.sub<-subset(LF51.1, select=c(Transition.ID, Area))
LF51.2.sub<-subset(LF51.2, select=c(Transition.ID, Area))
LF69.1.sub<-subset(LF69.1, select=c(Transition.ID, Area))
LF69.2.sub<-subset(LF69.2, select=c(Transition.ID, Area))
LF70.1.sub<-subset(LF70.1, select=c(Transition.ID, Area))
LF70.2.sub<-subset(LF70.2, select=c(Transition.ID, Area))
EM17.1.sub<-subset(EM17.1, select=c(Transition.ID, Area))
EM17.2.sub<-subset(EM17.2, select=c(Transition.ID, Area))
EM20.1.sub<-subset(EM20.1, select=c(Transition.ID, Area))
EM20.2.sub<-subset(EM20.2, select=c(Transition.ID, Area))
EM28.1.sub<-subset(EM28.1, select=c(Transition.ID, Area))
EM28.2.sub<-subset(EM28.2, select=c(Transition.ID, Area))
MM42.1.sub<-subset(MM42.1, select=c(Transition.ID, Area))
MM42.2.sub<-subset(MM42.2, select=c(Transition.ID, Area))
MM46.1.sub<-subset(MM46.1, select=c(Transition.ID, Area))
MM46.2.sub<-subset(MM46.2, select=c(Transition.ID, Area))
LM65.1.sub<-subset(LM65.1, select=c(Transition.ID, Area))
LM65.2.sub<-subset(LM65.2, select=c(Transition.ID, Area))
LM67.1.sub<-subset(LM67.1, select=c(Transition.ID, Area))
LM67.2.sub<-subset(LM67.2, select=c(Transition.ID, Area))
LM68.1.sub<-subset(LM68.1, select=c(Transition.ID, Area))
LM68.2.sub<-subset(LM68.2, select=c(Transition.ID, Area))
#rename area columns
names(EF18.1.sub)[names(EF18.1.sub)=='Area']<-'EF18.1'
names(EF18.2.sub)[names(EF18.2.sub)=='Area']<-'EF18.2'
names(EF29.1.sub)[names(EF29.1.sub)=='Area']<-'EF29.1'
names(EF29.2.sub)[names(EF29.2.sub)=='Area']<-'EF29.2'
names(EF30.1.sub)[names(EF30.1.sub)=='Area']<-'EF30.1'
names(EF30.2.sub)[names(EF30.2.sub)=='Area']<-'EF30.2'
names(MF25.1.sub)[names(MF25.1.sub)=='Area']<-'MF25.1'
names(MF25.2.sub)[names(MF25.2.sub)=='Area']<-'MF25.2'
names(MF35.1.sub)[names(MF35.1.sub)=='Area']<-'MF35.1'
names(MF35.2.sub)[names(MF35.2.sub)=='Area']<-'MF35.2'
names(LF51.1.sub)[names(LF51.1.sub)=='Area']<-'LF51.1'
names(LF51.2.sub)[names(LF51.2.sub)=='Area']<-'LF51.2'
names(LF69.1.sub)[names(LF69.1.sub)=='Area']<-'LF69.1'
names(LF69.2.sub)[names(LF69.2.sub)=='Area']<-'LF69.2'
names(LF70.1.sub)[names(LF70.1.sub)=='Area']<-'LF70.1'
names(LF70.2.sub)[names(LF70.2.sub)=='Area']<-'LF70.2'
names(EM17.1.sub)[names(EM17.1.sub)=='Area']<-'EM17.1'
names(EM17.2.sub)[names(EM17.2.sub)=='Area']<-'EM17.2'
names(EM20.1.sub)[names(EM20.1.sub)=='Area']<-'EM20.1'
names(EM20.2.sub)[names(EM20.2.sub)=='Area']<-'EM20.2'
names(EM28.1.sub)[names(EM28.1.sub)=='Area']<-'EM28.1'
names(EM28.2.sub)[names(EM28.2.sub)=='Area']<-'EM28.2'
names(MM42.1.sub)[names(MM42.1.sub)=='Area']<-'MM42.1'
names(MM42.2.sub)[names(MM42.2.sub)=='Area']<-'MM42.2'
names(MM46.1.sub)[names(MM46.1.sub)=='Area']<-'MM46.1'
names(MM46.2.sub)[names(MM46.2.sub)=='Area']<-'MM46.2'
names(LM65.1.sub)[names(LM65.1.sub)=='Area']<-'LM65.1'
names(LM65.2.sub)[names(LM65.2.sub)=='Area']<-'LM65.2'
names(LM67.1.sub)[names(LM67.1.sub)=='Area']<-'LM67.1'
names(LM67.2.sub)[names(LM67.2.sub)=='Area']<-'LM67.2'
names(LM68.1.sub)[names(LM68.1.sub)=='Area']<-'LM68.1'
names(LM68.2.sub)[names(LM68.2.sub)=='Area']<-'LM68.2'
#merge all columns together
transitionIDs<-subset(EF18.1, select=Transition.ID)
merge1<-merge(x=transitionIDs, y=EF18.1.sub, by='Transition.ID', all.x=T)
merge2<-merge(x=merge1, y=EF18.2.sub, by='Transition.ID', all.x=T)
merge3<-merge(x=merge2, y=EF29.1.sub, by='Transition.ID', all.x=T)
merge4<-merge(x=merge3, y=EF29.2.sub, by='Transition.ID', all.x=T)
merge5<-merge(x=merge4, y=EF30.1.sub, by='Transition.ID', all.x=T)
merge6<-merge(x=merge5, y=EF30.2.sub, by='Transition.ID', all.x=T)
merge7<-merge(x=merge6, y=MF25.1.sub, by='Transition.ID', all.x=T)
merge8<-merge(x=merge7, y=MF25.2.sub, by='Transition.ID', all.x=T)
merge9<-merge(x=merge8, y=MF35.1.sub, by='Transition.ID', all.x=T)
merge10<-merge(x=merge9, y=MF35.2.sub, by='Transition.ID', all.x=T)
merge11<-merge(x=merge10, y=LF51.1.sub, by='Transition.ID', all.x=T)
merge12<-merge(x=merge11, y=LF51.2.sub, by='Transition.ID', all.x=T)
merge13<-merge(x=merge12, y=LF69.1.sub, by='Transition.ID', all.x=T)
merge14<-merge(x=merge13, y=LF69.2.sub, by='Transition.ID', all.x=T)
merge15<-merge(x=merge14, y=LF70.1.sub, by='Transition.ID', all.x=T)
merge16<-merge(x=merge15, y=LF70.2.sub, by='Transition.ID', all.x=T)
merge17<-merge(x=merge16, y=EM17.1.sub, by='Transition.ID', all.x=T)
merge18<-merge(x=merge17, y=EM17.2.sub, by='Transition.ID', all.x=T)
merge19<-merge(x=merge18, y=EM20.1.sub, by='Transition.ID', all.x=T)
merge20<-merge(x=merge19, y=EM20.2.sub, by='Transition.ID', all.x=T)
merge21<-merge(x=merge20, y=EM28.1.sub, by='Transition.ID', all.x=T)
merge22<-merge(x=merge21, y=EM28.2.sub, by='Transition.ID', all.x=T)
merge23<-merge(x=merge22, y=MM42.1.sub, by='Transition.ID', all.x=T)
merge24<-merge(x=merge23, y=MM42.2.sub, by='Transition.ID', all.x=T)
merge25<-merge(x=merge24, y=MM46.1.sub, by='Transition.ID', all.x=T)
merge26<-merge(x=merge25, y=MM46.2.sub, by='Transition.ID', all.x=T)
merge27<-merge(x=merge26, y=LM65.1.sub, by='Transition.ID', all.x=T)
merge28<-merge(x=merge27, y=LM65.2.sub, by='Transition.ID', all.x=T)
merge29<-merge(x=merge28, y=LM67.1.sub, by='Transition.ID', all.x=T)
merge30<-merge(x=merge29, y=LM67.2.sub, by='Transition.ID', all.x=T)
merge31<-merge(x=merge30, y=LM68.1.sub, by='Transition.ID', all.x=T)
merge32<-merge(x=merge31, y=LM68.2.sub, by='Transition.ID', all.x=T)
merge32[is.na(merge32)]<-0
#determine which PRTC intensities are stable across replicates
#calculate the slopes of intensities. want slope ~0
#first 33 rows are prtc
prtc<-subset(merge32, grepl(paste('PRTC', collapse="|"), merge32$Transition.ID))
prtc2<-prtc[,-1]
prtc.t<-t(prtc2)
prtc.df<-data.frame(prtc.t)
#find peptides with lowest cv across reps
library(raster)
prtc.cv<-apply(prtc.df, 2, cv)
X219 X220 X221 X222 X223 X224 X225 X226 X227 X228 X229
20.367026 18.949752 14.707365 15.967449 11.979733 15.185522 30.605430 31.749470 30.805100 20.221511 17.892436
X230 X231 X232 X233 X234 X235 X236 X237 X238 X239 X240
20.787204 13.512491 11.806373 10.338973 14.145489 12.561924 9.436204 19.552139 18.872567 16.198849 10.370778
X241 X242 X243 X244 X245 X246 X247 X248 X249 X250 X251
9.497709 9.336932 17.619883 17.042945 14.057066 9.764776 6.787763 4.239475 21.012342 21.752992 21.617016
#CVs < 10 are in columns 18, 23, 24, 28, 29, 30
prtc.lowcv<-subset(prtc.df, select=c(X236, X241, X242, X246, X247, X248))
prtc.lowcv.t<-t(prtc.lowcv)
prtc.avg<-apply(prtc.lowcv.t, 2, mean)
hemo.unnorm<-merge32[,-1]
rownames(hemo.unnorm)<-merge32[,1]
hemo.norm<-hemo.unnorm/prtc.avg
write.csv(hemo.norm, "Normalized SRM Hemolymph.csv")
#NMDS all reps
library(vegan)
hemo.t<-t(hemo.norm[1:218,])
hemo.tra<-(hemo.t+1)
hemo.tra<-data.trans(hemo.tra, method='log', plot=F)
hemo.nmds<-metaMDS(hemo.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
fig.hemo<-ordiplot(hemo.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig.hemo, 'sites', col=c(rep('#DEEBF7',6), rep('#9ECAE1',4), rep('#3182BD',6), rep('#FEE6CE',6),rep('#FDAE6B',4),rep('#E6550D',6)), pch=c(rep(19,6), rep(15,4), rep(17,6), rep(19,6), rep(15,4), rep(17,6)), cex=1.5)
legend(-0.00021, 0.00017, legend=c('Male', "Female", "Early", "Mid", "Late"), pch=c(19,19,19,15,17), col=c('orange', 'blue', rep('black',3)))
#remove peptides with suspect transition times
hemo.RT<-read.csv('Normalized SRM Hemolymph good RT.csv', header=T, row.names=1)
hemo2.t<-t(hemo.RT)
hemo2.tra<-(hemo2.t+1)
hemo2.tra<-data.trans(hemo2.tra, method='log', plot=F)
hemo2.nmds<-metaMDS(hemo2.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo2.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
#looks very similar to NMDS with all transitions
fig2.hemo<-ordiplot(hemo2.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig2.hemo, 'sites', col=c(rep('#DEEBF7',6), rep('#9ECAE1',4), rep('#3182BD',6), rep('#FEE6CE',6),rep('#FDAE6B',4),rep('#E6550D',6)), pch=c(rep(19,6), rep(15,4), rep(17,6), rep(19,6), rep(15,4), rep(17,6)), cex=1.5)
legend(-0.00013, 0.0001, legend=c('Male', "Female", "Early-Stage", "Mid-Stage", "Late-Stage"), pch=c(19,19,19,15,17), col=c('#E6550D', '#3182BD', rep('black',3)))
par(new=T)
par(fig=c(0.49, 0.99,0.01, 0.51))
fig.avg.hemo<-ordiplot(hemo.avg.nmds, choices=c(1,2), type='none', display='sites', xlab='', ylab='', xaxt='n', yaxt='n', fg='grey33')
points(fig.avg.hemo, 'sites', col=c(rep('#DEEBF7',3), rep('#9ECAE1',2), rep('#3182BD',3), rep('#FEE6CE',3),rep('#FDAE6B',2),rep('#E6550D',3)), pch=c(rep(19,3), rep(15,2), rep(17,3), rep(19,3), rep(15,2), rep(17,3)))
#eigenvectors
vec.nsaf<-envfit(hemo2.nmds$points, hemo2.t, perm=1000)
write.csv(vec.nsaf, 'Eigenvectors for good RT hemolymph.csv')
#avg tech reps
EF18.avg<-apply(hemo.RT[1:2], 1, mean)
EF29.avg<-apply(hemo.RT[3:4], 1, mean)
EF30.avg<-apply(hemo.RT[5:6], 1, mean)
MF25.avg<-apply(hemo.RT[7:8], 1, mean)
MF35.avg<-apply(hemo.RT[9:10], 1, mean)
LF51.avg<-apply(hemo.RT[11:12], 1, mean)
LF69.avg<-apply(hemo.RT[13:14], 1, mean)
LF70.avg<-apply(hemo.RT[15:16], 1, mean)
EM17.avg<-apply(hemo.RT[17:18], 1, mean)
EM20.avg<-apply(hemo.RT[19:20], 1, mean)
EM28.avg<-apply(hemo.RT[21:22], 1, mean)
MM42.avg<-apply(hemo.RT[23:24], 1, mean)
MM46.avg<-apply(hemo.RT[25:26], 1, mean)
LM65.avg<-apply(hemo.RT[27:28], 1, mean)
LM67.avg<-apply(hemo.RT[29:30], 1, mean)
LM68.avg<-apply(hemo.RT[31:32], 1, mean)
all.avg<-cbind(EF18.avg, EF29.avg, EF30.avg, MF25.avg, MF35.avg, LF51.avg, LF69.avg, LF70.avg, EM17.avg, EM20.avg, EM28.avg, MM42.avg, MM46.avg, LM65.avg, LM67.avg, LM68.avg)
rownames(all.avg)<-rownames(hemo.RT)
write.csv(all.avg, 'hemolymph transitions averaged tech reps.csv')
#NMDS avg tech reps
hemoavg.t<-t(all.avg)
hemoavg.tra<-(hemoavg.t+1)
hemoavg.tra<-data.trans(hemoavg.tra, method='log', plot=F)
hemo.avg.nmds<-metaMDS(hemoavg.tra, distance='bray', k=2, trymax=100, autotransform=F)
ordiplot(hemo.avg.nmds, choices=c(1,2), type='text', display='sites', xlab='Axis 1', ylab='Axis 2')
fig.avg.hemo<-ordiplot(hemo.avg.nmds, choices=c(1,2), type='none', display='sites', xlab='Axis 1', ylab='Axis 2')
points(fig.avg.hemo, 'sites', col=c(rep('#DEEBF7',3), rep('#9ECAE1',2), rep('#3182BD',3), rep('#FEE6CE',3),rep('#FDAE6B',2),rep('#E6550D',3)), pch=c(rep(19,3), rep(15,2), rep(17,3), rep(19,3), rep(15,2), rep(17,3)), cex=1.5)
legend(-0.00002, 6e-5, legend=c('Male', "Female", "Early", "Mid", "Late"), pch=c(19,19,19,15,17), col=c('orange', 'blue', rep('black',3)))
#heat map avg tech reps
library(pheatmap)
library(RColorBrewer)
hm.col<-brewer.pal(9,'YlOrRd')
hemoRT.tra<-data.trans(all.avg, method='log', plot=F)
pheatmap(hemoRT.tra, cluster_rows=T, cluster_cols=T, clustering_distance_rows='euclidean', clustering_distance_cols='euclidean', clustering_method='average', show_rownames=F, color=hm.col)
#heat map of top significant transitions
sig.hemo<-read.csv('hemolymph sig transitions.csv', header=T, row.names=1)
sighemo.tra<-data.trans(sig.hemo, method='log', plot=F)
hm2.col<-brewer.pal(9,'Greens')
pheatmap(sighemo.tra, cluster_rows=T, cluster_cols=T, clustering_distance_rows='euclidean', clustering_distance_cols='euclidean', clustering_method='average', show_rownames=F, color=hm.col)
#ANOSIM
sex.stage<-c(rep("EF",3), rep("MF", 2), rep("LF", 3), rep("EM",3), rep("MM",2), rep("LM",3))
hemo.row<-data.stand(hemoavg.t, method='total', margin='row', plot=F)
hemo.d<-vegdist(hemo.row, 'bray')
hemo.anos<-anosim(hemo.d, grouping=sex.stage)
summary(hemo.anos)
ANOSIM statistic R: 0.4892
Significance: 0.001
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.133 0.198 0.256 0.327
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.875 65.5 92.75 120 106
EF 17 19.500 22.0 40.00 58 3
EM 60 68.500 77.0 86.00 95 3
LF 3 23.000 43.0 53.50 64 3
LM 2 9.000 16.0 17.00 18 3
MF 1 1.000 1.0 1.00 1 1
MM 8 8.000 8.0 8.00 8 1
sex<-c(rep("F", 8), rep('M', 8))
sex.anos<-anosim(hemo.d, grouping=sex)
summary(sex.anos)
ANOSIM statistic R: 0.1384
Significance: 0.043
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.0943 0.1211 0.1597 0.1993
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.50 68.25 93.25 120 64
F 1 20.75 43.75 58.75 84 28
M 2 42.00 87.50 102.75 119 28
stage<-c(rep('early', 3), rep('mid', 2), rep('late', 3), rep('early', 3), rep('mid', 2), rep('late', 3))
stage.anos<-anosim(hemo.d, grouping=stage)
summary(stage.anos)
ANOSIM statistic R: 0.1435
Significance: 0.065
Permutation: free
Number of permutations: 999
Upper quantiles of permutations (null model):
90% 95% 97.5% 99%
0.111 0.163 0.212 0.271
Dissimilarity ranks between and within classes:
0% 25% 50% 75% 100% N
Between 4 34.500 58.25 92.25 120.0 84
early 10 41.000 60.00 74.50 103.0 15
late 2 11.500 43.00 70.50 85.0 15
mid 1 30.875 99.50 112.25 116.5 6
#CV for all reps
EF18.cv<-apply(hemo.RT[1:2], 1, cv)
EF29.cv<-apply(hemo.RT[3:4], 1, cv)
EF30.cv<-apply(hemo.RT[5:6], 1, cv)
MF25.cv<-apply(hemo.RT[7:8], 1, cv)
MF35.cv<-apply(hemo.RT[9:10], 1, cv)
LF51.cv<-apply(hemo.RT[11:12], 1, cv)
LF69.cv<-apply(hemo.RT[13:14], 1, cv)
LF70.cv<-apply(hemo.RT[15:16], 1, cv)
EM17.cv<-apply(hemo.RT[17:18], 1, cv)
EM20.cv<-apply(hemo.RT[19:20], 1, cv)
EM28.cv<-apply(hemo.RT[21:22], 1, cv)
MM42.cv<-apply(hemo.RT[23:24], 1, cv)
MM46.cv<-apply(hemo.RT[25:26], 1, cv)
LM65.cv<-apply(hemo.RT[27:28], 1, cv)
LM67.cv<-apply(hemo.RT[29:30], 1, cv)
LM68.cv<-apply(hemo.RT[31:32], 1, cv)
geoduck.cv<-cbind(EF18.cv, EF29.cv, EF30.cv, MF25.cv, MF35.cv, LF51.cv, LF69.cv, LF70.cv, EM17.cv, EM20.cv, EM28.cv, MM42.cv, MM46.cv, LM65.cv, LM67.cv, LM68.cv)
#cvs across biological reps
EF.cv<-apply(hemo.RT[1:6], 1, cv)
MF.cv<-apply(hemo.RT[7:10], 1, cv)
LF.cv<-apply(hemo.RT[11:16], 1, cv)
EM.cv<-apply(hemo.RT[17:22], 1, cv)
MM.cv<-apply(hemo.RT[23:26], 1, cv)
LM.cv<-apply(hemo.RT[27:32], 1, cv)
biorep.cv<-cbind(EF.cv, MF.cv, LF.cv, EM.cv, MM.cv, LM.cv)
#boxplot of cvs for each dilution
boxplot(geoduck.cv, outline=T, names=c('EF18', 'EF29', 'EF30', 'MF25', 'MF35', 'LF51', 'LF69', 'LF70', 'EM17', 'EM20', 'EM28', 'MM42', 'MM46', 'LM65', 'LM67', 'LM68'), xlab='Geoduck Hemolymph Sample', ylab='Coefficient of Variation', las=2, ylim=c(0,300))
boxplot(biorep.cv, outline=T, names=c('EF', 'MF', 'LF', 'EM', 'MM', 'LM'), xlab='Geoduck Hemolymph Group', ylab='Coefficient of Variation', las=2, ylim=c(0,300)) |
# install.packages("visNetwork")
if(! "arules" %in% installed.packages()) install.packages("arules")
if(! "arulesViz" %in% installed.packages()) install.packages("arulesViz")
library(arules)
library(arulesViz)
library(visNetwork)
library(igraph)
data<-read.csv("female_like.csv",stringsAsFactors =F,sep="\t",header=T)
names(data)
names(data)<-c("id","name")
str(data$name)
#make the data to the list for transfer to transaction with 'split'
lst <- split(data$name,data$id)
head(lst,1)
# 환경변수를 없애서 패키지 충돌로 인한 오류 방지용 함수.
# 혹시 문제가 있을 시 이 함수를 적용 후, 다시 실행 권장.
dev.off()
aggrData <- lst
listData <- list()
#중복 제거.
for (i in 1:length(aggrData)) {
listData[[i]] <- as.character(aggrData[[i]][!duplicated(aggrData[[i]])])
}
# user Id 별로 잘 들어 갔는지 확인해 봅시다.
#2번째 사용자의 좋아하는 페이지
listData[[2]]
# make transactions합니다.
trans <- as(listData,"transactions")
#head로 데이터를 앞에 6개만 확인하려 하면, sparse format의 transactions이다라고만 나옵니다.
head(trans)
#앞에서 의도한 대로 head를 쓰고 싶다면, trans 데이터는 inspect()함수로 확인해야 합니다.
inspect(head(trans,2))
#dim()함수는 dimension의 줄임말로, 해당 객체의 차원 정보, 즉, 몇개의 columns와, rows로 이루어져 있는지 말해주는 함수입니다.
#총 2262명의 사용자들의 좋아하는 page들은 75587개다라는 정보를 알 수 있습니다.
dim(trans)
# 이게 무슨 말일까요? 앞에서 말씀드렸다시피, 객체의 차원이 그렇다면 75587 x 2262라는 건데요.
#transaction data의 형태는, (슬라이드) 이와 같이 말 그대로, sparse의 형태이기 때문에, '75587개의 columns와 2262개의 rows로 이루어진 sparse Matrix다.'라는 말입니다.
#그렇기 때문에 head()함수로 transactions을 표현하기는 힘들기 때문에, inpect를 사용합니다. 그리고, 전체적인 객체의 형태를 알고 싶다면, summary()함수를 사용해서 데이터를 확인합니다.
summary(trans)
#----------------------------------------------------------------------------------
# 자, 이번엔 시각화를 통해서 데이터의 분포를 간단히 살펴 봅니다.
#check the most frequency of the items
#상위 30개 가장 많이 좋아요를 받은 페이지들을 살펴 볼까요?
# 페이지 이름이 너무 서로 겹치지 않도록 70%정도의 크기로 x축에 나오게 설정합니다.
itemFrequencyPlot(trans, topN=30, cex.names=.7)
# Mine Association Rules
# 자 그럼, 이제 본격적으로 apriori()함수를 사용해서, rule들을 추출해 봅니다.
# 아래의 설정은, trans 데이터를 가지고, parameter로 조건을 설정하는데, support를 5%이상의 출현, 그리고 rule의 크기는 'lhs, rhs를 합쳐서 2의 크기 (ex A -> B) 이상의 길이를 가진 규칙이면 다 뽑아라.'고 설정한 것입니다.
r <- apriori(trans, parameter = list(supp=0.05,minlen=2))
#이렇게 생성한 규칙에서 support가 가장 높은 순서대로 top 15을 알아보겠습니다.
inspect(head(sort(r,by="support",decreasing=T),n=15))
# 이 자료의 경우, 3%이상의 다 컨피던스가 높아요. 0.8. 즉, 80%이상이죠 왠만하면. 그래서 딱히 confidence에서 변별력을 찾을 수 없기때문에, 서포트에 중점을 두고 우리는 볼 필요가 있겠습니다.
plot(r)
sub<-head(sort(r,by="lift"),n=10)
inspect(head(sort(r,by="lift"),n=10))
#plot(head(sort(r,by="lift"),n=10),method="paracoord",control=list(type="items"))
dd<-plot(head(sort(r,by="lift"),n=10),method="graph",control=list(type="items"))
#----------------------------------움직이는 시각화 -----------------------------------#
ig_df <- get.data.frame(dd, what="both")
inspect(head(sort(r,by="lift"),n=10))
#data preprocessing
ifelse(is.na(ig_df$vertices$support),0.00001,ig_df$vertices$support)
ig_df$vertices$support<-ifelse(is.na(ig_df$vertices$support),0.0001,ig_df$vertices$support)
visNetwork(
nodes=data.frame(
id=ig_df$vertices$name
,value=ig_df$vertices$support
,title=ifelse(ig_df$vertices$label=="",ig_df$vertices$name,ig_df$vertices$label)
,ig_df$vertices
)
,edges = ig_df$edges
)%>%
visEdges(arrows ="to")%>%
visOptions(highlightNearest=T)
#plot(head(sort(r,by="support"),n=50)) 5%로는 몇명일까.
y <- nrow(trans)*0.05
y | /Arules_female_like.R | no_license | atoa91/Arules | R | false | false | 4,493 | r | # install.packages("visNetwork")
# Association-rule mining on Facebook "liked pages" data for female users,
# visualized with arulesViz / visNetwork.
if(! "arules" %in% installed.packages()) install.packages("arules")
if(! "arulesViz" %in% installed.packages()) install.packages("arulesViz")
library(arules)
library(arulesViz)
library(visNetwork)
library(igraph)
# Tab-separated input: one row per (user id, liked page name) pair.
data<-read.csv("female_like.csv",stringsAsFactors =F,sep="\t",header=T)
names(data)
names(data)<-c("id","name")
str(data$name)
#make the data to the list for transfer to transaction with 'split'
lst <- split(data$name,data$id)
head(lst,1)
# Reset the graphics device to avoid errors from package conflicts; rerun
# from here if a later plot fails.
dev.off()
aggrData <- lst
listData <- list()
# De-duplicate the liked pages within each user.
# NOTE(review): `1:length(aggrData)` misbehaves on an empty list;
# seq_along(aggrData) is the safer idiom.
for (i in 1:length(aggrData)) {
listData[[i]] <- as.character(aggrData[[i]][!duplicated(aggrData[[i]])])
}
# Sanity check: the pages liked by the 2nd user.
listData[[2]]
# Coerce the per-user page lists into an arules `transactions` object.
trans <- as(listData,"transactions")
# head() on transactions only reports the sparse format ...
head(trans)
# ... so use inspect() to actually view the first transactions.
inspect(head(trans,2))
# dim(): 2262 users x 75587 distinct pages (a sparse item matrix).
dim(trans)
summary(trans)
#----------------------------------------------------------------------------------
# Quick look at the data distribution.
#check the most frequency of the items
# Top 30 most-liked pages; shrink x-axis labels to 70% so names don't overlap.
itemFrequencyPlot(trans, topN=30, cex.names=.7)
# Mine Association Rules
# Keep rules with support >= 5% and total length (lhs + rhs) >= 2.
r <- apriori(trans, parameter = list(supp=0.05,minlen=2))
# Top 15 rules by support.
inspect(head(sort(r,by="support",decreasing=T),n=15))
# Confidence is above 0.8 almost everywhere in this data set, so support is
# the more discriminating measure here.
plot(r)
sub<-head(sort(r,by="lift"),n=10)
inspect(head(sort(r,by="lift"),n=10))
#plot(head(sort(r,by="lift"),n=10),method="paracoord",control=list(type="items"))
dd<-plot(head(sort(r,by="lift"),n=10),method="graph",control=list(type="items"))
#---------------------------------- interactive visualization -----------------------------------#
ig_df <- get.data.frame(dd, what="both")
inspect(head(sort(r,by="lift"),n=10))
#data preprocessing
# NOTE(review): the next line's result is discarded (no assignment); only the
# assignment below takes effect, and it fills with 0.0001 rather than
# 0.00001 -- confirm which value is intended.
ifelse(is.na(ig_df$vertices$support),0.00001,ig_df$vertices$support)
ig_df$vertices$support<-ifelse(is.na(ig_df$vertices$support),0.0001,ig_df$vertices$support)
# Node size = rule support; hover title falls back to the vertex name.
visNetwork(
nodes=data.frame(
id=ig_df$vertices$name
,value=ig_df$vertices$support
,title=ifelse(ig_df$vertices$label=="",ig_df$vertices$name,ig_df$vertices$label)
,ig_df$vertices
)
,edges = ig_df$edges
)%>%
visEdges(arrows ="to")%>%
visOptions(highlightNearest=T)
#plot(head(sort(r,by="support"),n=50))  How many users does 5% support correspond to?
y <- nrow(trans)*0.05
y
y |
# Homework analysis of salary-by-education CSVs (years 103-106, presumably
# ROC calendar years -- confirm). Column names are in Chinese and accessed
# with backticks.
library(jsonlite)
library(dplyr)
#1: occupations whose university-graduate salary rose from year 103 to 106,
#   the growth rate, and a tally of the broad job categories growing > 5%.
library(readr)
X103_slalry_education <- read_csv("103 slalry education.csv")
X106_slalry_education <- read_csv("106 slalry education.csv")
# Normalize the 106 occupation labels so they join with the 103 labels.
X106_slalry_education$大職業別<- gsub("_","、",X106_slalry_education$大職業別)
X103_106_slalry_education <- inner_join(X103_slalry_education,X106_slalry_education,by="大職業別")
# NOTE(review): "—" marks a missing salary; replacing it with 0 (rather than
# NA) makes the 106/103 ratio below misleading for those rows -- confirm.
X103_106_slalry_education$`大學-薪資.x`<- gsub("—",0,X103_106_slalry_education$`大學-薪資.x`)
X103_106_slalry_education$`大學-薪資.y`<- gsub("—",0,X103_106_slalry_education$`大學-薪資.y`)
X103_106_slalry_education$`大學-薪資.x`<- as.numeric(X103_106_slalry_education$`大學-薪資.x`)
X103_106_slalry_education$`大學-薪資.y`<- as.numeric(X103_106_slalry_education$`大學-薪資.y`)
highersalary_106 <- filter(X103_106_slalry_education, `大學-薪資.y` > `大學-薪資.x`)
salaryrate_106 <- mutate(highersalary_106, rate = `大學-薪資.y`/ `大學-薪資.x`)
head(salaryrate_106[order(salaryrate_106$rate,decreasing = T),], 10)
salaryrate_over_1.05 <- filter(salaryrate_106, rate > 1.05)
# Occupation label format is "<industry>-<job>"; keep the industry part.
jobtype <- strsplit(salaryrate_over_1.05$大職業別,"[-]")
strjob <- c()
for (i in 1:length(jobtype)){
strjob[i] <- jobtype[[i]][1]
}
table(strjob)
#2: for each year 103-106, occupations where the female/male salary ratio
#   (base 100) favors men (< 100) vs women (> 100).
library(readr)
X103_slalry_education <- read_csv("103 slalry education.csv")
X103_slalry_education$`大學-女/男`<- gsub("—",NA,X103_slalry_education$`大學-女/男`)
X103_slalry_education$`大學-女/男`<- gsub("…",NA,X103_slalry_education$`大學-女/男`)
X103_slalry_education$`大學-女/男`<- as.numeric(X103_slalry_education$`大學-女/男`)
X103gender <- select(X103_slalry_education, 大職業別,`大學-女/男`)
X103boy <- filter(X103gender,`大學-女/男`< 100)
head(X103boy[order(X103boy$`大學-女/男`,decreasing = T),], 10)
X103girl <- filter(X103gender,`大學-女/男`> 100)
X104_slalry_education <- read_csv("104 slalry education.csv")
X104_slalry_education$`大學-女/男`<- gsub("—",NA,X104_slalry_education$`大學-女/男`)
X104_slalry_education$`大學-女/男`<- gsub("…",NA,X104_slalry_education$`大學-女/男`)
X104_slalry_education$`大學-女/男`<- as.numeric(X104_slalry_education$`大學-女/男`)
X104gender <- select(X104_slalry_education, 大職業別,`大學-女/男`)
X104boy <- filter(X104gender,`大學-女/男`< 100)
head(X104boy[order(X104boy$`大學-女/男`,decreasing = T),], 10)
X104girl <- filter(X104gender,`大學-女/男`> 100)
X105_slalry_education <- read_csv("105 slalry education.csv")
X105_slalry_education$`大學-女/男`<- gsub("—",NA,X105_slalry_education$`大學-女/男`)
X105_slalry_education$`大學-女/男`<- gsub("…",NA,X105_slalry_education$`大學-女/男`)
X105_slalry_education$`大學-女/男`<- as.numeric(X105_slalry_education$`大學-女/男`)
X105gender <- select(X105_slalry_education, 大職業別,`大學-女/男`)
X105boy <- filter(X105gender,`大學-女/男`< 100)
head(X105boy[order(X105boy$`大學-女/男`,decreasing = T),], 10)
X105girl <- filter(X105gender,`大學-女/男`> 100)
X106_slalry_education <- read_csv("106 slalry education.csv")
X106_slalry_education$大職業別<- gsub("_","、",X106_slalry_education$大職業別)
X106_slalry_education$`大學-女/男`<- gsub("—",NA,X106_slalry_education$`大學-女/男`)
X106_slalry_education$`大學-女/男`<- gsub("…",NA,X106_slalry_education$`大學-女/男`)
X106_slalry_education$`大學-女/男`<- as.numeric(X106_slalry_education$`大學-女/男`)
X106gender <- select(X106_slalry_education, 大職業別,`大學-女/男`)
X106boy <- filter(X106gender,`大學-女/男`< 100)
head(X106boy[order(X106boy$`大學-女/男`,decreasing = T),], 10)
X106girl <- filter(X106gender,`大學-女/男`> 100)
str(X103_slalry_education)
#3: ratio of graduate-school salary to university salary in year 106.
library(readr)
X106_slalry_education <- read_csv("106 slalry education.csv")
# NOTE(review): missing markers again become 0, so the ratio below can be
# 0, Inf or NaN for rows with missing data.
X106_slalry_education$`大學-薪資`<- gsub("—",0,X106_slalry_education$`大學-薪資`)
X106_slalry_education$`研究所及以上-薪資`<- gsub("—",0,X106_slalry_education$`研究所及以上-薪資`)
X106_slalry_education$`大學-薪資`<- as.numeric(X106_slalry_education$`大學-薪資`)
X106_slalry_education$`研究所及以上-薪資`<- as.numeric(X106_slalry_education$`研究所及以上-薪資`)
X106_slalry_education$salary_differ <- X106_slalry_education$`研究所及以上-薪資`/ X106_slalry_education$`大學-薪資`
head(X106_slalry_education[order(X106_slalry_education$salary_differ,decreasing = T),], 10)
X106_slalry_education$salary_differ
#4: for three finance/insurance occupations of interest, the absolute
#   salary gap between graduate-school and university education.
library(readr)
X106_slalry_education <- read_csv("106 slalry education.csv")
myfavorite <- subset(X106_slalry_education, 大職業別 == "金融及保險業-專業人員" |
大職業別 == "金融及保險業-技術員及助理專業人員" |
大職業別 == "金融及保險業-事務支援人員")
# Keep occupation label plus the relevant salary columns (by position).
myfavorite <- myfavorite[,c(1,2,11,13)]
myfavorite$`大學-薪資`<- as.numeric(myfavorite$`大學-薪資`)
myfavorite$`研究所及以上-薪資`<- as.numeric(myfavorite$`研究所及以上-薪資`)
knitr::kable(mutate106 <- mutate(myfavorite, Comparesalary_106 = `研究所及以上-薪資` - `大學-薪資`))
| /DataAnalysis.R | no_license | CGUIM-BigDataAnalysis/107bigdatacguimhw1-JasonWengBee | R | false | false | 5,222 | r | library(jsonlite)
# Duplicate copy of the salary-by-education analysis earlier in this file
# (the dataset stores each file twice); see the notes there. The leading
# `library(jsonlite)` line of this copy is fused into the metadata row above.
library(dplyr)
#1: salary growth 103 -> 106 for university graduates, by occupation.
library(readr)
X103_slalry_education <- read_csv("103 slalry education.csv")
X106_slalry_education <- read_csv("106 slalry education.csv")
X106_slalry_education$大職業別<- gsub("_","、",X106_slalry_education$大職業別)
X103_106_slalry_education <- inner_join(X103_slalry_education,X106_slalry_education,by="大職業別")
# NOTE(review): "—" (missing) replaced with 0, which skews the ratio below.
X103_106_slalry_education$`大學-薪資.x`<- gsub("—",0,X103_106_slalry_education$`大學-薪資.x`)
X103_106_slalry_education$`大學-薪資.y`<- gsub("—",0,X103_106_slalry_education$`大學-薪資.y`)
X103_106_slalry_education$`大學-薪資.x`<- as.numeric(X103_106_slalry_education$`大學-薪資.x`)
X103_106_slalry_education$`大學-薪資.y`<- as.numeric(X103_106_slalry_education$`大學-薪資.y`)
highersalary_106 <- filter(X103_106_slalry_education, `大學-薪資.y` > `大學-薪資.x`)
salaryrate_106 <- mutate(highersalary_106, rate = `大學-薪資.y`/ `大學-薪資.x`)
head(salaryrate_106[order(salaryrate_106$rate,decreasing = T),], 10)
salaryrate_over_1.05 <- filter(salaryrate_106, rate > 1.05)
jobtype <- strsplit(salaryrate_over_1.05$大職業別,"[-]")
strjob <- c()
for (i in 1:length(jobtype)){
strjob[i] <- jobtype[[i]][1]
}
table(strjob)
#2: female/male salary ratio (base 100), per year 103-106.
library(readr)
X103_slalry_education <- read_csv("103 slalry education.csv")
X103_slalry_education$`大學-女/男`<- gsub("—",NA,X103_slalry_education$`大學-女/男`)
X103_slalry_education$`大學-女/男`<- gsub("…",NA,X103_slalry_education$`大學-女/男`)
X103_slalry_education$`大學-女/男`<- as.numeric(X103_slalry_education$`大學-女/男`)
X103gender <- select(X103_slalry_education, 大職業別,`大學-女/男`)
X103boy <- filter(X103gender,`大學-女/男`< 100)
head(X103boy[order(X103boy$`大學-女/男`,decreasing = T),], 10)
X103girl <- filter(X103gender,`大學-女/男`> 100)
X104_slalry_education <- read_csv("104 slalry education.csv")
X104_slalry_education$`大學-女/男`<- gsub("—",NA,X104_slalry_education$`大學-女/男`)
X104_slalry_education$`大學-女/男`<- gsub("…",NA,X104_slalry_education$`大學-女/男`)
X104_slalry_education$`大學-女/男`<- as.numeric(X104_slalry_education$`大學-女/男`)
X104gender <- select(X104_slalry_education, 大職業別,`大學-女/男`)
X104boy <- filter(X104gender,`大學-女/男`< 100)
head(X104boy[order(X104boy$`大學-女/男`,decreasing = T),], 10)
X104girl <- filter(X104gender,`大學-女/男`> 100)
X105_slalry_education <- read_csv("105 slalry education.csv")
X105_slalry_education$`大學-女/男`<- gsub("—",NA,X105_slalry_education$`大學-女/男`)
X105_slalry_education$`大學-女/男`<- gsub("…",NA,X105_slalry_education$`大學-女/男`)
X105_slalry_education$`大學-女/男`<- as.numeric(X105_slalry_education$`大學-女/男`)
X105gender <- select(X105_slalry_education, 大職業別,`大學-女/男`)
X105boy <- filter(X105gender,`大學-女/男`< 100)
head(X105boy[order(X105boy$`大學-女/男`,decreasing = T),], 10)
X105girl <- filter(X105gender,`大學-女/男`> 100)
X106_slalry_education <- read_csv("106 slalry education.csv")
X106_slalry_education$大職業別<- gsub("_","、",X106_slalry_education$大職業別)
X106_slalry_education$`大學-女/男`<- gsub("—",NA,X106_slalry_education$`大學-女/男`)
X106_slalry_education$`大學-女/男`<- gsub("…",NA,X106_slalry_education$`大學-女/男`)
X106_slalry_education$`大學-女/男`<- as.numeric(X106_slalry_education$`大學-女/男`)
X106gender <- select(X106_slalry_education, 大職業別,`大學-女/男`)
X106boy <- filter(X106gender,`大學-女/男`< 100)
head(X106boy[order(X106boy$`大學-女/男`,decreasing = T),], 10)
X106girl <- filter(X106gender,`大學-女/男`> 100)
str(X103_slalry_education)
#3: graduate-school vs university salary ratio in year 106.
library(readr)
X106_slalry_education <- read_csv("106 slalry education.csv")
X106_slalry_education$`大學-薪資`<- gsub("—",0,X106_slalry_education$`大學-薪資`)
X106_slalry_education$`研究所及以上-薪資`<- gsub("—",0,X106_slalry_education$`研究所及以上-薪資`)
X106_slalry_education$`大學-薪資`<- as.numeric(X106_slalry_education$`大學-薪資`)
X106_slalry_education$`研究所及以上-薪資`<- as.numeric(X106_slalry_education$`研究所及以上-薪資`)
X106_slalry_education$salary_differ <- X106_slalry_education$`研究所及以上-薪資`/ X106_slalry_education$`大學-薪資`
head(X106_slalry_education[order(X106_slalry_education$salary_differ,decreasing = T),], 10)
X106_slalry_education$salary_differ
#4: education salary gap for three finance/insurance occupations.
library(readr)
X106_slalry_education <- read_csv("106 slalry education.csv")
myfavorite <- subset(X106_slalry_education, 大職業別 == "金融及保險業-專業人員" |
大職業別 == "金融及保險業-技術員及助理專業人員" |
大職業別 == "金融及保險業-事務支援人員")
myfavorite <- myfavorite[,c(1,2,11,13)]
myfavorite$`大學-薪資`<- as.numeric(myfavorite$`大學-薪資`)
myfavorite$`研究所及以上-薪資`<- as.numeric(myfavorite$`研究所及以上-薪資`)
knitr::kable(mutate106 <- mutate(myfavorite, Comparesalary_106 = `研究所及以上-薪資` - `大學-薪資`))
|
#Goal: Simulate data with realistic LD information
#Y = M\beta + G\theta + U\beta_u + error_y
#M = G\alpha + U\alpha_U + error_m
#h2_y = var(\beta G\alpha+G\theta) = 0.4
#h2_m = var(\alphaG) = 0.4
#var(error_m+U\alpha_U) = 0.6
#var(error_y+U\beta_U) = 0.6
#causal SNPs proportion for M: 0.1, 0.01
#overlapping between pleiotropic and non-pleiotropic: 1, 0.5, 0.75
# Command-line arguments: i1 indexes beta_vec, i2 indexes pleo_vec, i3 is
# the replicate id (used only in the output filename).
args = commandArgs(trailingOnly = T)
i1 = as.numeric(args[[1]])
i2 = as.numeric(args[[2]])
i3 = as.numeric(args[[3]])
print(c(i1,i2,i3))
setwd("/data/zhangh24/MR_MA/")
source("./code/simulation/functions/WMR_function.R")
# Candidate causal effects and pleiotropy-overlap proportions.
beta_vec = c(1,0.5,0)
pleo_vec = c(1,0.5,0.25)
n.snp = 500
beta = beta_vec[i1]
N = 6000
cau.pro = 0.2
n.cau = as.integer(n.snp*cau.pro)
h2_m = 0.4
h2_y = 0.4
sigma_alpha = h2_m/n.cau
sigma_theta = 0
#alpha_u = sqrt(0.3)
sigma_error_m = 1-h2_m
beta_u = 0
sigma_error_y = 1-h2_y
set.seed(123)
# Indices (within 1:n.snp) of the SNPs causal for the mediator M.
idx.cau_m = sample(c(1:n.snp),n.cau)
#pleiotropic settings
pleosnp.pro = pleo_vec[i2]
n.cau.overlap = as.integer(pleosnp.pro*n.cau)
n.cau.specific = n.cau - n.cau.overlap
#pleiotropic snps proportion the same as causal snps
# NOTE(review): the non-overlapping pleiotropic SNPs are sampled from
# setdiff(1:n.cau, idx.cau_m), but idx.cau_m ranges over 1:n.snp, not
# 1:n.cau -- confirm whether setdiff(1:n.snp, idx.cau_m) was intended.
idx.cau_pleo = c(sample(idx.cau_m,n.cau.overlap),
sample(setdiff(c(1:n.cau),idx.cau_m),n.cau-n.cau.overlap))
#alpha_G = rnorm(n.cau,mean = 0,sd = sqrt(sigma_alpha))
# Fixed per-SNP effects on M; theta is degenerate at 0 (sigma_theta = 0).
alpha_G = rep(0.2,n.cau)
theta_G = rnorm(n.cau,mean = 0, sd = sqrt(sigma_theta))
# AR(1) correlation matrix: entry (i, j) equals rho^|i - j|, so the
# diagonal is 1 and correlation decays geometrically with distance.
ar1_cor <- function(n, rho) {
  lag <- abs(outer(seq_len(n), seq_len(n), `-`))
  rho^lag
}
library(mr.raps)
library(Rfast)
library(MASS)
library(MESS)
# LD structure: AR(1) with rho = 0, i.e. independent SNPs in this run.
R =ar1_cor(n.snp,0.0)
# Two independent genotype samples (two-sample MR): G1 produces the summary
# statistics for the outcome Y, G2 those for the mediator M.
G1 = rmvnorm(N,mu = rep(0,n.snp),R)
G2 = rmvnorm(N,mu = rep(0,n.snp),R)
U1 = rnorm(N)
U2 = rnorm(N)
G1.cau = G1[,idx.cau_m]
G2.cau = G2[,idx.cau_m]
G1.pleo = G1[,idx.cau_pleo]
G2.pleo = G2[,idx.cau_pleo]
# NOTE(review): a single LD-score value (from rows 2:15 of R) is recycled
# for every SNP -- confirm this is intentional.
ldscore = rep(sum(R[2:15,]^2),n.snp)
#G1 to obtain sum data for Y
#G2 to obtain sum data for M
# Per-method accumulators over n.rep Monte Carlo replicates: point
# estimate, 95% CI coverage indicator, and standard error.
n.rep = 100
beta_est = rep(0,n.rep)
beta_cover = rep(0,n.rep)
beta_se = rep(0,n.rep)
beta_est_Raps = rep(0,n.rep)
beta_cover_Raps = rep(0,n.rep)
beta_se_Raps = rep(0,n.rep)
beta_est_IVW = rep(0,n.rep)
beta_cover_IVW = rep(0,n.rep)
beta_se_IVW = rep(0,n.rep)
beta_est_egger = rep(0,n.rep)
beta_cover_egger = rep(0,n.rep)
beta_se_egger = rep(0,n.rep)
beta_est_median = rep(0,n.rep)
beta_cover_median = rep(0,n.rep)
beta_se_median = rep(0,n.rep)
library(MendelianRandomization)
library(susieR)
# Correlated error terms for M and Y (shared-confounding scenario).
cor.error = 0.25
sigma_error_m = 0.6
sigma_error_y = 0.6
cov_my = sqrt(sigma_error_m*sigma_error_y)*cor.error
Sigma = matrix(c(sigma_error_m,cov_my,cov_my,sigma_error_y),2,2)
for(k in 1:n.rep){
print(k)
error = mvrnorm(N,mu = c(0,0), Sigma =Sigma)
error_m = error[,1]
error_y = error[,2]
# M1 = G1.cau%*%alpha_G+U1*alpha_u+error_m
# Y1 = M1%*%beta + G1.pleo%*%theta_G+U1*beta_u + error_y
#Y1 = M1%*%beta +U1*beta_u + error_y
# Sample 1 generates M and Y (outcome summary stats); sample 2 regenerates
# M with fresh noise for the exposure summary stats.
M1 = G1.cau%*%alpha_G+error_m
Y1 = M1%*%beta + error_y
error_m = rnorm(N,sd = sqrt(sigma_error_m))
M2 = G2.cau%*%alpha_G+error_m
# Per-SNP marginal regressions (susieR helper): Gamma = SNP -> Y,
# alpha = SNP -> M, with their standard errors.
sumstats <- univariate_regression(G1, Y1)
Gamma = sumstats$betahat
se_Gamma = sumstats$sebetahat
sumstats <- univariate_regression(G2, M2)
alpha = sumstats$betahat
se_alpha = sumstats$sebetahat
p_alpha = 2*pnorm(-abs(alpha/se_alpha),lower.tail = T)
#p_Gamma = 2*pnorm(-abs(Gamma/sqrt(var_Gamma)),lower.tail = T)
# Greedy LD clumping.
#
# Repeatedly picks the remaining SNP with the smallest p-value, records it,
# and discards every SNP whose correlation with that SNP is >= 0.01.
#
# Args:
#   R: square SNP-by-SNP correlation matrix.
#   p: per-SNP p-values, length ncol(R).
# Returns:
#   data.frame with columns `select.ind` (chosen SNP indices, in selection
#   order) and `p.select.ind.` (their p-values) -- the same column names the
#   original data.frame(select.ind, p[select.ind]) construction produced.
Myclumping <- function(R,p){
  # seq_len() is safe for a zero-column matrix; 1:ncol(R) would yield c(1, 0).
  keep.ind <- seq_len(ncol(R))
  select.ind <- integer(0)
  while (length(keep.ind) > 0) {
    # Index (within the full matrix) of the smallest remaining p-value.
    top <- keep.ind[which.min(p[keep.ind])]
    select.ind <- c(select.ind, top)
    # Keep only SNPs weakly correlated (< 0.01) with the chosen SNP; this
    # also drops `top` itself because R[top, top] == 1.
    # NOTE(review): the threshold applies to the signed correlation, as in
    # the original -- strongly negative correlations are not pruned.
    keep.ind <- keep.ind[R[top, keep.ind] < 0.01]
  }
  data.frame(select.ind = select.ind, p.select.ind. = p[select.ind])
}
# Instrument selection: clumping is computed but the oracle causal set is
# used instead (the thresholded selection is commented out).
clump.snp = Myclumping(R,p_alpha)
#select.id = clump.snp[clump.snp$p.select.ind.<=5E-08,1]
select.id = idx.cau_m
alpha_select =alpha[select.id]
se_alpha_select = se_alpha[select.id]
Gamma_select = Gamma[select.id]
se_Gamma_select = se_Gamma[select.id]
# Run IVW, MR-Egger, weighted-median and MR-RAPS on the selected
# instruments, recording estimate / coverage / SE for each.
MRInputObject <- mr_input(bx = alpha_select,
bxse = se_alpha_select,
by = Gamma_select,
byse = se_Gamma_select)
IVWObject <- mr_ivw(MRInputObject,
model = "default",
robust = FALSE,
penalized = FALSE,
correl = FALSE,
weights = "simple",
psi = 0,
distribution =
"normal",
alpha = 0.05)
beta_est_IVW[k] = IVWObject$Estimate
beta_cover_IVW[k] = ifelse(IVWObject$CILower<=beta&
IVWObject$CIUpper>=beta,1,0)
beta_se_IVW[k] = IVWObject$StdError
EggerObject <- mr_egger(
MRInputObject,
robust = FALSE,
penalized = FALSE,
correl = FALSE,
distribution = "normal",
alpha = 0.05
)
beta_est_egger[k] = EggerObject$Estimate
beta_cover_egger[k] = ifelse(EggerObject$CILower.Est<=beta&
EggerObject$CIUpper.Est>=beta,1,0)
beta_se_egger[k] = EggerObject$StdError.Est
MedianObject <- mr_median(
MRInputObject,
weighting = "weighted",
distribution = "normal",
alpha = 0.05,
iterations = 10000,
seed = 314159265
)
beta_est_median[k] = MedianObject$Estimate
beta_cover_median[k] = ifelse(MedianObject$CILower<=beta&
MedianObject$CIUpper>=beta,1,0)
beta_se_median[k] = MedianObject$StdError
raps_result <- mr.raps(data = data.frame(beta.exposure = alpha_select,
beta.outcome = Gamma_select,
se.exposure = se_alpha_select,
se.outcome = se_Gamma_select),
diagnostics = F)
beta_est_Raps[k] = raps_result$beta.hat
beta_cover_Raps[k] = ifelse(raps_result$beta.hat-1.96*raps_result$beta.se<=beta&
raps_result$beta.hat+1.96*raps_result$beta.se>=beta,1,0)
beta_se_Raps[k] = raps_result$beta.se
# The WMR method is currently disabled, so beta_est/beta_cover/beta_se
# (the "WMR" columns below) stay at their initial zeros.
# se_Gamma = sqrt(var_Gamma)
#se_alpha = sqrt(var_alpha)
#R.select = R[select.id,select.id]
# MR_result <- WMRFun(Gamma,se_Gamma,
# alpha,se_alpha,
# ldscore,R)
# MRWeight(Gamma = sumGamma,
# var_Gamma = var_Gamma,
# alpha = sumalpha,
# var_alpha = var_alpha,
# R = R)
# beta_est[k] = MR_result[1]
# beta_cover[k] = ifelse(MR_result[3]<=beta&MR_result[4]>=beta,1,0)
# beta_se[k] = MR_result[2]
}
# Assemble per-method results: estimates, SEs and coverage indicators.
mean.result = data.frame(
beta_est,beta_est_IVW,beta_est_egger,beta_est_median,beta_est_Raps
)
colnames(mean.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
se.result = data.frame(
beta_se,beta_se_IVW,beta_se_egger,beta_se_median,beta_se_Raps
)
colnames(se.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
cover.result = data.frame(
beta_cover,beta_cover_IVW,beta_cover_egger,beta_cover_median,beta_cover_Raps
)
colnames(cover.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
result = list(mean.result,se.result,cover.result)
# Summary diagnostics printed to the log.
bias = apply(mean.result,2,mean)-beta;
print(bias)
em_se = apply(mean.result,2,sd);
print(em_se)
es_se = apply(se.result,2,mean);
print(es_se)
cover = apply(cover.result,2,mean);
print(cover)
# NOTE(review): `result` was already assigned identically above; and the
# final data.frame below overwrites `result` AFTER the save(), so it is
# never written to disk -- confirm which form should be saved.
result = list(mean.result,se.result,cover.result)
save(result,file = paste0("./result/simulation/LD_simulation_test/result_",i1,"_",i2,"_",i3,".rdata"))
result = data.frame(
method = c("WMR","IVW","MR-Egger","MR-median","MRRAPs"),
bias = apply(mean.result,2,mean)-beta,
em_se = apply(mean.result,2,sd),
es_se = apply(se.result,2,mean),
cover = apply(cover.result,2,mean))
| /code/simulation/strach/test_code.R | no_license | andrewhaoyu/MR_MA | R | false | false | 8,240 | r | #Goal: Simulate data with realistic LD information
# Duplicate copy of the MR simulation script earlier in this file (the
# dataset stores each file twice); the leading "#Goal" comment of this copy
# is fused into the metadata row above. See the first copy for full notes.
#Y = M\beta + G\theta + U\beta_u + error_y
#M = G\alpha + U\alpha_U + error_m
#h2_y = var(\beta G\alpha+G\theta) = 0.4
#h2_m = var(\alphaG) = 0.4
#var(error_m+U\alpha_U) = 0.6
#var(error_y+U\beta_U) = 0.6
#causal SNPs proportion for M: 0.1, 0.01
#overlapping between pleiotropic and non-pleiotropic: 1, 0.5, 0.75
#i1 for beta
#i2 for ple
#i3 for rep
args = commandArgs(trailingOnly = T)
i1 = as.numeric(args[[1]])
i2 = as.numeric(args[[2]])
i3 = as.numeric(args[[3]])
print(c(i1,i2,i3))
setwd("/data/zhangh24/MR_MA/")
source("./code/simulation/functions/WMR_function.R")
beta_vec = c(1,0.5,0)
pleo_vec = c(1,0.5,0.25)
n.snp = 500
beta = beta_vec[i1]
N = 6000
cau.pro = 0.2
n.cau = as.integer(n.snp*cau.pro)
h2_m = 0.4
h2_y = 0.4
sigma_alpha = h2_m/n.cau
sigma_theta = 0
#alpha_u = sqrt(0.3)
sigma_error_m = 1-h2_m
beta_u = 0
sigma_error_y = 1-h2_y
set.seed(123)
idx.cau_m = sample(c(1:n.snp),n.cau)
#pleiotropic settings
pleosnp.pro = pleo_vec[i2]
n.cau.overlap = as.integer(pleosnp.pro*n.cau)
n.cau.specific = n.cau - n.cau.overlap
#pleiotropic snps proportion the same as causal snps
# NOTE(review): setdiff over 1:n.cau vs idx.cau_m (which spans 1:n.snp) --
# likely 1:n.snp was intended; confirm.
idx.cau_pleo = c(sample(idx.cau_m,n.cau.overlap),
sample(setdiff(c(1:n.cau),idx.cau_m),n.cau-n.cau.overlap))
#alpha_G = rnorm(n.cau,mean = 0,sd = sqrt(sigma_alpha))
alpha_G = rep(0.2,n.cau)
theta_G = rnorm(n.cau,mean = 0, sd = sqrt(sigma_theta))
# AR(1) correlation matrix: entry (i, j) equals rho^|i - j|.
ar1_cor <- function(n, rho) {
  lag <- abs(outer(seq_len(n), seq_len(n), `-`))
  rho^lag
}
# (Duplicate copy -- see the first copy of this script for detailed notes.)
library(mr.raps)
library(Rfast)
library(MASS)
library(MESS)
# Independent SNPs in this run (AR(1) with rho = 0).
R =ar1_cor(n.snp,0.0)
G1 = rmvnorm(N,mu = rep(0,n.snp),R)
G2 = rmvnorm(N,mu = rep(0,n.snp),R)
U1 = rnorm(N)
U2 = rnorm(N)
G1.cau = G1[,idx.cau_m]
G2.cau = G2[,idx.cau_m]
G1.pleo = G1[,idx.cau_pleo]
G2.pleo = G2[,idx.cau_pleo]
ldscore = rep(sum(R[2:15,]^2),n.snp)
#G1 to obtain sum data for Y
#G2 to obtain sum data for M
# Monte Carlo accumulators: estimate / coverage / SE per method.
n.rep = 100
beta_est = rep(0,n.rep)
beta_cover = rep(0,n.rep)
beta_se = rep(0,n.rep)
beta_est_Raps = rep(0,n.rep)
beta_cover_Raps = rep(0,n.rep)
beta_se_Raps = rep(0,n.rep)
beta_est_IVW = rep(0,n.rep)
beta_cover_IVW = rep(0,n.rep)
beta_se_IVW = rep(0,n.rep)
beta_est_egger = rep(0,n.rep)
beta_cover_egger = rep(0,n.rep)
beta_se_egger = rep(0,n.rep)
beta_est_median = rep(0,n.rep)
beta_cover_median = rep(0,n.rep)
beta_se_median = rep(0,n.rep)
library(MendelianRandomization)
library(susieR)
cor.error = 0.25
sigma_error_m = 0.6
sigma_error_y = 0.6
cov_my = sqrt(sigma_error_m*sigma_error_y)*cor.error
Sigma = matrix(c(sigma_error_m,cov_my,cov_my,sigma_error_y),2,2)
for(k in 1:n.rep){
print(k)
error = mvrnorm(N,mu = c(0,0), Sigma =Sigma)
error_m = error[,1]
error_y = error[,2]
# M1 = G1.cau%*%alpha_G+U1*alpha_u+error_m
# Y1 = M1%*%beta + G1.pleo%*%theta_G+U1*beta_u + error_y
#Y1 = M1%*%beta +U1*beta_u + error_y
M1 = G1.cau%*%alpha_G+error_m
Y1 = M1%*%beta + error_y
error_m = rnorm(N,sd = sqrt(sigma_error_m))
M2 = G2.cau%*%alpha_G+error_m
sumstats <- univariate_regression(G1, Y1)
Gamma = sumstats$betahat
se_Gamma = sumstats$sebetahat
sumstats <- univariate_regression(G2, M2)
alpha = sumstats$betahat
se_alpha = sumstats$sebetahat
p_alpha = 2*pnorm(-abs(alpha/se_alpha),lower.tail = T)
#p_Gamma = 2*pnorm(-abs(Gamma/sqrt(var_Gamma)),lower.tail = T)
# Greedy LD clumping (see also the identical copy earlier in this file).
#
# Repeatedly picks the remaining SNP with the smallest p-value, records it,
# and discards every SNP whose correlation with that SNP is >= 0.01.
#
# Args:
#   R: square SNP-by-SNP correlation matrix.
#   p: per-SNP p-values, length ncol(R).
# Returns:
#   data.frame with columns `select.ind` and `p.select.ind.`, matching the
#   column names of the original construction.
Myclumping <- function(R,p){
  # seq_len() is safe for a zero-column matrix; 1:ncol(R) would yield c(1, 0).
  keep.ind <- seq_len(ncol(R))
  select.ind <- integer(0)
  while (length(keep.ind) > 0) {
    top <- keep.ind[which.min(p[keep.ind])]
    select.ind <- c(select.ind, top)
    # Keep only SNPs weakly correlated (< 0.01) with the chosen SNP; the
    # chosen SNP itself is dropped since R[top, top] == 1.
    keep.ind <- keep.ind[R[top, keep.ind] < 0.01]
  }
  data.frame(select.ind = select.ind, p.select.ind. = p[select.ind])
}
# (Duplicate copy -- see the first copy of this script for detailed notes,
# including the unused clumping result and the post-save overwrite of
# `result`.)
clump.snp = Myclumping(R,p_alpha)
#select.id = clump.snp[clump.snp$p.select.ind.<=5E-08,1]
select.id = idx.cau_m
alpha_select =alpha[select.id]
se_alpha_select = se_alpha[select.id]
Gamma_select = Gamma[select.id]
se_Gamma_select = se_Gamma[select.id]
MRInputObject <- mr_input(bx = alpha_select,
bxse = se_alpha_select,
by = Gamma_select,
byse = se_Gamma_select)
IVWObject <- mr_ivw(MRInputObject,
model = "default",
robust = FALSE,
penalized = FALSE,
correl = FALSE,
weights = "simple",
psi = 0,
distribution =
"normal",
alpha = 0.05)
beta_est_IVW[k] = IVWObject$Estimate
beta_cover_IVW[k] = ifelse(IVWObject$CILower<=beta&
IVWObject$CIUpper>=beta,1,0)
beta_se_IVW[k] = IVWObject$StdError
EggerObject <- mr_egger(
MRInputObject,
robust = FALSE,
penalized = FALSE,
correl = FALSE,
distribution = "normal",
alpha = 0.05
)
beta_est_egger[k] = EggerObject$Estimate
beta_cover_egger[k] = ifelse(EggerObject$CILower.Est<=beta&
EggerObject$CIUpper.Est>=beta,1,0)
beta_se_egger[k] = EggerObject$StdError.Est
MedianObject <- mr_median(
MRInputObject,
weighting = "weighted",
distribution = "normal",
alpha = 0.05,
iterations = 10000,
seed = 314159265
)
beta_est_median[k] = MedianObject$Estimate
beta_cover_median[k] = ifelse(MedianObject$CILower<=beta&
MedianObject$CIUpper>=beta,1,0)
beta_se_median[k] = MedianObject$StdError
raps_result <- mr.raps(data = data.frame(beta.exposure = alpha_select,
beta.outcome = Gamma_select,
se.exposure = se_alpha_select,
se.outcome = se_Gamma_select),
diagnostics = F)
beta_est_Raps[k] = raps_result$beta.hat
beta_cover_Raps[k] = ifelse(raps_result$beta.hat-1.96*raps_result$beta.se<=beta&
raps_result$beta.hat+1.96*raps_result$beta.se>=beta,1,0)
beta_se_Raps[k] = raps_result$beta.se
# se_Gamma = sqrt(var_Gamma)
#se_alpha = sqrt(var_alpha)
#R.select = R[select.id,select.id]
# MR_result <- WMRFun(Gamma,se_Gamma,
# alpha,se_alpha,
# ldscore,R)
# MRWeight(Gamma = sumGamma,
# var_Gamma = var_Gamma,
# alpha = sumalpha,
# var_alpha = var_alpha,
# R = R)
# beta_est[k] = MR_result[1]
# beta_cover[k] = ifelse(MR_result[3]<=beta&MR_result[4]>=beta,1,0)
# beta_se[k] = MR_result[2]
}
mean.result = data.frame(
beta_est,beta_est_IVW,beta_est_egger,beta_est_median,beta_est_Raps
)
colnames(mean.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
se.result = data.frame(
beta_se,beta_se_IVW,beta_se_egger,beta_se_median,beta_se_Raps
)
colnames(se.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
cover.result = data.frame(
beta_cover,beta_cover_IVW,beta_cover_egger,beta_cover_median,beta_cover_Raps
)
colnames(cover.result) = c("WMR","IVW","MR-Egger","MR-median","MRRAPs")
result = list(mean.result,se.result,cover.result)
bias = apply(mean.result,2,mean)-beta;
print(bias)
em_se = apply(mean.result,2,sd);
print(em_se)
es_se = apply(se.result,2,mean);
print(es_se)
cover = apply(cover.result,2,mean);
print(cover)
result = list(mean.result,se.result,cover.result)
save(result,file = paste0("./result/simulation/LD_simulation_test/result_",i1,"_",i2,"_",i3,".rdata"))
result = data.frame(
method = c("WMR","IVW","MR-Egger","MR-median","MRRAPs"),
bias = apply(mean.result,2,mean)-beta,
em_se = apply(mean.result,2,sd),
es_se = apply(se.result,2,mean),
cover = apply(cover.result,2,mean))
|
# TED-talk data preparation: join metadata with transcripts, strip
# parenthesized stage directions, extract the modal audience rating, then
# tokenize and join sentiment lexicons. Outputs are saved as .Rda files.
##read in main data set
ted_main <- read_csv("TedTalks/data/ted_main.csv")
##read in transcripts
transcripts <- read_csv("TedTalks/data/transcripts.csv")
### Fix url formatting so the two data sets match:
transcripts$url = str_replace_all(transcripts$url, pattern = "\r", replacement = "")
###combine data sets
full_data <- inner_join(ted_main, transcripts, by = "url")
###remove any text in the transcript that is surrounded by parenthesis
# NOTE(review): str_replace_all is vectorized -- this row loop could be a
# single call over full_data$transcript with identical results.
for (i in 1:nrow(full_data)){
full_data[i, "transcript"] =
str_replace_all(
full_data[i, "transcript"],
pattern = "\\([^()]+\\)",
" "
)
}
## extract max rating for each talk from the ratings column
# The ratings column holds a Python-style list of {'name': ..., 'count': ...}
# dicts; parse it with rebus patterns (%R%, one_or_more, WRD, DGT).
for(i in 1:nrow(full_data)) {
rating_string <- str_sub(full_data$ratings[i], 2,-2)
rating_vector <- unlist(strsplit(rating_string, split="}"))
names <- str_extract_all(rating_vector, pattern = "'name': '" %R% one_or_more(WRD) %R% optional('-') %R%
one_or_more(WRD), simplify = T)
names <- str_replace(names, pattern = "'name': '", "")
counts <- str_extract_all(rating_vector, pattern = "'count': " %R% one_or_more(DGT), simplify = T)
counts <- str_replace(counts, pattern = "'count': ", "")
# NOTE(review): `counts` is still character here, so which.max() does not
# compare numerically -- it likely needs as.numeric(counts). Also, `names`
# shadows base::names inside this loop.
full_data$max_rating[i] <- names[which.max(counts)]
}
#save full_data
save(full_data, file = "TedTalks/data/full_data.Rda")
#use unnest_tokens to create a separate row for each word in each talk
transcripts_clean <- full_data %>% unnest_tokens(word, transcript)
#add a wordcount column to the transcripts_clean data
transcripts_clean <- transcripts_clean %>%
group_by(name) %>%
mutate(wordcount = n()) %>%
ungroup()
save(transcripts_clean, file = "TedTalks/data/transcripts_clean.Rda")
#join transcript data with the bing + nrc lexicons, respectively
# "like" and "right" are filler words in spoken talks, so they are dropped
# from the sentiment joins.
sentiments_bing <- transcripts_clean %>% inner_join(get_sentiments("bing")) %>% filter(!word %in% c("like", "right"))
save(sentiments_bing, file = "TedTalks/data/sentiments_bing.Rda")
sentiments_nrc <- transcripts_clean %>% inner_join(get_sentiments("nrc")) %>% filter(!word %in% c("like", "right"))
save(sentiments_nrc, file = "TedTalks/data/sentiments_nrc.Rda")
| /Team 4/TedTalks/02_clean_ted.R | no_license | PHP2560-Statistical-Programming-R/text-mining-review-all-join-this-team | R | false | false | 2,140 | r | ##read in main data set
# Duplicate copy of the TED-talk cleaning script earlier in this file (the
# dataset stores each file twice); its leading comment line is fused into
# the metadata row above. See the first copy for review notes (character
# which.max, vectorizable loop).
ted_main <- read_csv("TedTalks/data/ted_main.csv")
##read in transcripts
transcripts <- read_csv("TedTalks/data/transcripts.csv")
### Fix url formatting so the two data sets match:
transcripts$url = str_replace_all(transcripts$url, pattern = "\r", replacement = "")
###combine data sets
full_data <- inner_join(ted_main, transcripts, by = "url")
###remove any text in the transcript that is surrounded by parenthesis
for (i in 1:nrow(full_data)){
full_data[i, "transcript"] =
str_replace_all(
full_data[i, "transcript"],
pattern = "\\([^()]+\\)",
" "
)
}
## extract max rating for each talk from the ratings column
for(i in 1:nrow(full_data)) {
rating_string <- str_sub(full_data$ratings[i], 2,-2)
rating_vector <- unlist(strsplit(rating_string, split="}"))
names <- str_extract_all(rating_vector, pattern = "'name': '" %R% one_or_more(WRD) %R% optional('-') %R%
one_or_more(WRD), simplify = T)
names <- str_replace(names, pattern = "'name': '", "")
counts <- str_extract_all(rating_vector, pattern = "'count': " %R% one_or_more(DGT), simplify = T)
counts <- str_replace(counts, pattern = "'count': ", "")
full_data$max_rating[i] <- names[which.max(counts)]
}
#save full_data
save(full_data, file = "TedTalks/data/full_data.Rda")
#use unnest_tokens to create a separate row for each word in each talk
transcripts_clean <- full_data %>% unnest_tokens(word, transcript)
#add a wordcount column to the transcripts_clean data
transcripts_clean <- transcripts_clean %>%
group_by(name) %>%
mutate(wordcount = n()) %>%
ungroup()
save(transcripts_clean, file = "TedTalks/data/transcripts_clean.Rda")
#join transcript data with the bing + nrc lexicons, respectively
sentiments_bing <- transcripts_clean %>% inner_join(get_sentiments("bing")) %>% filter(!word %in% c("like", "right"))
save(sentiments_bing, file = "TedTalks/data/sentiments_bing.Rda")
sentiments_nrc <- transcripts_clean %>% inner_join(get_sentiments("nrc")) %>% filter(!word %in% c("like", "right"))
save(sentiments_nrc, file = "TedTalks/data/sentiments_nrc.Rda")
|
#### RUN ZIP with host species level covariate: ruminant? (for BC): Model 7
## Set up output files
# Command line: arg 1 = output directory, arg 2 = number of MCMC iterations.
arguments <- commandArgs(T)
outdir <- arguments[1]
iter <- as.numeric(arguments[2])
## load packages
#library(reshape2) #attach the reshape package for function "melt()"
library(R2jags)
library(rjags)
# Load long-format collection data (collec.lng) and individual-level
# covariates; `covars` presumably comes from indlevel.RData -- confirm.
load("~/bipartitemodelsBC/data/finaldata.RData")
collec.lng$ID <- NULL
covars.host <- read.csv('~/bipartitemodelsBC/data/hostlevel-switchwild.csv', head=T)
# Sort host-level covariates alphabetically by species so they align with
# the host.sp ordering in the long data.
covars.host <- covars.host[with(covars.host,order(as.character(Host.Species))),]
load("~/bipartitemodelsBC/data/indlevel.RData")
# Treatment indicator: 1 if a previous treatment was recorded, 0 otherwise;
# keep track of which individuals have a missing treatment record.
treated <- as.numeric(covars$Previous.Treatment!="None" & !is.na(covars$Previous.Treatment))
missing <- which(is.na(covars$Previous.Treatment))
str(collec.lng)
# Build the JAGS data list: counts, dimensions, covariates and indices.
long <- as.list(collec.lng)
long$count <- as.integer(long$count)
long$Nobs <- length(long$count)
long$Nhost.sp <- length(unique(long$host.sp))
long$Npar <- length(unique(long$par.sp))
long$par.sp <- as.factor(as.character(long$par.sp))
long$domestic <- as.numeric(covars.host$Wild)
long$treated <- treated
long$missing.ind <- missing
long$ind <- rep(1:(length(long$host.sp)/(long$Npar)),long$Npar)
save(long,file=paste(outdir,"/tcnjlong_mat",iter,".RData",sep=""))
## Define model
modelfile <- "~/bipartitemodelsBC/finalmodels/jagsNB/cnj/cnj-mat-t.txt"
## Define initial values
# Closure over `long`: fresh random starting values per chain for the
# host-by-parasite effect matrices and scalar coefficients.
inits <- function() {
list(
mn=c(0.5,-5),
sd=c(3,1.5),
alpha=matrix(rnorm(long$Npar*long$Nhost.sp,mean=-5),ncol=long$Npar,byrow=T),
alpha_d=rnorm(1),
beta_t=rnorm(1),
beta=matrix(rnorm(long$Npar*long$Nhost.sp,mean=-5),ncol=long$Npar,byrow=T),
use=matrix(rep(1,long$Npar*long$Nhost.sp),ncol=long$Npar,byrow=T)
)
}
## Run model
output <- jags(long, inits = inits, c('mn', 'sd', 'use', 'HB_invert', 'PD_host', 'beta', 'alpha','alpha_d','prec.beta','r','beta_t','hosts','parasites'), modelfile, n.chains=3, n.iter=iter) # or use defaults
save(output, file = paste(outdir,"/tcnj_output_mat",iter,".RData",sep=""))
# calculate convergence
library(jagstools)
library(dplyr)
# Parameters with Rhat >= 1.1 are treated as not converged.
notconv <- rhats(output) %>% subset(. >= 1.1) %>% length()
params <- length(rhats(output))
options(max.print=100000)
# NOTE(review): the bare paste()/pipe expressions below rely on top-level
# auto-printing (true under Rscript, not under source()); wrap in print()
# if this script is ever sourced.
sink(file=paste(outdir,"/tcnj_printoutput_mat",iter,".txt",sep=""))
paste("not converged =", notconv, sep=" ")
paste("total params =", params, sep=" ")
print("which not converged: ")
rhats(output) %>% subset(. >= 1.1)
print(output)
sink()
| /finalmodels/newspec19Jan/trunc-cnj-mat.R | no_license | jogwalker/bipartitemodelsBC | R | false | false | 2,495 | r |
#### RUN ZIP with host species level covariate: ruminant? (for BC): Model 7
# Batch script: expects two command-line arguments —
#   1. output directory, 2. number of MCMC iterations.
# Intended to be run non-interactively (e.g. via Rscript); several bare
# top-level expressions below rely on auto-printing into a sink()ed file.
## Set up output files
arguments <- commandArgs(T)
outdir <- arguments[1]
iter <- as.numeric(arguments[2])
## load packages
#library(reshape2) #attach the reshape package for function "melt()"
library(R2jags)
library(rjags)
# finaldata.RData is assumed to provide `collec.lng` (long-format counts).
load("~/bipartitemodelsBC/data/finaldata.RData")
collec.lng$ID <- NULL
covars.host <- read.csv('~/bipartitemodelsBC/data/hostlevel-switchwild.csv', head=T)
# Order host-level covariates alphabetically by species so rows align with
# the factor-ordered host data below.
covars.host <- covars.host[with(covars.host,order(as.character(Host.Species))),]
# indlevel.RData presumably provides `covars` (individual-level covariates)
# used on the next two lines — TODO confirm.
load("~/bipartitemodelsBC/data/indlevel.RData")
# 1 if the individual had any previous treatment, 0 if "None"; NAs become 0
# here and are tracked separately via `missing` below.
treated <- as.numeric(covars$Previous.Treatment!="None" & !is.na(covars$Previous.Treatment))
missing <- which(is.na(covars$Previous.Treatment))
str(collec.lng)
# Assemble the data list passed to JAGS.
long <- as.list(collec.lng)
long$count <- as.integer(long$count)
long$Nobs <- length(long$count)
long$Nhost.sp <- length(unique(long$host.sp))
long$Npar <- length(unique(long$par.sp))
long$par.sp <- as.factor(as.character(long$par.sp))
# NOTE(review): the column is named `Wild` but is stored as `domestic`;
# confirm the intended coding (0/1 direction) against the model file.
long$domestic <- as.numeric(covars.host$Wild)
long$treated <- treated
long$missing.ind <- missing
# Individual index: assumes rows are stacked as Npar complete blocks of
# individuals — TODO confirm ordering of collec.lng.
long$ind <- rep(1:(length(long$host.sp)/(long$Npar)),long$Npar)
save(long,file=paste(outdir,"/tcnjlong_mat",iter,".RData",sep=""))
## Define model
modelfile <- "~/bipartitemodelsBC/finalmodels/jagsNB/cnj/cnj-mat-t.txt"
## Define initial values
# Called once per chain by jags(); reads `long` from this environment.
inits <- function() {
list(
mn=c(0.5,-5),
sd=c(3,1.5),
alpha=matrix(rnorm(long$Npar*long$Nhost.sp,mean=-5),ncol=long$Npar,byrow=T),
alpha_d=rnorm(1),
beta_t=rnorm(1),
beta=matrix(rnorm(long$Npar*long$Nhost.sp,mean=-5),ncol=long$Npar,byrow=T),
use=matrix(rep(1,long$Npar*long$Nhost.sp),ncol=long$Npar,byrow=T)
)
}
## Run model
output <- jags(long, inits = inits, c('mn', 'sd', 'use', 'HB_invert', 'PD_host', 'beta', 'alpha','alpha_d','prec.beta','r','beta_t','hosts','parasites'), modelfile, n.chains=3, n.iter=iter) # or use defaults
save(output, file = paste(outdir,"/tcnj_output_mat",iter,".RData",sep=""))
# calculate convergence
library(jagstools)
library(dplyr)
# Count parameters whose Gelman-Rubin Rhat >= 1.1 (conventional
# "not converged" threshold).
notconv <- rhats(output) %>% subset(. >= 1.1) %>% length()
params <- length(rhats(output))
options(max.print=100000)
# Divert the convergence summary and full model printout to a text file.
# The bare paste()/pipe expressions below rely on top-level auto-printing.
sink(file=paste(outdir,"/tcnj_printoutput_mat",iter,".txt",sep=""))
paste("not converged =", notconv, sep=" ")
paste("total params =", params, sep=" ")
print("which not converged: ")
rhats(output) %>% subset(. >= 1.1)
print(output)
sink()
|
# Copyright 2013 Christian Sigg
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
# Tests for non-negative sparse cumulative PCA (nscumcomp with nneg = TRUE).
# Requires the package under test to provide nscumcomp() and cardinality();
# the reconstruction test also uses ginv(), presumably from MASS — TODO
# confirm it is attached by the package's test setup.
context("nscumcomp.nspca")
test_that("cardinality", {
set.seed(1)
X <- matrix(rnorm(20*10), 20)
# k bounds the total number of non-zero loadings; with a modest sparsity
# penalty gamma the bound should be attained exactly.
nscc <- nscumcomp(X, ncomp = 1, gamma = 1, k = 5, nneg = TRUE)
expect_equal(sum(cardinality(nscc$rotation)), 5)
nscc <- nscumcomp(X, ncomp = 5, gamma = 100, k = 10, nneg = TRUE)
expect_equal(sum(cardinality(nscc$rotation)), 10)
})
test_that("non-negativity", {
set.seed(1)
X <- matrix(rnorm(20*10), 20)
# With nneg = TRUE every loading in the rotation matrix must be >= 0.
nscc <- nscumcomp(X, ncomp = 5, gamma = 1e2, k = 10, nneg = TRUE)
expect_true(all(nscc$rotation >= 0))
})
test_that("reconstruction", {
set.seed(1)
X <- matrix(runif(5*5), 5)
# Full-rank fit (ncomp = ncol): scores times pseudo-inverse of the
# rotation, plus the centering offset, should recover X almost exactly.
nscc <- nscumcomp(X, ncomp = 5, k = 20, nneg = TRUE, gamma = 1)
X_hat <- predict(nscc)%*%ginv(nscc$rotation) + matrix(1,5,1) %*% nscc$center
expect_true(norm(X - X_hat, type="F") < 1e-3)
})
test_that("weighted approximation error", {
set.seed(1)
X <- scale(matrix(runif(5*5), 5))
nscc <- nscumcomp(X, omega = c(1,1,1,1,5), ncomp = 3, k = 15, nneg = TRUE, gamma = 1)
X_hat <- predict(nscc)%*%ginv(nscc$rotation)
nrm <- rowSums((X - X_hat)^2)
expect_true(which.min(nrm) == 5)
}) | /data/genthat_extracted_code/nsprcomp/tests/test_nscumcomp_nspca.R | no_license | surayaaramli/typeRrh | R | false | false | 1,799 | r | # Copyright 2013 Christian Sigg
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
context("nscumcomp.nspca")
test_that("cardinality", {
set.seed(1)
X <- matrix(rnorm(20*10), 20)
nscc <- nscumcomp(X, ncomp = 1, gamma = 1, k = 5, nneg = TRUE)
expect_equal(sum(cardinality(nscc$rotation)), 5)
nscc <- nscumcomp(X, ncomp = 5, gamma = 100, k = 10, nneg = TRUE)
expect_equal(sum(cardinality(nscc$rotation)), 10)
})
test_that("non-negativity", {
set.seed(1)
X <- matrix(rnorm(20*10), 20)
nscc <- nscumcomp(X, ncomp = 5, gamma = 1e2, k = 10, nneg = TRUE)
expect_true(all(nscc$rotation >= 0))
})
test_that("reconstruction", {
set.seed(1)
X <- matrix(runif(5*5), 5)
nscc <- nscumcomp(X, ncomp = 5, k = 20, nneg = TRUE, gamma = 1)
X_hat <- predict(nscc)%*%ginv(nscc$rotation) + matrix(1,5,1) %*% nscc$center
expect_true(norm(X - X_hat, type="F") < 1e-3)
})
test_that("weighted approximation error", {
set.seed(1)
X <- scale(matrix(runif(5*5), 5))
nscc <- nscumcomp(X, omega = c(1,1,1,1,5), ncomp = 3, k = 15, nneg = TRUE, gamma = 1)
X_hat <- predict(nscc)%*%ginv(nscc$rotation)
nrm <- rowSums((X - X_hat)^2)
expect_true(which.min(nrm) == 5)
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Constants.R
\docType{data}
\name{sigma}
\alias{sigma}
\title{Constants}
\format{An object of class \code{numeric} of length 1.}
\usage{
sigma
}
\description{
Constants
}
\keyword{datasets}
| /man/sigma.Rd | permissive | Brybrio/TrenchR | R | false | true | 267 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Constants.R
\docType{data}
\name{sigma}
\alias{sigma}
\title{Constants}
\format{An object of class \code{numeric} of length 1.}
\usage{
sigma
}
\description{
Constants
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Categorical_Inference.r
\name{dCategorical}
\alias{dCategorical}
\title{Probability mass function for Categorical distribution}
\usage{
dCategorical(x, p)
}
\arguments{
\item{x}{integer, categorical samples.}
\item{p}{numeric, probabilities.}
}
\value{
A numeric vector of the same length of 'x'.
}
\description{
Calculate probability masses for integer valued Categorical random samples.
For a random variable x, the probability mass function of the categorical distribution is defined as
\deqn{\prod_{k=1}^{K} p_k^{I(x=k)}}
where K is the number of categories (unique values).
}
\examples{
\donttest{
dCategorical(x=c(1L,2L,1L),p=c(1,2))
}
}
\seealso{
\code{\link{rCategorical}}
}
| /man/dCategorical.Rd | permissive | seanahmad/Bayesian-Bricks | R | false | true | 738 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Categorical_Inference.r
\name{dCategorical}
\alias{dCategorical}
\title{Probability mass function for Categorical distribution}
\usage{
dCategorical(x, p)
}
\arguments{
\item{x}{integer, categorical samples.}
\item{p}{numeric, probabilities.}
}
\value{
A numeric vector of the same length of 'x'.
}
\description{
Calculate probability masses for integer valued Categorical random samples.
For a random variable x, the probability mass function of the categorical distribution is defined as
\deqn{\prod_{k=1}^{K} p_k^{I(x=k)}}
where K is the number of categories (unique values).
}
\examples{
\donttest{
dCategorical(x=c(1L,2L,1L),p=c(1,2))
}
}
\seealso{
\code{\link{rCategorical}}
}
|
# Getting & Cleaning Data course project.
# Builds a tidy summary of the UCI HAR dataset: the mean()/std() signal
# measurements, averaged per subject and per activity, written to
# "tidyDataSet.txt" in the working directory.
# 0. Load and extract files
# uncomment for download and extract files in working directory
# remove 'method="curl"' if not needed
#url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#file <- "getdata-projectfiles-UCI HAR Dataset.zip"
#download.file(url,destfile=file) ,method="curl")
#unzip(file)
#rm(url,file)

data_dir <- "UCI HAR Dataset"

# 1. Merge the training and the test measurement sets into one data set,
#    and label the columns with the feature names.
x_train <- read.table(file.path(data_dir, "train/X_train.txt"))
x_test  <- read.table(file.path(data_dir, "test/X_test.txt"))
measurements <- rbind(x_train, x_test)
rm(x_train, x_test)
features <- read.table(file.path(data_dir, "features.txt"))
names(measurements) <- features[, 2]
rm(features)

# 2. Keep only the mean() and std() measurements.
wanted <- grep("mean\\(\\)|std\\(\\)", names(measurements), value = TRUE)
mean_std <- subset(measurements, TRUE, select = wanted)
rm(measurements)

# 3. Build a descriptive activity factor from the y files and the
#    activity label lookup table.
y_train <- read.table(file.path(data_dir, "train/y_train.txt"))
y_test  <- read.table(file.path(data_dir, "test/y_test.txt"))
activity_id <- rbind(y_train, y_test)
rm(y_train, y_test)
activity <- factor(activity_id[, 1])
activity_labels <- read.table(file.path(data_dir, "activity_labels.txt"))
levels(activity) <- tolower(activity_labels[, 2])
rm(activity_labels, activity_id)

# 4. Clean up the variable names; the replacements are applied in order,
#    so the "()" and "-" characters are gone before the prefix renames.
replacements <- c("\\(" = "", "\\)" = "", "-" = ".",
                  "BodyBody" = "Body", "^t" = "time", "^f" = "frequency")
for (pattern in names(replacements)) {
  names(mean_std) <- gsub(pattern, replacements[[pattern]], names(mean_std))
}

# 5. Average each variable for each (subject, activity) pair and write
#    the tidy result out.
s_train <- read.table(file.path(data_dir, "train/subject_train.txt"))
s_test  <- read.table(file.path(data_dir, "test/subject_test.txt"))
subject <- factor(rbind(s_train, s_test)[, 1])
rm(s_train, s_test)
tidy_data <- aggregate(mean_std, by = list(subject, activity), FUN = mean)
names(tidy_data)[1:2] <- c("Subject", "Activity")
rm(mean_std, subject, activity)
write.table(tidy_data, "tidyDataSet.txt", row.names = FALSE)
# to load data use:
# data <- read.table("tidyDataSet.txt", header = TRUE)
| /run_analysis.R | no_license | Anchoa/Getting-and-Cleaning-Data-Project | R | false | false | 2,555 | r | # 0. Load and extract files
# uncomment for download and extract files in working directory
# remove 'method="curl"' if not needed
#url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#file <- "getdata-projectfiles-UCI HAR Dataset.zip"
#download.file(url,destfile=file) ,method="curl")
#unzip(file)
#rm(url,file)
# 1. Merges the training and the test sets to create one data set.
trainSet <- read.table("UCI HAR Dataset/train/X_train.txt")
testSet <- read.table("UCI HAR Dataset/test/X_test.txt")
dataSet <- rbind(trainSet,testSet)
rm(trainSet,testSet)
colLabels <- read.table("UCI HAR Dataset/features.txt")
names(dataSet) <- colLabels[,2]
rm(colLabels)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
meanstdSet <- subset(dataSet, TRUE, select=grep("mean\\(\\)|std\\(\\)",names(dataSet),value=TRUE))
rm(dataSet)
# 3. Uses descriptive activity names to name the activities in the data set
activityTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
activityTest <- read.table("UCI HAR Dataset/test/y_test.txt")
activityAll <- rbind(activityTrain,activityTest)
rm(activityTrain,activityTest)
factorActivities <- factor(activityAll[,1])
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
levels(factorActivities) <- tolower(activityLabels[,2])
rm(activityLabels,activityAll)
# 4. Appropriately labels the data set with descriptive variable names.
names(meanstdSet) <- gsub("\\(","",names(meanstdSet))
names(meanstdSet) <- gsub("\\)","",names(meanstdSet))
names(meanstdSet) <- gsub("-",".",names(meanstdSet))
names(meanstdSet) <- gsub("BodyBody","Body",names(meanstdSet))
names(meanstdSet) <- gsub("^t","time",names(meanstdSet))
names(meanstdSet) <- gsub("^f","frequency",names(meanstdSet))
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# subjects
subjectsTrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
subjectsTest <- read.table("UCI HAR Dataset/test/subject_test.txt")
subjectsAll <- rbind(subjectsTrain,subjectsTest)
rm(subjectsTrain,subjectsTest)
factorSubjects <- factor(subjectsAll[,1])
rm(subjectsAll)
tidyDataSet <- aggregate(meanstdSet,by=list(factorSubjects,factorActivities),FUN=mean)
names(tidyDataSet)[1:2] <- c("Subject","Activity")
rm(meanstdSet,factorSubjects,factorActivities)
write.table(tidyDataSet,"tidyDataSet.txt",row.names=FALSE)
# to load data use:
# data <- read.table("tidyDataSet.txt",header=TRUE)
|
# Problem set 5: k-means clustering of WAGE1, per-cluster wage regressions,
# then stationarity tests and PCA on Fama-French portfolio returns.
# Reading file
context1 = read.csv('WAGE1.csv')
# Elbow plot with within group sum of squares
seed = 2
maxClusters = 10
# Column 1 holds k = 1..10; column 2 is initialised with the same recycled
# 1:10 values but every entry is overwritten by the loop below.
wss = matrix(data = 1:10, nrow = maxClusters, ncol = 2)
for (i in 1:maxClusters)
{
# Re-seed each iteration so every k starts from the same RNG state.
set.seed(seed)
model <- kmeans(context1,centers=i,nstart=10)
wss[i,2] <- model$tot.withinss
}
plot(x = wss[,1], y = wss[,2], type="b", xlab="Number of Clusters", ylab="Aggregate Within Group SS")
# Model
set.seed(seed)
model1 = kmeans(context1,centers=3,nstart=10)
context1_wclusters = cbind(context1, model1$cluster)
model1$centers
# Summary
cluster_summary = aggregate( context1[,c('educ','exper','tenure')], by = list(model1$cluster), mean)
cluster_summary
# NOTE(review): column 22 of context1_wclusters is assumed to be the
# appended cluster assignment — this hardcodes WAGE1.csv having 21 columns;
# confirm, or index by ncol(context1_wclusters) instead.
model2 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==1,])
summary(model2)
model3 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==2,])
summary(model3)
model4 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==3,])
summary(model4)
## Interpretations
# 1. Based on the elbow plot k=4 is the optimal number of clusters that should be used
# 2. Based on the three clusters
# Group 1 has most experienced people but leats education
# Group 2 has the most educated people but least tenure
# Group 3 has people with moderate of everything education, experience and tenure
# 3. Difference between model 1, 2 and 3
# Model 1 - exper is not significant, as it has all people with most experience, so experience doesn't affect wages after such long time
# Model 2 - Education has signiifcant effect on wages, that is shown by model. All variables are significant
# Model 3 - Expeirence is not significant. Education and tenure has significant impact on wages of group 3 people
library(tseries)
#Question2
context2 = read.csv("ffportfolios.csv")
cnt =0
# KPSS level-stationarity test on each portfolio column; 0.347 is the
# 10% critical value of the KPSS statistic.
# NOTE(review): this loop covers columns 2:32 while prcomp() below uses
# 2:33 — confirm whether the last column should be tested here too.
for( i in 2:32)
{
if(kpss.test(context2[,i])$statistic > 0.347)
{
print( paste('trend ',i,' is not level stationary for 90% confidence interval'))
cnt = cnt+1
}
}
print( paste('Total ',cnt,' trends that are not level stationary at 90% confidence interval'))
#
# PCA over all 32 portfolio columns (column 1 is Year).
model5 <- prcomp(context2[,2:33])
screeplot(model5,type="lines")
# First principal component scores, standardised to mean 0 / sd 1.
# NOTE(review): the name `factor` masks base::factor() for the rest of
# this script.
factor <- model5$x[,1]
factor <- scale(factor)
hist(factor)
var(factor)
#years where factor is less than -2.58
# NOTE(review): trunc() has no `digits` argument; it is swallowed by `...`
# and ignored, so this is just trunc() of the Year values.
years_less <- trunc(x = context2[ factor < -2.58, 'Year'], digits = 0)
years_less
# ## Question2
# 1. Based on the screeplot we we should use one principal components
#
# 2. This principal component shows the years with major distinction. It highlights the years where portfolio was at the
# minimum, may be due to economic crises or something similar.
| /R Files/ps5.R | no_license | nishidhvlad/Repository | R | false | false | 2,681 | r |
# Reading file
context1 = read.csv('WAGE1.csv')
# Elbow plot with within group sum of squares
seed = 2
maxClusters = 10
wss = matrix(data = 1:10, nrow = maxClusters, ncol = 2)
for (i in 1:maxClusters)
{
set.seed(seed)
model <- kmeans(context1,centers=i,nstart=10)
wss[i,2] <- model$tot.withinss
}
plot(x = wss[,1], y = wss[,2], type="b", xlab="Number of Clusters", ylab="Aggregate Within Group SS")
# Model
set.seed(seed)
model1 = kmeans(context1,centers=3,nstart=10)
context1_wclusters = cbind(context1, model1$cluster)
model1$centers
# Summary
cluster_summary = aggregate( context1[,c('educ','exper','tenure')], by = list(model1$cluster), mean)
cluster_summary
model2 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==1,])
summary(model2)
model3 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==2,])
summary(model3)
model4 = lm(formula = wage~educ+exper+tenure, data = context1[context1_wclusters[,22] ==3,])
summary(model4)
## Interpretations
# 1. Based on the elbow plot k=4 is the optimal number of clusters that should be used
# 2. Based on the three clusters
# Group 1 has most experienced people but leats education
# Group 2 has the most educated people but least tenure
# Group 3 has people with moderate of everything education, experience and tenure
# 3. Difference between model 1, 2 and 3
# Model 1 - exper is not significant, as it has all people with most experience, so experience doesn't affect wages after such long time
# Model 2 - Education has signiifcant effect on wages, that is shown by model. All variables are significant
# Model 3 - Expeirence is not significant. Education and tenure has significant impact on wages of group 3 people
library(tseries)
#Question2
context2 = read.csv("ffportfolios.csv")
cnt =0
for( i in 2:32)
{
if(kpss.test(context2[,i])$statistic > 0.347)
{
print( paste('trend ',i,' is not level stationary for 90% confidence interval'))
cnt = cnt+1
}
}
print( paste('Total ',cnt,' trends that are not level stationary at 90% confidence interval'))
#
model5 <- prcomp(context2[,2:33])
screeplot(model5,type="lines")
factor <- model5$x[,1]
factor <- scale(factor)
hist(factor)
var(factor)
#years where factor is less than -2.58
years_less <- trunc(x = context2[ factor < -2.58, 'Year'], digits = 0)
years_less
# ## Question2
# 1. Based on the screeplot we we should use one principal components
#
# 2. This principal component shows the years with major distinction. It highlights the years where portfolio was at the
# minimum, may be due to economic crises or something similar.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateFilterValues.R
\name{plotFilterValues}
\alias{plotFilterValues}
\title{Plot filter values using ggplot2.}
\usage{
plotFilterValues(fvalues, sort = "dec", n.show = 20L,
feat.type.cols = FALSE, facet.wrap.nrow = NULL, facet.wrap.ncol = NULL)
}
\arguments{
\item{fvalues}{(\link{FilterValues})\cr
Filter values.}
\item{sort}{(\code{character(1)})\cr
Sort features like this.
\dQuote{dec} = decreasing, \dQuote{inc} = increasing, \dQuote{none} = no sorting.
Default is decreasing.}
\item{n.show}{(\code{integer(1)})\cr
Number of features (maximal) to show.
Default is 20.}
\item{feat.type.cols}{(\code{logical(1)})\cr
Colors for factor and numeric features.
\code{FALSE} means no colors.
Default is \code{FALSE}.}
\item{facet.wrap.nrow, facet.wrap.ncol}{(\link{integer})\cr
Number of rows and columns for facetting. Default for both is \code{NULL}.
In this case ggplot's \code{facet_wrap} will choose the layout itself.}
}
\value{
ggplot2 plot object.
}
\description{
Plot filter values using ggplot2.
}
\examples{
fv = generateFilterValuesData(iris.task, method = "variance")
plotFilterValues(fv)
}
\seealso{
Other filter: \code{\link{filterFeatures}},
\code{\link{generateFilterValuesData}},
\code{\link{getFilterValues}},
\code{\link{getFilteredFeatures}},
\code{\link{listFilterMethods}},
\code{\link{makeFilterWrapper}},
\code{\link{makeFilter}},
\code{\link{plotFilterValuesGGVIS}}
Other generate_plot_data: \code{\link{generateCalibrationData}},
\code{\link{generateCritDifferencesData}},
\code{\link{generateFeatureImportanceData}},
\code{\link{generateFilterValuesData}},
\code{\link{generateLearningCurveData}},
\code{\link{generatePartialDependenceData}},
\code{\link{generateThreshVsPerfData}},
\code{\link{getFilterValues}}
}
| /man/plotFilterValues.Rd | no_license | eleakin/mlr | R | false | true | 1,856 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateFilterValues.R
\name{plotFilterValues}
\alias{plotFilterValues}
\title{Plot filter values using ggplot2.}
\usage{
plotFilterValues(fvalues, sort = "dec", n.show = 20L,
feat.type.cols = FALSE, facet.wrap.nrow = NULL, facet.wrap.ncol = NULL)
}
\arguments{
\item{fvalues}{(\link{FilterValues})\cr
Filter values.}
\item{sort}{(\code{character(1)})\cr
Sort features like this.
\dQuote{dec} = decreasing, \dQuote{inc} = increasing, \dQuote{none} = no sorting.
Default is decreasing.}
\item{n.show}{(\code{integer(1)})\cr
Number of features (maximal) to show.
Default is 20.}
\item{feat.type.cols}{(\code{logical(1)})\cr
Colors for factor and numeric features.
\code{FALSE} means no colors.
Default is \code{FALSE}.}
\item{facet.wrap.nrow, facet.wrap.ncol}{(\link{integer})\cr
Number of rows and columns for facetting. Default for both is \code{NULL}.
In this case ggplot's \code{facet_wrap} will choose the layout itself.}
}
\value{
ggplot2 plot object.
}
\description{
Plot filter values using ggplot2.
}
\examples{
fv = generateFilterValuesData(iris.task, method = "variance")
plotFilterValues(fv)
}
\seealso{
Other filter: \code{\link{filterFeatures}},
\code{\link{generateFilterValuesData}},
\code{\link{getFilterValues}},
\code{\link{getFilteredFeatures}},
\code{\link{listFilterMethods}},
\code{\link{makeFilterWrapper}},
\code{\link{makeFilter}},
\code{\link{plotFilterValuesGGVIS}}
Other generate_plot_data: \code{\link{generateCalibrationData}},
\code{\link{generateCritDifferencesData}},
\code{\link{generateFeatureImportanceData}},
\code{\link{generateFilterValuesData}},
\code{\link{generateLearningCurveData}},
\code{\link{generatePartialDependenceData}},
\code{\link{generateThreshVsPerfData}},
\code{\link{getFilterValues}}
}
|
library(miniUI)
library(CRSSIO)
# Shiny gadget server for creating CRSS input files (CRSSIO package).
# Structure: a set of is*Valid()/is*Selected() reactives validate the user's
# choices; render* outputs show or hide inputs and error messages depending
# on which file types are checked; the 'done' observer performs the actual
# file creation and closes the gadget.
server <- function(input, output, session) {
# ---- DNF record-year validation ----
# NULL inputs mean the corresponding UI has not been rendered yet, so all
# validators treat NULL as "valid" to avoid spurious errors on startup.
isDnfStartYearValid <- reactive({
if (is.null(input$nfInputStartYear))
return(TRUE)
else
as.integer(input$nfInputStartYear) >= 1906
})
isDnfEndAfterStart <- reactive({
if (is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))
return(TRUE)
else
as.integer(input$nfInputStartYear) <= as.integer(input$nfInputEndYear)
})
# Combined error display for the DNF start/end year pickers.
# NOTE(review): paste0() coerces the br() tag to its character form
# ("<br/>"), which is then rendered through HTML() below.
output$dnfStartEndErrors <- renderUI({
errMsg <- ""
if (!(is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))) {
if (!isDnfStartYearValid())
errMsg <- paste0(errMsg, "Start year should be after 1906", br())
if(!isDnfEndAfterStart())
errMsg <- paste0(
errMsg,
"The end date should be after the start date.",
br()
)
}
div(class = "errorMessage", HTML(errMsg))
})
# Number of years in the selected ISM record; a large sentinel (10000)
# is returned while the year inputs are not rendered, so the simulation-
# length check below always passes in that state.
ismRange <- reactive({
if(is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))
return(10000)
else
as.integer(input$nfInputEndYear) -
as.integer(input$nfInputStartYear) + 1
})
# Simulation length must fit inside the ISM record (DNF case only).
isSimYrsValid <- reactive({
if ( all(
isDnfSelected(),
!is.null(input$simEndYear),
!is.null(input$traceStartYear)
)){
as.integer(input$simEndYear) - as.integer(input$traceStartYear) + 1 <=
ismRange()
} else{
TRUE
}
})
isEndYearValid <- reactive({
if ( all(
isDnfSelected() | isCmipSelected(),
!is.null(input$simEndYear),
!is.null(input$traceStartYear)
))
as.integer(input$simEndYear) >= as.integer(input$traceStartYear)
else
TRUE
})
output$simYrsCheck <- renderUI({
if (!isSimYrsValid())
div(
class = "errorMessage",
HTML("Simulation Years cannot be longer than the number of years in the record from step 1.")
)
else if (!isEndYearValid())
div(
class = "errorMessage",
HTML("The model run end year should be >= the model run start year.")
)
else
HTML("")
})
# DNF output folder must already exist.
isOutputFolderValid <- reactive({
if (isDnfSelected() & !is.null(input$selectFolder))
dir.exists(input$selectFolder)
else
TRUE
})
output$checkInputFolder <- renderUI({
if(!isOutputFolderValid())
div(class = "errorMessage",
HTML("Folder does not exist"))
else
HTML("")
})
# check the simulation options ------------------------
# The year pickers and their headers only appear when at least one file
# type that needs them is selected.
output$simStartYearUI <- renderUI({
if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
selectInput(
'traceStartYear',
'Traces Start In:',
choices = seq(2000, 2099),
selected = 2018
)
})
output$simEndYearUI <- renderUI({
if (isDnfSelected() | isCmipSelected())
selectInput(
"simEndYear",
"Traces End In:",
choices = seq(2000, 2099),
selected = 2060
)
})
output$simYearHeader <- renderText({
if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
"Select the simulation start and end years of the CRSS simulations."
else
""
})
output$simYearTitle <- renderText({
if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
"Simulation Start and End Years"
else
""
})
# check the DNF creation options ----------------------
# TRUE when the "DNF Files" checkbox is ticked.
isDnfSelected <- reactive({
!is.null(input$createFiles) & "dnf" %in% input$createFiles
})
output$nfRecordStart <- renderUI({
if (isDnfSelected()) {
selectInput(
"nfInputStartYear",
"Start Year:",
choices = 1906:2020,
selected = 1906
)
} else
return()
})
output$nfRecordEnd <- renderUI({
if (isDnfSelected()) {
selectInput(
"nfInputEndYear",
'End Year',
choices = 1906:2020,
selected = 2015
)
} else
return()
})
output$nfRecordHeader <- renderText({
if (isDnfSelected())
"Select the years to apply ISM to:"
else
return("")
})
output$dnfFolderOut <- renderUI({
if (isDnfSelected())
textInput('selectFolder', 'Select Folder', value = 'C:/')
else
return()
})
output$dnfOverwriteUI <- renderUI({
if (isDnfSelected())
radioButtons(
"overwriteDnf",
label = "Overwrite existing files?",
choices = c("No" = FALSE, "Yes" = TRUE),
selected = FALSE,
inline = TRUE
)
else
return()
})
output$dnfFolderHeader <- renderText({
if (isDnfSelected())
"Select the folder to save the trace files in. The folder should already exist."
else
return("")
})
output$dnfSectionHeader <- renderText({
if (isDnfSelected())
"Create Direct Natural Flow Options"
else
return("")
})
# check CMIP creation options ------------------------
# TRUE when the "CMIP Files" checkbox is ticked.
isCmipSelected <- reactive({
!is.null(input$createFiles) & "cmip5" %in% input$createFiles
})
isCmipFileValid <- reactive({
if (isCmipSelected() & !is.null(input$cmipFile))
file.exists(input$cmipFile)
else
TRUE
})
# The CMIP input must be a netCDF (".nc") file.
isCmipNCFile <- reactive({
if (isCmipSelected() & !is.null(input$cmipFile))
tools::file_ext(input$cmipFile) == "nc"
else
TRUE
})
output$checkCmip5IFile <- renderUI({
errMsg <- ""
if (!isCmipFileValid())
errMsg <- paste(errMsg, "Netcdf file does not exist.")
if (!isCmipNCFile())
errMsg <- paste(errMsg, "Please specify a '.nc' file.")
div(class = "errorMessage", HTML(errMsg))
})
output$cmipSectionHeader <- renderText({
if (isCmipSelected())
"Create CMIP Natural Flow File Options"
else
""
})
output$cmipInputHeader <- renderText({
if (isCmipSelected())
"Select the input netcdf file and the scenario number you wish to use."
else
""
})
output$cmipInputHeader2 <- renderText({
if (isCmipSelected())
"Select folder to save CMIP natural flow files to."
else
""
})
output$cmipIFileUI <- renderUI({
if (isCmipSelected())
textInput(
"cmipFile",
"Select CMIP netcdf file to use:",
value = "C:/test.nc"
)
})
output$cmipScenNumUI <- renderUI({
if (isCmipSelected())
textInput("cmipScenNum", label = "Scenario number:", value = "5")
})
output$cmipOFolderUI <- renderUI({
if (isCmipSelected())
textInput("cmipOFolder", "Select folder:", value = "C:/")
})
output$cmipOverwriteUI <- renderUI({
if (isCmipSelected())
radioButtons(
"overwriteCmip",
label = "Overwrite existing files?",
choices = c("No" = FALSE, "Yes" = TRUE),
selected = FALSE,
inline = TRUE
)
else
return()
})
isCmipOutputFolderValid <- reactive({
if (isCmipSelected() & !is.null(input$cmipOFolder))
dir.exists(input$cmipOFolder)
else
TRUE
})
output$checkCmip5OFolder <- renderUI({
if(!isCmipOutputFolderValid())
div(class = "errorMessage",
HTML("Folder does not exist"))
else
HTML("")
})
# check the natural flow xlsx creation options -------------
# TRUE when the "HistoricalNaturalFlows.xlsx" checkbox is ticked.
isHistNfSelected <- reactive({
!is.null(input$createFiles) & "histNF" %in% input$createFiles
})
output$xlAvg <- renderUI({
if (isHistNfSelected())
sliderInput(
"xlAvg",
"Select number of years to average when filling LB flow data",
min = 1,
max = 20,
value = 5
)
else
return()
})
output$xlPath <- renderUI({
if (isHistNfSelected())
textInput("xlPath", "Select folder to save file in:", value = "C:/")
else
return()
})
isXlPathValid <- reactive({
if (isHistNfSelected() & !is.null(input$xlPath))
return(dir.exists(input$xlPath))
else
# if you aren't creating the excel file, always return true for this
TRUE
})
output$checkXlFolder <- renderUI({
if(!isXlPathValid())
div(class = "errorMessage", HTML("Folder does not exist"))
else
HTML("")
})
output$histNfSectionHeader <- renderUI({
if (isHistNfSelected())
"Create HistoricalNaturalFlows.xlsx Options"
else
""
})
# check all output errors ----------------------
# Conjunction of every validator above; gates the 'done' observer.
isAllInputValid <- reactive({
isSimYrsValid() & isOutputFolderValid() & isDnfStartYearValid() &
isDnfEndAfterStart() & isXlPathValid() & isCmipFileValid() &
isCmipOutputFolderValid() & isCmipNCFile()
})
output$checkAllErrors <- renderUI({
if(!isAllInputValid())
div(
class = "errorMessage",
HTML("Please fix errors before clicking run.")
)
else
HTML("")
})
# done --------------
# Listen for 'done' events.
# Creates the selected files via the CRSSIO package functions, then closes
# the gadget. Does nothing (gadget stays open) while inputs are invalid.
observeEvent(input$done, {
if(isAllInputValid()){
# ISM record range as a yearmon pair, e.g. Jan of start year to Dec of
# end year. NOTE(review): this is computed even when DNF is not
# selected, in which case the nfInput* inputs may be NULL — confirm
# zoo::as.yearmon tolerates that (rr is only used in the DNF branch).
rr <- zoo::as.yearmon(c(paste0(input$nfInputStartYear, "-1"),
paste0(input$nfInputEndYear, "-12")))
if (isDnfSelected()) {
crssi_create_dnf_files(
"CoRiverNF",
oFolder = input$selectFolder,
startYear = as.integer(input$traceStartYear),
endYear = as.integer(input$simEndYear),
recordToUse = rr,
overwriteFiles = as.logical(input$overwriteDnf)
)
message(paste("\nAll DNF trace files have been saved to:",
input$selectFolder))
}
if (isCmipSelected()) {
crssi_create_cmip_nf_files(
input$cmipFile,
oFolder = input$cmipOFolder,
startYear = as.integer(input$traceStartYear),
endYear = as.integer(input$simEndYear),
scenarioNumber = input$cmipScenNum ,
overwriteFiles = as.logical(input$overwriteCmip)
)
message(paste("\nAll CMIP trace files have been saved to:",
input$cmipOFolder))
}
if (isHistNfSelected()) {
crssi_create_hist_nf_xlsx(
as.integer(input$traceStartYear),
nYearAvg = as.integer(input$xlAvg),
oFolder = input$xlPath
)
message(paste("\nHistoricalNaturalFlow.xlsx saved to:", input$xlPath))
}
stopApp()
}
})
}
# Shared layout constants for the gadget UI.
divHeight <- "50px"
padLeft <- "padding-left: 10px;"
# Gadget UI: a miniPage whose sections mirror the server's render outputs —
# each section appears only when the corresponding file type is selected
# (the server returns empty strings / NULL otherwise).
ui <- miniPage(
tags$head(
tags$style(HTML("
.errorMessage {
color: red;
}
"))
),
gadgetTitleBar(
"Create CRSS Input Files",
right = miniTitleBarButton("done","Close and Run", primary = TRUE)
),
miniContentPanel(padding = 0,
# select files to create -------------------------
fillRow(
checkboxGroupInput(
"createFiles",
label = "Select files to create:",
choices = c("DNF Files" = "dnf", "CMIP Files" = "cmip5",
"HistoricalNaturalFlows.xlsx" = "histNF"),
selected = c("dnf", "histNF"),
inline = TRUE
),
height = divHeight,
style = padLeft
),
# simulation start and end years ------------
h4(htmlOutput("simYearTitle"), "style" = padLeft),
h5(htmlOutput("simYearHeader"), "style" = padLeft),
fillRow(
uiOutput("simStartYearUI"),
uiOutput("simEndYearUI"),
htmlOutput("simYrsCheck"),
height = divHeight,
"style" = padLeft
),
# show observed record options -----------------
h4(htmlOutput("dnfSectionHeader"), "style" = padLeft),
h5(htmlOutput("nfRecordHeader"), "style" = padLeft),
fillRow(
uiOutput("nfRecordStart"),
uiOutput("nfRecordEnd"),
htmlOutput("dnfStartEndErrors"),
height = divHeight,
"style" = padLeft
),
h5(htmlOutput("dnfFolderHeader"), "style" = padLeft),
fillRow(
uiOutput("dnfFolderOut"),
uiOutput("dnfOverwriteUI"),
htmlOutput("checkInputFolder"),
height = divHeight,
"style" = padLeft
),
# show CMIP options -------------------------------
h4(htmlOutput("cmipSectionHeader"), "style" = padLeft),
h5(htmlOutput("cmipInputHeader"), "style" = padLeft),
fillRow(
uiOutput("cmipIFileUI"),
uiOutput("cmipScenNumUI"),
htmlOutput("checkCmip5IFile"),
height = divHeight,
"style" = padLeft
),
h5(htmlOutput("cmipInputHeader2"), "style" = padLeft),
fillRow(
uiOutput("cmipOFolderUI"),
uiOutput("cmipOverwriteUI"),
htmlOutput("checkCmip5OFolder"),
height = divHeight,
"style" = padLeft
),
# if xlsx, select the parameters of that file -------------------
h4(htmlOutput("histNfSectionHeader"), "style" = padLeft),
fillRow(
uiOutput("xlAvg"),
uiOutput("xlPath"),
htmlOutput("checkXlFolder"),
height = divHeight,
"style" = padLeft
),
br(), br(), br(), br(),
# final validation ----------------
fillRow(
htmlOutput("checkAllErrors"),
height = divHeight,
"style" = "padding-left: 10px; padding-top: 50px"
)
)
)
# Launch the gadget.
shinyApp(ui = ui, server = server)
| /__app.R | no_license | BoulderCodeHub/CRSSIO | R | false | false | 14,466 | r | library(miniUI)
library(CRSSIO)
# Shiny server for the CRSS input-file creation gadget. Each is*Valid()
# reactive checks one user input; the output$check* renderers surface the
# corresponding error text; observeEvent(input$done) runs the CRSSIO
# creation functions once every check passes.
server <- function(input, output, session) {
  # TRUE while the start-year input has not rendered yet; otherwise the
  # ISM record may not start before 1906.
  isDnfStartYearValid <- reactive({
    if (is.null(input$nfInputStartYear))
      return(TRUE)
    else
      as.integer(input$nfInputStartYear) >= 1906
  })
  # TRUE until both year inputs exist; then start must be <= end.
  isDnfEndAfterStart <- reactive({
    if (is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))
      return(TRUE)
    else
      as.integer(input$nfInputStartYear) <= as.integer(input$nfInputEndYear)
  })
  # Combined error text for the ISM record start/end years.
  output$dnfStartEndErrors <- renderUI({
    errMsg <- ""
    if (!(is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))) {
      if (!isDnfStartYearValid())
        errMsg <- paste0(errMsg, "Start year should be after 1906", br())
      if(!isDnfEndAfterStart())
        errMsg <- paste0(
          errMsg,
          "The end date should be after the start date.",
          br()
        )
    }
    div(class = "errorMessage", HTML(errMsg))
  })
  # Number of years in the selected ISM record; a large sentinel (10000)
  # is returned until both year inputs have rendered.
  ismRange <- reactive({
    if(is.null(input$nfInputStartYear) | is.null(input$nfInputEndYear))
      return(10000)
    else
      as.integer(input$nfInputEndYear) -
        as.integer(input$nfInputStartYear) + 1
  })
  # Simulation length cannot exceed the ISM record length selected above.
  isSimYrsValid <- reactive({
    if ( all(
      isDnfSelected(),
      !is.null(input$simEndYear),
      !is.null(input$traceStartYear)
    )){
      as.integer(input$simEndYear) - as.integer(input$traceStartYear) + 1 <=
        ismRange()
    } else{
      TRUE
    }
  })
  # Simulation end year must not precede the start year.
  isEndYearValid <- reactive({
    if ( all(
      isDnfSelected() | isCmipSelected(),
      !is.null(input$simEndYear),
      !is.null(input$traceStartYear)
    ))
      as.integer(input$simEndYear) >= as.integer(input$traceStartYear)
    else
      TRUE
  })
  output$simYrsCheck <- renderUI({
    if (!isSimYrsValid())
      div(
        class = "errorMessage",
        HTML("Simulation Years cannot be longer than the number of years in the record from step 1.")
      )
    else if (!isEndYearValid())
      div(
        class = "errorMessage",
        HTML("The model run end year should be >= the model run start year.")
      )
    else
      HTML("")
  })
  # DNF output folder must already exist on disk.
  isOutputFolderValid <- reactive({
    if (isDnfSelected() & !is.null(input$selectFolder))
      dir.exists(input$selectFolder)
    else
      TRUE
  })
  output$checkInputFolder <- renderUI({
    if(!isOutputFolderValid())
      div(class = "errorMessage",
          HTML("Folder does not exist"))
    else
      HTML("")
  })
  # check the simulation options ------------------------
  output$simStartYearUI <- renderUI({
    if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
      selectInput(
        'traceStartYear',
        'Traces Start In:',
        choices = seq(2000, 2099),
        selected = 2018
      )
  })
  output$simEndYearUI <- renderUI({
    if (isDnfSelected() | isCmipSelected())
      selectInput(
        "simEndYear",
        "Traces End In:",
        choices = seq(2000, 2099),
        selected = 2060
      )
  })
  output$simYearHeader <- renderText({
    if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
      "Select the simulation start and end years of the CRSS simulations."
    else
      ""
  })
  output$simYearTitle <- renderText({
    if (isDnfSelected() | isCmipSelected() | isHistNfSelected())
      "Simulation Start and End Years"
    else
      ""
  })
  # check the DNF creation options ----------------------
  # TRUE when the "DNF Files" checkbox is ticked.
  isDnfSelected <- reactive({
    !is.null(input$createFiles) & "dnf" %in% input$createFiles
  })
  output$nfRecordStart <- renderUI({
    if (isDnfSelected()) {
      selectInput(
        "nfInputStartYear",
        "Start Year:",
        choices = 1906:2020,
        selected = 1906
      )
    } else
      return()
  })
  output$nfRecordEnd <- renderUI({
    if (isDnfSelected()) {
      selectInput(
        "nfInputEndYear",
        'End Year',
        choices = 1906:2020,
        selected = 2015
      )
    } else
      return()
  })
  output$nfRecordHeader <- renderText({
    if (isDnfSelected())
      "Select the years to apply ISM to:"
    else
      return("")
  })
  output$dnfFolderOut <- renderUI({
    if (isDnfSelected())
      textInput('selectFolder', 'Select Folder', value = 'C:/')
    else
      return()
  })
  output$dnfOverwriteUI <- renderUI({
    if (isDnfSelected())
      radioButtons(
        "overwriteDnf",
        label = "Overwrite existing files?",
        choices = c("No" = FALSE, "Yes" = TRUE),
        selected = FALSE,
        inline = TRUE
      )
    else
      return()
  })
  output$dnfFolderHeader <- renderText({
    if (isDnfSelected())
      "Select the folder to save the trace files in. The folder should already exist."
    else
      return("")
  })
  output$dnfSectionHeader <- renderText({
    if (isDnfSelected())
      "Create Direct Natural Flow Options"
    else
      return("")
  })
  # check CMIP creation options ------------------------
  # TRUE when the "CMIP Files" checkbox is ticked.
  isCmipSelected <- reactive({
    !is.null(input$createFiles) & "cmip5" %in% input$createFiles
  })
  isCmipFileValid <- reactive({
    if (isCmipSelected() & !is.null(input$cmipFile))
      file.exists(input$cmipFile)
    else
      TRUE
  })
  # Input must carry a ".nc" (netcdf) extension.
  isCmipNCFile <- reactive({
    if (isCmipSelected() & !is.null(input$cmipFile))
      tools::file_ext(input$cmipFile) == "nc"
    else
      TRUE
  })
  output$checkCmip5IFile <- renderUI({
    errMsg <- ""
    if (!isCmipFileValid())
      errMsg <- paste(errMsg, "Netcdf file does not exist.")
    if (!isCmipNCFile())
      errMsg <- paste(errMsg, "Please specify a '.nc' file.")
    div(class = "errorMessage", HTML(errMsg))
  })
  output$cmipSectionHeader <- renderText({
    if (isCmipSelected())
      "Create CMIP Natural Flow File Options"
    else
      ""
  })
  output$cmipInputHeader <- renderText({
    if (isCmipSelected())
      "Select the input netcdf file and the scenario number you wish to use."
    else
      ""
  })
  output$cmipInputHeader2 <- renderText({
    if (isCmipSelected())
      "Select folder to save CMIP natural flow files to."
    else
      ""
  })
  output$cmipIFileUI <- renderUI({
    if (isCmipSelected())
      textInput(
        "cmipFile",
        "Select CMIP netcdf file to use:",
        value = "C:/test.nc"
      )
  })
  output$cmipScenNumUI <- renderUI({
    if (isCmipSelected())
      textInput("cmipScenNum", label = "Scenario number:", value = "5")
  })
  output$cmipOFolderUI <- renderUI({
    if (isCmipSelected())
      textInput("cmipOFolder", "Select folder:", value = "C:/")
  })
  output$cmipOverwriteUI <- renderUI({
    if (isCmipSelected())
      radioButtons(
        "overwriteCmip",
        label = "Overwrite existing files?",
        choices = c("No" = FALSE, "Yes" = TRUE),
        selected = FALSE,
        inline = TRUE
      )
    else
      return()
  })
  isCmipOutputFolderValid <- reactive({
    if (isCmipSelected() & !is.null(input$cmipOFolder))
      dir.exists(input$cmipOFolder)
    else
      TRUE
  })
  output$checkCmip5OFolder <- renderUI({
    if(!isCmipOutputFolderValid())
      div(class = "errorMessage",
          HTML("Folder does not exist"))
    else
      HTML("")
  })
  # check the natural flow xlsx creation options -------------
  # TRUE when the "HistoricalNaturalFlows.xlsx" checkbox is ticked.
  isHistNfSelected <- reactive({
    !is.null(input$createFiles) & "histNF" %in% input$createFiles
  })
  output$xlAvg <- renderUI({
    if (isHistNfSelected())
      sliderInput(
        "xlAvg",
        "Select number of years to average when filling LB flow data",
        min = 1,
        max = 20,
        value = 5
      )
    else
      return()
  })
  output$xlPath <- renderUI({
    if (isHistNfSelected())
      textInput("xlPath", "Select folder to save file in:", value = "C:/")
    else
      return()
  })
  isXlPathValid <- reactive({
    if (isHistNfSelected() & !is.null(input$xlPath))
      return(dir.exists(input$xlPath))
    else
      # if you aren't creating the excel file, always return true for this
      TRUE
  })
  output$checkXlFolder <- renderUI({
    if(!isXlPathValid())
      div(class = "errorMessage", HTML("Folder does not exist"))
    else
      HTML("")
  })
  output$histNfSectionHeader <- renderUI({
    if (isHistNfSelected())
      "Create HistoricalNaturalFlows.xlsx Options"
    else
      ""
  })
  # check all output errors ----------------------
  # All individual checks must pass before the "done" handler will run.
  isAllInputValid <- reactive({
    isSimYrsValid() & isOutputFolderValid() & isDnfStartYearValid() &
      isDnfEndAfterStart() & isXlPathValid() & isCmipFileValid() &
      isCmipOutputFolderValid() & isCmipNCFile()
  })
  output$checkAllErrors <- renderUI({
    if(!isAllInputValid())
      div(
        class = "errorMessage",
        HTML("Please fix errors before clicking run.")
      )
    else
      HTML("")
  })
  # done --------------
  # Listen for 'done' events.
  observeEvent(input$done, {
    if(isAllInputValid()){
      # ISM record range as a yearmon span: January of the start year
      # through December of the end year.
      rr <- zoo::as.yearmon(c(paste0(input$nfInputStartYear, "-1"),
                              paste0(input$nfInputEndYear, "-12")))
      if (isDnfSelected()) {
        crssi_create_dnf_files(
          "CoRiverNF",
          oFolder = input$selectFolder,
          startYear = as.integer(input$traceStartYear),
          endYear = as.integer(input$simEndYear),
          recordToUse = rr,
          overwriteFiles = as.logical(input$overwriteDnf)
        )
        message(paste("\nAll DNF trace files have been saved to:",
                      input$selectFolder))
      }
      if (isCmipSelected()) {
        crssi_create_cmip_nf_files(
          input$cmipFile,
          oFolder = input$cmipOFolder,
          startYear = as.integer(input$traceStartYear),
          endYear = as.integer(input$simEndYear),
          scenarioNumber = input$cmipScenNum ,
          overwriteFiles = as.logical(input$overwriteCmip)
        )
        message(paste("\nAll CMIP trace files have been saved to:",
                      input$cmipOFolder))
      }
      if (isHistNfSelected()) {
        crssi_create_hist_nf_xlsx(
          as.integer(input$traceStartYear),
          nYearAvg = as.integer(input$xlAvg),
          oFolder = input$xlPath
        )
        message(paste("\nHistoricalNaturalFlow.xlsx saved to:", input$xlPath))
      }
      # Close the gadget once all requested files have been written.
      stopApp()
    }
  })
}
# Row height and left padding shared by the layout rows below.
divHeight <- "50px"
padLeft <- "padding-left: 10px;"
# miniUI gadget layout: a title bar plus stacked rows of dynamically
# rendered widgets. Each uiOutput/htmlOutput ID here matches an output$
# name assigned in the server function.
# NOTE(review): "style" is passed as a quoted argument name throughout;
# it appears intended as the HTML style attribute of each tag -- confirm.
ui <- miniPage(
  tags$head(
    tags$style(HTML("
      .errorMessage {
        color: red;
      }
    "))
  ),
  gadgetTitleBar(
    "Create CRSS Input Files",
    right = miniTitleBarButton("done","Close and Run", primary = TRUE)
  ),
  miniContentPanel(padding = 0,
    # select files to create -------------------------
    fillRow(
      checkboxGroupInput(
        "createFiles",
        label = "Select files to create:",
        choices = c("DNF Files" = "dnf", "CMIP Files" = "cmip5",
                    "HistoricalNaturalFlows.xlsx" = "histNF"),
        selected = c("dnf", "histNF"),
        inline = TRUE
      ),
      height = divHeight,
      style = padLeft
    ),
    # simulation start and end years ------------
    h4(htmlOutput("simYearTitle"), "style" = padLeft),
    h5(htmlOutput("simYearHeader"), "style" = padLeft),
    fillRow(
      uiOutput("simStartYearUI"),
      uiOutput("simEndYearUI"),
      htmlOutput("simYrsCheck"),
      height = divHeight,
      "style" = padLeft
    ),
    # show observed record options -----------------
    h4(htmlOutput("dnfSectionHeader"), "style" = padLeft),
    h5(htmlOutput("nfRecordHeader"), "style" = padLeft),
    fillRow(
      uiOutput("nfRecordStart"),
      uiOutput("nfRecordEnd"),
      htmlOutput("dnfStartEndErrors"),
      height = divHeight,
      "style" = padLeft
    ),
    h5(htmlOutput("dnfFolderHeader"), "style" = padLeft),
    fillRow(
      uiOutput("dnfFolderOut"),
      uiOutput("dnfOverwriteUI"),
      htmlOutput("checkInputFolder"),
      height = divHeight,
      "style" = padLeft
    ),
    # show CMIP options -------------------------------
    h4(htmlOutput("cmipSectionHeader"), "style" = padLeft),
    h5(htmlOutput("cmipInputHeader"), "style" = padLeft),
    fillRow(
      uiOutput("cmipIFileUI"),
      uiOutput("cmipScenNumUI"),
      htmlOutput("checkCmip5IFile"),
      height = divHeight,
      "style" = padLeft
    ),
    h5(htmlOutput("cmipInputHeader2"), "style" = padLeft),
    fillRow(
      uiOutput("cmipOFolderUI"),
      uiOutput("cmipOverwriteUI"),
      htmlOutput("checkCmip5OFolder"),
      height = divHeight,
      "style" = padLeft
    ),
    # if xlsx, select the parameters of that file -------------------
    h4(htmlOutput("histNfSectionHeader"), "style" = padLeft),
    fillRow(
      uiOutput("xlAvg"),
      uiOutput("xlPath"),
      htmlOutput("checkXlFolder"),
      height = divHeight,
      "style" = padLeft
    ),
    br(), br(), br(), br(),
    # final validation ----------------
    fillRow(
      htmlOutput("checkAllErrors"),
      height = divHeight,
      "style" = "padding-left: 10px; padding-top: 50px"
    )
  )
)
# Launch the gadget with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotPanel.R
\name{descriptiveKineticGaitPanel}
\alias{descriptiveKineticGaitPanel}
\title{descriptiveKineticGaitPanel}
\usage{
descriptiveKineticGaitPanel(descStatsFrameSequence, descStatsPhases,
iContext, colorFactor = NULL, linetypeFactor = NULL,
normativeData = NULL, stdCorridorFlag = FALSE,
manualLineType = NULL, manualSizeType = NULL)
}
\arguments{
\item{descStatsFrameSequence}{[dataframe] descriptive stats table of all frame sequences}
\item{descStatsPhases}{[dataframe] descriptive stats table of gait phase scalar ()}
\item{iContext}{[string] context of the frame sequence}
\item{colorFactor}{[string] line color according to an independent variable}
\item{linetypeFactor}{[string] line type defined according to an independent variable}
\item{normativeData}{[dataframe] table of a normative dataset}
\item{stdCorridorFlag}{[Bool] add std corridor to plot}
\item{manualLineType}{[list] manual line type ( see ggplot2 doc)}
\item{manualSizeType}{[float] manual line size ( see ggplot2 doc)}
}
\value{
fig [ggplot2 figure]
}
\description{
convenient descriptive plot panel of gait kinetics for a specific context
}
\section{Warning}{
}
\examples{
}
| /man/descriptiveKineticGaitPanel.Rd | no_license | pyCGM2/rCGM2 | R | false | true | 1,249 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotPanel.R
\name{descriptiveKineticGaitPanel}
\alias{descriptiveKineticGaitPanel}
\title{descriptiveKineticGaitPanel}
\usage{
descriptiveKineticGaitPanel(descStatsFrameSequence, descStatsPhases,
iContext, colorFactor = NULL, linetypeFactor = NULL,
normativeData = NULL, stdCorridorFlag = FALSE,
manualLineType = NULL, manualSizeType = NULL)
}
\arguments{
\item{descStatsFrameSequence}{[dataframe] descriptive stats table of all frame sequences}
\item{descStatsPhases}{[dataframe] descriptive stats table of gait phase scalar ()}
\item{iContext}{[string] context of the frame sequence}
\item{colorFactor}{[string] line color according to an independent variable}
\item{linetypeFactor}{[string] line type defined according to an independent variable}
\item{normativeData}{[dataframe] table of a normative dataset}
\item{stdCorridorFlag}{[Bool] add std corridor to plot}
\item{manualLineType}{[list] manual line type ( see ggplot2 doc)}
\item{manualSizeType}{[float] manual line size ( see ggplot2 doc)}
}
\value{
fig [ggplot2 figure]
}
\description{
convenient descriptive plot panel of gait kinetics for a specific context
}
\section{Warning}{
}
\examples{
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_func.r
\name{unscale}
\alias{unscale}
\title{Accept the result of scale and perform the inverse transformation}
\usage{
unscale(data_scaled)
}
\arguments{
\item{data_scaled}{the result of scale}
}
\value{
the result of unscale
}
\description{
Accept the result of scale and perform the inverse transformation
}
\examples{
unscale(scale(1:5))
}
| /man/unscale.Rd | no_license | yinanhe/heyinan | R | false | true | 425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_func.r
\name{unscale}
\alias{unscale}
\title{Accept the result of scale and perform the inverse transformation}
\usage{
unscale(data_scaled)
}
\arguments{
\item{data_scaled}{the result of scale}
}
\value{
the result of unscale
}
\description{
Accept the result of scale and perform the inverse transformation
}
\examples{
unscale(scale(1:5))
}
|
#' Ordinary least squares regression
#'
#' @description Ordinary least squares regression.
#'
#' @param object An object of class "formula" (or one that can be coerced to
#'   that class): a symbolic description of the model to be fitted or class
#'   \code{lm}.
#'
#' @param ... Other inputs.
#'
#' @return \code{ols_regress} returns an object of class \code{"ols_regress"}.
#' An object of class \code{"ols_regress"} is a list containing the following
#' components:
#'
#' \item{r}{square root of rsquare, correlation between observed and predicted values of dependent variable}
#' \item{rsq}{coefficient of determination or r-square}
#' \item{adjr}{adjusted rsquare}
#' \item{sigma}{root mean squared error}
#' \item{cv}{coefficient of variation}
#' \item{mse}{mean squared error}
#' \item{mae}{mean absolute error}
#' \item{aic}{akaike information criteria}
#' \item{sbc}{bayesian information criteria}
#' \item{sbic}{sawa bayesian information criteria}
#' \item{prsq}{predicted rsquare}
#' \item{error_df}{residual degrees of freedom}
#' \item{model_df}{regression degrees of freedom}
#' \item{total_df}{total degrees of freedom}
#' \item{ess}{error sum of squares}
#' \item{rss}{regression sum of squares}
#' \item{tss}{total sum of squares}
#' \item{rms}{regression mean square}
#' \item{ems}{error mean square}
#' \item{f}{f statistic}
#' \item{p}{p-value for \code{f}}
#' \item{n}{number of predictors including intercept}
#' \item{betas}{betas; estimated coefficients}
#' \item{sbetas}{standardized betas}
#' \item{std_errors}{standard errors}
#' \item{tvalues}{t values}
#' \item{pvalues}{p-value of \code{tvalues}}
#' \item{df}{degrees of freedom of \code{betas}}
#' \item{conf_lm}{confidence intervals for coefficients}
#' \item{title}{title for the model}
#' \item{dependent}{character vector; name of the dependent variable}
#' \item{predictors}{character vector; name of the predictor variables}
#' \item{mvars}{character vector; name of the predictor variables including intercept}
#' \item{model}{input model for \code{ols_regress}}
#'
#' @section Interaction Terms:
#' If the model includes interaction terms, the standardized betas
#' are computed after scaling and centering the predictors.
#'
#' @references https://www.ssc.wisc.edu/~hemken/Stataworkshops/stdBeta/Getting%20Standardized%20Coefficients%20Right.pdf
#'
#' @examples
#' ols_regress(mpg ~ disp + hp + wt, data = mtcars)
#'
#' # if model includes interaction terms set iterm to TRUE
#' ols_regress(mpg ~ disp * wt, data = mtcars, iterm = TRUE)
#'
#' @export
#'
ols_regress <- function(object, ...) UseMethod("ols_regress")
#' @export
#'
ols_regress.default <- function(object, data, conf.level = 0.95,
                                iterm = FALSE, title = "model", ...) {

  # Validate inputs up front so errors surface before any fitting happens.
  if (missing(data)) {
    stop("data missing", call. = FALSE)
  }

  if (!is.numeric(conf.level)) {
    stop("conf.level must be numeric", call. = FALSE)
  }

  # Scalar condition in `if`: use short-circuiting `||`, not vectorized `|`.
  if ((conf.level < 0) || (conf.level > 1)) {
    stop("conf.level must be between 0 and 1", call. = FALSE)
  }

  check_logic(iterm)

  if (!is.character(title)) {
    stop(paste(title, "is not a string, Please specify a string as title."), call. = FALSE)
  }

  # detect if model formula includes interaction terms
  if (inherits(object, "formula")) {
    # as.character(formula)[3] is the RHS; grepl over the formula checks it
    detect_iterm <- grepl(object, pattern = "\\*")[3]
  } else {
    detect_iterm <- grepl(object, pattern = "\\*")
  }

  # Set interaction to TRUE if the formula contains interaction terms.
  # isTRUE(any(...)) guards against NA (a one-sided formula has no third
  # part, so indexing [3] yields NA) and against length != 1 results from
  # the non-formula branch, both of which would break a bare `if ()`.
  if (isTRUE(any(detect_iterm))) {
    iterm <- TRUE
  }

  # Delegate the actual computation to the internal helper and tag the
  # result so the print.ols_regress method dispatches on it.
  result <- reg_comp(object, data, conf.level, iterm, title)
  class(result) <- "ols_regress"
  result
}
#' @rdname ols_regress
#' @export
#'
ols_regress.lm <- function(object, ...) {
  check_model(object)
  formula <- formula(object)
  # Recover the data the model was fit with. Evaluating the `data`
  # component of the saved call in the formula's environment makes this
  # work even when the model was fit inside another function, where a
  # bare eval() in the caller's frame would fail to find the object.
  data <- eval(object$call$data, envir = environment(formula))
  # Forward ... so user-supplied conf.level, iterm, and title are honored
  # instead of being silently dropped.
  ols_regress.default(object = formula, data = data, ...)
}
#' @export
#'
print.ols_regress <- function(x, ...) {
  # Delegate all formatting to the internal print_reg() helper; `...` is
  # accepted for S3 print-method consistency but is not used.
  print_reg(x)
}
| /R/ols-regression.R | no_license | AminHP/olsrr | R | false | false | 3,914 | r | #' Ordinary least squares regression
#'
#' @description Ordinary least squares regression.
#'
#' @param object An object of class "formula" (or one that can be coerced to
#' that class): a symbolic description of the model to be fitted or class
#' \code{lm}.
#'
#' @param ... Other inputs.
#'
#' @return \code{ols_regress} returns an object of class \code{"ols_regress"}.
#' An object of class \code{"ols_regress"} is a list containing the following
#' components:
#'
#' \item{r}{square root of rsquare, correlation between observed and predicted values of dependent variable}
#' \item{rsq}{coefficient of determination or r-square}
#' \item{adjr}{adjusted rsquare}
#' \item{sigma}{root mean squared error}
#' \item{cv}{coefficient of variation}
#' \item{mse}{mean squared error}
#' \item{mae}{mean absolute error}
#' \item{aic}{akaike information criteria}
#' \item{sbc}{bayesian information criteria}
#' \item{sbic}{sawa bayesian information criteria}
#' \item{prsq}{predicted rsquare}
#' \item{error_df}{residual degrees of freedom}
#' \item{model_df}{regression degrees of freedom}
#' \item{total_df}{total degrees of freedom}
#' \item{ess}{error sum of squares}
#' \item{rss}{regression sum of squares}
#' \item{tss}{total sum of squares}
#' \item{rms}{regression mean square}
#' \item{ems}{error mean square}
#' \item{f}{f statistis}
#' \item{p}{p-value for \code{f}}
#' \item{n}{number of predictors including intercept}
#' \item{betas}{betas; estimated coefficients}
#' \item{sbetas}{standardized betas}
#' \item{std_errors}{standard errors}
#' \item{tvalues}{t values}
#' \item{pvalues}{p-value of \code{tvalues}}
#' \item{df}{degrees of freedom of \code{betas}}
#' \item{conf_lm}{confidence intervals for coefficients}
#' \item{title}{title for the model}
#' \item{dependent}{character vector; name of the dependent variable}
#' \item{predictors}{character vector; name of the predictor variables}
#' \item{mvars}{character vector; name of the predictor variables including intercept}
#' \item{model}{input model for \code{ols_regress}}
#'
#' @section Interaction Terms:
#' If the model includes interaction terms, the standardized betas
#' are computed after scaling and centering the predictors.
#'
#' @references https://www.ssc.wisc.edu/~hemken/Stataworkshops/stdBeta/Getting%20Standardized%20Coefficients%20Right.pdf
#'
#' @examples
#' ols_regress(mpg ~ disp + hp + wt, data = mtcars)
#'
#' # if model includes interaction terms set iterm to TRUE
#' ols_regress(mpg ~ disp * wt, data = mtcars, iterm = TRUE)
#'
#' @export
#'
ols_regress <- function(object, ...) UseMethod("ols_regress")
#' @export
#'
ols_regress.default <- function(object, data, conf.level = 0.95,
                                iterm = FALSE, title = "model", ...) {

  # Validate inputs up front so errors surface before any fitting happens.
  if (missing(data)) {
    stop("data missing", call. = FALSE)
  }

  if (!is.numeric(conf.level)) {
    stop("conf.level must be numeric", call. = FALSE)
  }

  # Scalar condition in `if`: use short-circuiting `||`, not vectorized `|`.
  if ((conf.level < 0) || (conf.level > 1)) {
    stop("conf.level must be between 0 and 1", call. = FALSE)
  }

  check_logic(iterm)

  if (!is.character(title)) {
    stop(paste(title, "is not a string, Please specify a string as title."), call. = FALSE)
  }

  # detect if model formula includes interaction terms
  if (inherits(object, "formula")) {
    # as.character(formula)[3] is the RHS; grepl over the formula checks it
    detect_iterm <- grepl(object, pattern = "\\*")[3]
  } else {
    detect_iterm <- grepl(object, pattern = "\\*")
  }

  # Set interaction to TRUE if the formula contains interaction terms.
  # isTRUE(any(...)) guards against NA (a one-sided formula has no third
  # part, so indexing [3] yields NA) and against length != 1 results from
  # the non-formula branch, both of which would break a bare `if ()`.
  if (isTRUE(any(detect_iterm))) {
    iterm <- TRUE
  }

  # Delegate the actual computation to the internal helper and tag the
  # result so the print.ols_regress method dispatches on it.
  result <- reg_comp(object, data, conf.level, iterm, title)
  class(result) <- "ols_regress"
  result
}
#' @rdname ols_regress
#' @export
#'
ols_regress.lm <- function(object, ...) {
  check_model(object)
  formula <- formula(object)
  # Recover the data the model was fit with. Evaluating the `data`
  # component of the saved call in the formula's environment makes this
  # work even when the model was fit inside another function, where a
  # bare eval() in the caller's frame would fail to find the object.
  data <- eval(object$call$data, envir = environment(formula))
  # Forward ... so user-supplied conf.level, iterm, and title are honored
  # instead of being silently dropped.
  ols_regress.default(object = formula, data = data, ...)
}
#' @export
#'
print.ols_regress <- function(x, ...) {
  # Delegate all formatting to the internal print_reg() helper; `...` is
  # accepted for S3 print-method consistency but is not used.
  print_reg(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_depth.R
\name{render_depth}
\alias{render_depth}
\title{Render Depth of Field}
\usage{
render_depth(
focus = NULL,
focallength = 100,
fstop = 4,
filename = NULL,
preview_focus = FALSE,
bokehshape = "circle",
bokehintensity = 1,
bokehlimit = 0.8,
rotation = 0,
gamma_correction = TRUE,
aberration = 0,
transparent_water = FALSE,
heightmap = NULL,
zscale = NULL,
title_text = NULL,
title_offset = c(20, 20),
title_color = "black",
title_size = 30,
title_font = "sans",
title_bar_color = NULL,
title_bar_alpha = 0.5,
title_position = "northwest",
image_overlay = NULL,
vignette = FALSE,
vignette_color = "black",
vignette_radius = 1.3,
progbar = interactive(),
software_render = FALSE,
width = NULL,
height = NULL,
camera_location = NULL,
camera_lookat = c(0, 0, 0),
background = "white",
text_angle = NULL,
text_size = 10,
text_offset = c(0, 0, 0),
point_radius = 0.5,
line_offset = 1e-07,
cache_scene = FALSE,
reset_scene_cache = FALSE,
print_scene_info = FALSE,
instant_capture = interactive(),
clear = FALSE,
bring_to_front = FALSE,
...
)
}
\arguments{
\item{focus}{Focal point. Defaults to the center of the bounding box. Depth in which to blur, in distance to the camera plane.}
\item{focallength}{Default `1`. Focal length of the virtual camera.}
\item{fstop}{Default `1`. F-stop of the virtual camera.}
\item{filename}{The filename of the image to be saved. If this is not given, the image will be plotted instead.}
\item{preview_focus}{Default `FALSE`. If `TRUE`, a red line will be drawn across the image
showing where the camera will be focused.}
\item{bokehshape}{Default `circle`. Also built-in: `hex`. The shape of the bokeh.}
\item{bokehintensity}{Default `3`. Intensity of the bokeh when the pixel intensity is greater than `bokehlimit`.}
\item{bokehlimit}{Default `0.8`. Limit after which the bokeh intensity is increased by `bokehintensity`.}
\item{rotation}{Default `0`. Number of degrees to rotate the hexagon bokeh shape.}
\item{gamma_correction}{Default `TRUE`. Controls gamma correction when adding colors. Default exponent of 2.2.}
\item{aberration}{Default `0`. Adds chromatic aberration to the image. Maximum of `1`.}
\item{transparent_water}{Default `FALSE`. If `TRUE`, depth is determined without water layer. User will have to re-render the water
layer with `render_water()` if they want to recreate the water layer.}
\item{heightmap}{Default `NULL`. The height matrix for the scene. Passing this will allow `render_depth()`
to automatically redraw the water layer if `transparent_water = TRUE`.}
\item{zscale}{Default `NULL`. The zscale value for the heightmap. Passing this will allow `render_depth()`
to automatically redraw the water layer if `transparent_water = TRUE`.}
\item{title_text}{Default `NULL`. Text. Adds a title to the image, using magick::image_annotate.}
\item{title_offset}{Default `c(20,20)`. Distance from the top-left (default, `gravity` direction in
image_annotate) corner to offset the title.}
\item{title_color}{Default `black`. Font color.}
\item{title_size}{Default `30`. Font size in pixels.}
\item{title_font}{Default `sans`. String with font family such as "sans", "mono", "serif", "Times", "Helvetica",
"Trebuchet", "Georgia", "Palatino" or "Comic Sans".}
\item{title_bar_color}{Default `NULL`. If a color, this will create a colored bar under the title.}
\item{title_bar_alpha}{Default `0.5`. Transparency of the title bar.}
\item{title_position}{Default `northwest`. Position of the title.}
\item{image_overlay}{Default `NULL`. Either a string indicating the location of a png image to overlay
over the image (transparency included), or a 4-layer RGBA array. This image will be resized to the
dimension of the image if it does not match exactly.}
\item{vignette}{Default `FALSE`. If `TRUE` or numeric, a camera vignetting effect will be added to the image.
`1` is the darkest vignetting, while `0` is no vignetting. If vignette is a length-2 vector, the second entry will
control the blurriness of the vignette effect.}
\item{vignette_color}{Default `"black"`. Color of the vignette.}
\item{vignette_radius}{Default `1.3`. Radius of the vignette, as a proportion of the image dimensions.}
\item{progbar}{Default `TRUE` if in an interactive session. Displays a progress bar.}
\item{software_render}{Default `FALSE`. If `TRUE`, rayshader will use the rayvertex package to render the snapshot, which
is not constrained by the screen size or requires OpenGL.}
\item{width}{Default `NULL`. Optional argument to pass to `rgl::snapshot3d()` to specify the
width when `software_render = TRUE`..}
\item{height}{Default `NULL`. Optional argument to pass to `rgl::snapshot3d()` to specify the
height when `software_render = TRUE`.}
\item{camera_location}{Default `NULL`. Custom position of the camera. The `FOV`, `width`, and `height` arguments will still
be derived from the rgl window.}
\item{camera_lookat}{Default `NULL`. Custom point at which the camera is directed. The `FOV`, `width`, and `height` arguments will still
be derived from the rgl window.}
\item{background}{Default `"white"`. Background color when `software_render = TRUE`.}
\item{text_angle}{Default `NULL`, which forces the text always to face the camera. If a single angle (degrees),
will specify the absolute angle all the labels are facing. If three angles, this will specify all three orientations
(relative to the x,y, and z axes) of the text labels.}
\item{text_size}{Default `10`. Height of the text.}
\item{text_offset}{Default `c(0,0,0)`. Offset to be applied to all text labels.}
\item{point_radius}{Default `0.5`. Radius of 3D points (rendered with `render_points()`.}
\item{line_offset}{Default `1e-7`. Small number indicating the offset in the scene to apply to lines if using software rendering. Increase this if your lines
aren't showing up, or decrease it if lines are appearing through solid objects.}
\item{cache_scene}{Default `FALSE`. Whether to cache the current scene to memory so it does not have to be converted to a `raymesh` object
each time `render_snapshot()` is called. If `TRUE` and a scene has been cached, it will be used when rendering.}
\item{reset_scene_cache}{Default `FALSE`. Resets the scene cache before rendering.}
\item{print_scene_info}{Default `FALSE`. If `TRUE`, it will print the position and lookat point of the camera.}
\item{instant_capture}{Default `TRUE` if interactive, `FALSE` otherwise. If `FALSE`, a slight delay is added
before taking the snapshot. This can help stop prevent rendering issues when running scripts.}
\item{clear}{Default `FALSE`. If `TRUE`, the current `rgl` device will be cleared.}
\item{bring_to_front}{Default `FALSE`. Whether to bring the window to the front when rendering the snapshot.}
\item{...}{Additional parameters to pass to `rayvertex::rasterize_scene()`.}
}
\value{
4-layer RGBA array.
}
\description{
Adds depth of field to the current RGL scene by simulating a synthetic aperture.
The size of the circle of confusion is determined by the following formula (z_depth is from the image's depth map).
\code{abs(z_depth-focus)*focal_length^2/(f_stop*z_depth*(focus - focal_length))}
}
\examples{
if(run_documentation()) {
montereybay \%>\%
sphere_shade() \%>\%
plot_3d(montereybay,zscale=50, water=TRUE, waterlinecolor="white",
zoom=0.3,theta=-135,fov=70, phi=20)
#Preview where the focal plane lies
render_depth(preview_focus=TRUE)
}
if(run_documentation()) {
#Render the depth of field effect
render_depth(focallength = 300)
}
if(run_documentation()) {
#Add a chromatic aberration effect
render_depth(focallength = 300, aberration = 0.3)
}
if(run_documentation()) {
#Render the depth of field effect, ignoring water and re-drawing the waterlayer
render_depth(preview_focus=TRUE,
heightmap = montereybay, zscale=50, focallength=300, transparent_water=TRUE)
render_depth(heightmap = montereybay, zscale=50, focallength=300, transparent_water=TRUE)
render_camera(theta=45,zoom=0.15,phi=20)
}
if(run_documentation()) {
#Change the bokeh shape and intensity
render_depth(focus=900, bokehshape = "circle",focallength=500,bokehintensity=30,
title_text = "Circular Bokeh", title_size = 30, title_color = "white",
title_bar_color = "black")
render_depth(focus=900, bokehshape = "hex",focallength=500,bokehintensity=30,
title_text = "Hexagonal Bokeh", title_size = 30, title_color = "white",
title_bar_color = "black")
}
if(run_documentation()) {
#Add a title and vignette effect.
render_camera(theta=0,zoom=0.7,phi=30)
render_depth(focallength = 250, title_text = "Monterey Bay, CA",
title_size = 20, title_color = "white", title_bar_color = "black", vignette = TRUE)
}
}
| /man/render_depth.Rd | no_license | tylermorganwall/rayshader | R | false | true | 8,891 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_depth.R
\name{render_depth}
\alias{render_depth}
\title{Render Depth of Field}
\usage{
render_depth(
focus = NULL,
focallength = 100,
fstop = 4,
filename = NULL,
preview_focus = FALSE,
bokehshape = "circle",
bokehintensity = 1,
bokehlimit = 0.8,
rotation = 0,
gamma_correction = TRUE,
aberration = 0,
transparent_water = FALSE,
heightmap = NULL,
zscale = NULL,
title_text = NULL,
title_offset = c(20, 20),
title_color = "black",
title_size = 30,
title_font = "sans",
title_bar_color = NULL,
title_bar_alpha = 0.5,
title_position = "northwest",
image_overlay = NULL,
vignette = FALSE,
vignette_color = "black",
vignette_radius = 1.3,
progbar = interactive(),
software_render = FALSE,
width = NULL,
height = NULL,
camera_location = NULL,
camera_lookat = c(0, 0, 0),
background = "white",
text_angle = NULL,
text_size = 10,
text_offset = c(0, 0, 0),
point_radius = 0.5,
line_offset = 1e-07,
cache_scene = FALSE,
reset_scene_cache = FALSE,
print_scene_info = FALSE,
instant_capture = interactive(),
clear = FALSE,
bring_to_front = FALSE,
...
)
}
\arguments{
\item{focus}{Focal point. Defaults to the center of the bounding box. Depth in which to blur, in distance to the camera plane.}
\item{focallength}{Default `1`. Focal length of the virtual camera.}
\item{fstop}{Default `1`. F-stop of the virtual camera.}
\item{filename}{The filename of the image to be saved. If this is not given, the image will be plotted instead.}
\item{preview_focus}{Default `FALSE`. If `TRUE`, a red line will be drawn across the image
showing where the camera will be focused.}
\item{bokehshape}{Default `circle`. Also built-in: `hex`. The shape of the bokeh.}
\item{bokehintensity}{Default `3`. Intensity of the bokeh when the pixel intensity is greater than `bokehlimit`.}
\item{bokehlimit}{Default `0.8`. Limit after which the bokeh intensity is increased by `bokehintensity`.}
\item{rotation}{Default `0`. Number of degrees to rotate the hexagon bokeh shape.}
\item{gamma_correction}{Default `TRUE`. Controls gamma correction when adding colors. Default exponent of 2.2.}
\item{aberration}{Default `0`. Adds chromatic aberration to the image. Maximum of `1`.}
\item{transparent_water}{Default `FALSE`. If `TRUE`, depth is determined without water layer. User will have to re-render the water
layer with `render_water()` if they want to recreate the water layer.}
\item{heightmap}{Default `NULL`. The height matrix for the scene. Passing this will allow `render_depth()`
to automatically redraw the water layer if `transparent_water = TRUE`.}
\item{zscale}{Default `NULL`. The zscale value for the heightmap. Passing this will allow `render_depth()`
to automatically redraw the water layer if `transparent_water = TRUE`.}
\item{title_text}{Default `NULL`. Text. Adds a title to the image, using magick::image_annotate.}
\item{title_offset}{Default `c(20,20)`. Distance from the top-left (default, `gravity` direction in
image_annotate) corner to offset the title.}
\item{title_color}{Default `black`. Font color.}
\item{title_size}{Default `30`. Font size in pixels.}
\item{title_font}{Default `sans`. String with font family such as "sans", "mono", "serif", "Times", "Helvetica",
"Trebuchet", "Georgia", "Palatino" or "Comic Sans".}
\item{title_bar_color}{Default `NULL`. If a color, this will create a colored bar under the title.}
\item{title_bar_alpha}{Default `0.5`. Transparency of the title bar.}
\item{title_position}{Default `northwest`. Position of the title.}
\item{image_overlay}{Default `NULL`. Either a string indicating the location of a png image to overlay
over the image (transparency included), or a 4-layer RGBA array. This image will be resized to the
dimension of the image if it does not match exactly.}
\item{vignette}{Default `FALSE`. If `TRUE` or numeric, a camera vignetting effect will be added to the image.
`1` is the darkest vignetting, while `0` is no vignetting. If vignette is a length-2 vector, the second entry will
control the blurriness of the vignette effect.}
\item{vignette_color}{Default `"black"`. Color of the vignette.}
\item{vignette_radius}{Default `1.3`. Radius of the vignette, as a proportion of the image dimensions.}
\item{progbar}{Default `TRUE` if in an interactive session. Displays a progress bar.}
\item{software_render}{Default `FALSE`. If `TRUE`, rayshader will use the rayvertex package to render the snapshot, which
is not constrained by the screen size and does not require OpenGL.}
\item{width}{Default `NULL`. Optional argument to pass to `rgl::snapshot3d()` to specify the
width when `software_render = TRUE`.}
\item{height}{Default `NULL`. Optional argument to pass to `rgl::snapshot3d()` to specify the
height when `software_render = TRUE`.}
\item{camera_location}{Default `NULL`. Custom position of the camera. The `FOV`, `width`, and `height` arguments will still
be derived from the rgl window.}
\item{camera_lookat}{Default `NULL`. Custom point at which the camera is directed. The `FOV`, `width`, and `height` arguments will still
be derived from the rgl window.}
\item{background}{Default `"white"`. Background color when `software_render = TRUE`.}
\item{text_angle}{Default `NULL`, which forces the text always to face the camera. If a single angle (degrees),
will specify the absolute angle all the labels are facing. If three angles, this will specify all three orientations
(relative to the x,y, and z axes) of the text labels.}
\item{text_size}{Default `10`. Height of the text.}
\item{text_offset}{Default `c(0,0,0)`. Offset to be applied to all text labels.}
\item{point_radius}{Default `0.5`. Radius of 3D points (rendered with `render_points()`).}
\item{line_offset}{Default `1e-7`. Small number indicating the offset in the scene to apply to lines if using software rendering. Increase this if your lines
aren't showing up, or decrease it if lines are appearing through solid objects.}
\item{cache_scene}{Default `FALSE`. Whether to cache the current scene to memory so it does not have to be converted to a `raymesh` object
each time `render_snapshot()` is called. If `TRUE` and a scene has been cached, it will be used when rendering.}
\item{reset_scene_cache}{Default `FALSE`. Resets the scene cache before rendering.}
\item{print_scene_info}{Default `FALSE`. If `TRUE`, it will print the position and lookat point of the camera.}
\item{instant_capture}{Default `TRUE` if interactive, `FALSE` otherwise. If `FALSE`, a slight delay is added
before taking the snapshot. This can help prevent rendering issues when running scripts.}
\item{clear}{Default `FALSE`. If `TRUE`, the current `rgl` device will be cleared.}
\item{bring_to_front}{Default `FALSE`. Whether to bring the window to the front when rendering the snapshot.}
\item{...}{Additional parameters to pass to `rayvertex::rasterize_scene()`.}
}
\value{
4-layer RGBA array.
}
\description{
Adds depth of field to the current RGL scene by simulating a synthetic aperture.
The size of the circle of confusion is determined by the following formula (z_depth is from the image's depth map).
\code{abs(z_depth-focus)*focal_length^2/(f_stop*z_depth*(focus - focal_length))}
}
\examples{
if(run_documentation()) {
montereybay \%>\%
sphere_shade() \%>\%
plot_3d(montereybay,zscale=50, water=TRUE, waterlinecolor="white",
zoom=0.3,theta=-135,fov=70, phi=20)
#Preview where the focal plane lies
render_depth(preview_focus=TRUE)
}
if(run_documentation()) {
#Render the depth of field effect
render_depth(focallength = 300)
}
if(run_documentation()) {
#Add a chromatic aberration effect
render_depth(focallength = 300, aberration = 0.3)
}
if(run_documentation()) {
#Render the depth of field effect, ignoring water and re-drawing the waterlayer
render_depth(preview_focus=TRUE,
heightmap = montereybay, zscale=50, focallength=300, transparent_water=TRUE)
render_depth(heightmap = montereybay, zscale=50, focallength=300, transparent_water=TRUE)
render_camera(theta=45,zoom=0.15,phi=20)
}
if(run_documentation()) {
#Change the bokeh shape and intensity
render_depth(focus=900, bokehshape = "circle",focallength=500,bokehintensity=30,
title_text = "Circular Bokeh", title_size = 30, title_color = "white",
title_bar_color = "black")
render_depth(focus=900, bokehshape = "hex",focallength=500,bokehintensity=30,
title_text = "Hexagonal Bokeh", title_size = 30, title_color = "white",
title_bar_color = "black")
}
if(run_documentation()) {
#Add a title and vignette effect.
render_camera(theta=0,zoom=0.7,phi=30)
render_depth(focallength = 250, title_text = "Monterey Bay, CA",
title_size = 20, title_color = "white", title_bar_color = "black", vignette = TRUE)
}
}
|
#' Tidal information for a location within the USA.
#' Tidal information only available for US cities. Units are in feet.
#'
#' @param location location set by set_location
#' @param key weather underground API key
#' @param raw if TRUE return raw httr object
#' @param message if TRUE print out requested URL
#' @return tbl_df with date, height and type
#' @export
#' @examples
#' \dontrun{
#' tide(set_location(territory = "Hawaii", city = "Honolulu"))
#' tide(set_location(territory = "Washington", city = "Seattle"))
#' tide(set_location(territory = "Louisiana", city = "New Orleans"))
#' }
tide <- function(location,
                 key = get_api_key(),
                 raw = FALSE,
                 message = TRUE) {
  # Query the Weather Underground "tide" endpoint for the given location.
  parsed_req <- wunderground_request(
    request_type = "tide",
    location = location,
    key = key,
    message = message
  )

  # Early exit: hand back the unparsed response for callers who want to
  # inspect it directly.
  if (raw) {
    return(parsed_req)
  }
  stop_for_error(parsed_req)

  if (!("tide" %in% names(parsed_req))) {
    stop("Cannot parse tide information from JSON for: ", location, call. = FALSE)
  }

  tide <- parsed_req$tide
  tide_info <- tide$tideInfo[[1]]

  # The API signals "no tidal station here" either with all-empty station
  # info or with an empty event summary; both cases get the same error.
  if (all(tide_info == "") || length(tide$tideSummary) == 0) {
    stop("Tide info not available for: ", location, call. = FALSE)
  }

  if (message) {
    # Report which tide station answered, with its coordinates.
    print(paste0(tide_info$tideSite, ": ", tide_info$lat, "/", tide_info$lon))
  }

  ## summary stats unused (min/max tide for day)
  tide_summary_stats <- tide$tideSummaryStats
  tide_summary <- tide$tideSummary

  # One single-row data.frame per tide event: timestamp (station-local tz),
  # height in feet (strip the "ft" suffix), and event type.
  df <- lapply(tide_summary, function(x) {
    data.frame(
      date = as.POSIXct(as.numeric(x$date$epoch), origin = "1970-01-01", tz = x$date$tzname),
      height = as.numeric(gsub("ft", "", x$data$height)),
      type = x$data$type,
      stringsAsFactors = FALSE
    )
  })

  # dplyr::tbl_df() is deprecated; as_tibble() is its supported replacement.
  tide_df <- dplyr::as_tibble(dplyr::bind_rows(df))
  # Drop events whose height could not be parsed.
  dplyr::filter(tide_df, !is.na(tide_df$height))
}
#' Raw Tidal data with data every 5 minutes for US locations
#' Tidal information only available for US cities. Units are in feet.
#'
#' @param location location set by set_location
#' @param key weather underground API key
#' @param raw if TRUE return raw httr object
#' @param message if TRUE print out requested URL
#' @return tbl_df with time (epoch) and height
#' @export
#' @examples
#' \dontrun{
#' rawtide(set_location(territory = "Hawaii", city = "Honolulu"))
#' rawtide(set_location(territory = "Washington", city = "Seattle"))
#' rawtide(set_location(territory = "Louisiana", city = "New Orleans"))
#' }
rawtide <- function(location,
                    key = get_api_key(),
                    raw = FALSE,
                    message = TRUE) {
  # Query the Weather Underground "rawtide" endpoint (5-minute observations).
  parsed_req <- wunderground_request(
    request_type = "rawtide",
    location = location,
    key = key,
    message = message
  )

  # Early exit: hand back the unparsed response for debugging.
  if (raw) {
    return(parsed_req)
  }
  stop_for_error(parsed_req)

  if (!("rawtide" %in% names(parsed_req))) {
    stop("Cannot parse tide information from JSON for: ", location, call. = FALSE)
  }

  rawtide <- parsed_req$rawtide
  tide_info <- rawtide$tideInfo[[1]]

  # All-empty station info or zero observations both mean no tidal data.
  if (all(tide_info == "") || length(rawtide$rawTideObs) == 0) {
    stop("Tide info not available for: ", location, call. = FALSE)
  }

  if (message) {
    # Report which tide station answered, with its coordinates.
    print(paste0(tide_info$tideSite, ": ", tide_info$lat, "/", tide_info$lon))
  }

  ## summary stats unused (min/max tide for day)
  rawtide_summary_stats <- rawtide$rawTideStats
  rawtide_summary <- rawtide$rawTideObs

  # Station-local timezone; reuse tide_info instead of re-indexing tideInfo.
  tz <- tide_info$tzname

  # One single-row data.frame per observation.
  df <- lapply(rawtide_summary, function(x) {
    data.frame(
      # as.numeric() guards against the epoch arriving as a character string,
      # as it does for the "tide" endpoint (no-op when it is already numeric).
      date = as.POSIXct(as.numeric(x$epoch), origin = "1970-01-01", tz = tz),
      height = x$height,
      stringsAsFactors = FALSE
    )
  })

  # dplyr::tbl_df() is deprecated; as_tibble() is its supported replacement.
  dplyr::as_tibble(dplyr::bind_rows(df))
}
| /R/tide.R | no_license | cran/rwunderground | R | false | false | 3,750 | r | #' Tidal information for a location within the USA.
#' Tidal information only available for US cities. Units are in feet.
#'
#' @param location location set by set_location
#' @param key weather underground API key
#' @param raw if TRUE return raw httr object
#' @param message if TRUE print out requested URL
#' @return tbl_df with date, height and type
#' @export
#' @examples
#' \dontrun{
#' tide(set_location(territory = "Hawaii", city = "Honolulu"))
#' tide(set_location(territory = "Washington", city = "Seattle"))
#' tide(set_location(territory = "Louisiana", city = "New Orleans"))
#' }
tide <- function(location,
                 key = get_api_key(),
                 raw = FALSE,
                 message = TRUE) {
  # Hit the Weather Underground "tide" endpoint for this location.
  resp <- wunderground_request(
    request_type = "tide",
    location = location,
    key = key,
    message = message
  )

  # Return the unparsed response unchanged when the caller asked for it.
  if (raw) {
    return(resp)
  }
  stop_for_error(resp)

  if (!("tide" %in% names(resp))) {
    stop(paste0("Cannot parse tide information from JSON for: ", location))
  }

  tide <- resp$tide
  tide_info <- tide$tideInfo[[1]]
  # No usable station info, or no tide events at all -> bail out.
  if (all(tide_info == "")) stop(paste0("Tide info not available for: ", location))
  if (length(tide$tideSummary) == 0) stop(paste0("Tide info not available for: ", location))

  if (message) {
    # Echo the responding tide station and its coordinates.
    print(paste0(tide_info$tideSite, ": ", tide_info$lat, "/", tide_info$lon))
  }

  ## summary stats unused (min/max tide for day)
  tide_summary_stats <- tide$tideSummaryStats

  # Convert every tide event into a one-row data.frame, then stack them:
  # timestamp in the station's timezone, height in feet, event type.
  event_rows <- lapply(tide$tideSummary, function(evt) {
    data.frame(
      date = as.POSIXct(as.numeric(evt$date$epoch), origin = "1970-01-01", tz = evt$date$tzname),
      height = as.numeric(gsub("ft", "", evt$data$height)),
      type = evt$data$type,
      stringsAsFactors = FALSE
    )
  })

  tide_df <- dplyr::tbl_df(dplyr::bind_rows(event_rows))
  # Keep only events with a parseable height.
  dplyr::filter(tide_df, !is.na(tide_df$height))
}
#' Raw Tidal data with data every 5 minutes for US locations
#' Tidal information only available for US cities. Units are in feet.
#'
#' @param location location set by set_location
#' @param key weather underground API key
#' @param raw if TRUE return raw httr object
#' @param message if TRUE print out requested URL
#' @return tbl_df with time (epoch) and height
#' @export
#' @examples
#' \dontrun{
#' rawtide(set_location(territory = "Hawaii", city = "Honolulu"))
#' rawtide(set_location(territory = "Washington", city = "Seattle"))
#' rawtide(set_location(territory = "Louisiana", city = "New Orleans"))
#' }
# Fetch raw (5-minute interval) tide observations for a US location and
# return them as a tbl_df of POSIXct timestamps and heights (feet).
rawtide <- function(location,
key = get_api_key(),
raw = FALSE,
message = TRUE) {
# Request the "rawtide" endpoint; message = TRUE also echoes the URL.
parsed_req <- wunderground_request(
request_type = "rawtide",
location = location,
key = key,
message = message
)
# Early exit: hand back the unparsed response object for debugging.
if (raw) {
return(parsed_req)
}
stop_for_error(parsed_req)
# The payload must contain a "rawtide" element; otherwise the response is
# not usable for this location.
if (!("rawtide" %in% names(parsed_req))) {
stop(paste0("Cannot parse tide information from JSON for: ", location))
}
rawtide <- parsed_req$rawtide
tide_info <- rawtide$tideInfo[[1]]
# All-empty station info or zero observations both mean no tidal data here.
if (all(tide_info == "")) stop(paste0("Tide info not available for: ", location))
if (length(rawtide$rawTideObs) == 0) stop(paste0("Tide info not available for: ", location))
if (message) {
# Echo the responding tide station and its coordinates.
print(paste0(tide_info$tideSite, ": ", tide_info$lat, "/", tide_info$lon))
}
## summary stats unused (min/max tide for day)
rawtide_summary_stats <- rawtide$rawTideStats
rawtide_summary <- rawtide$rawTideObs
# Station-local timezone used when converting epoch seconds below.
tz <- rawtide$tideInfo[[1]]$tzname
# One single-row data.frame per 5-minute observation.
df <- lapply(rawtide_summary, function(x) {
data.frame(
date = as.POSIXct(x$epoch, origin = "1970-01-01", tz = tz),
height = x$height,
stringsAsFactors = FALSE
)
})
# Stack rows into one table. NOTE(review): dplyr::tbl_df() is deprecated;
# dplyr::as_tibble() is its replacement.
dplyr::tbl_df(dplyr::bind_rows(df))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/array.R, R/scalar.R
\docType{class}
\name{array}
\alias{array}
\alias{Array}
\alias{DictionaryArray}
\alias{StructArray}
\alias{ListArray}
\alias{LargeListArray}
\alias{FixedSizeListArray}
\alias{StructScalar}
\title{Arrow Arrays}
\description{
An \code{Array} is an immutable data array with some logical type
and some length. Most logical types are contained in the base
\code{Array} class; there are also subclasses for \code{DictionaryArray}, \code{ListArray},
and \code{StructArray}.
}
\section{Factory}{
The \code{Array$create()} factory method instantiates an \code{Array} and
takes the following arguments:
\itemize{
\item \code{x}: an R vector, list, or \code{data.frame}
\item \code{type}: an optional \link[=data-type]{data type} for \code{x}. If omitted, the type
will be inferred from the data.
}
\code{Array$create()} will return the appropriate subclass of \code{Array}, such as
\code{DictionaryArray} when given an R factor.
To compose a \code{DictionaryArray} directly, call \code{DictionaryArray$create()},
which takes two arguments:
\itemize{
\item \code{x}: an R vector or \code{Array} of integers for the dictionary indices
\item \code{dict}: an R vector or \code{Array} of dictionary values (like R factor levels
but not limited to strings only)
}
}
\section{Usage}{
\preformatted{a <- Array$create(x)
length(a)
print(a)
a == a
}
}
\section{Methods}{
\itemize{
\item \verb{$IsNull(i)}: Return true if value at index is null. Does not boundscheck
\item \verb{$IsValid(i)}: Return true if value at index is valid. Does not boundscheck
\item \verb{$length()}: Size in the number of elements this array contains
\item \verb{$offset}: A relative position into another array's data, to enable zero-copy slicing
\item \verb{$null_count}: The number of null entries in the array
\item \verb{$type}: logical type of data
\item \verb{$type_id()}: type id
\item \verb{$Equals(other)} : is this array equal to \code{other}
\item \verb{$ApproxEquals(other)} :
\item \verb{$Diff(other)} : return a string expressing the difference between two arrays
\item \verb{$data()}: return the underlying \link{ArrayData}
\item \verb{$as_vector()}: convert to an R vector
\item \verb{$ToString()}: string representation of the array
\item \verb{$Slice(offset, length = NULL)}: Construct a zero-copy slice of the array
with the indicated offset and length. If length is \code{NULL}, the slice goes
until the end of the array.
\item \verb{$Take(i)}: return an \code{Array} with values at positions given by integers
(R vector or Arrow Array) \code{i}.
\item \verb{$Filter(i, keep_na = TRUE)}: return an \code{Array} with values at positions where logical
vector (or Arrow boolean Array) \code{i} is \code{TRUE}.
\item \verb{$SortIndices(descending = FALSE)}: return an \code{Array} of integer positions that can be
used to rearrange the \code{Array} in ascending or descending order
\item \verb{$RangeEquals(other, start_idx, end_idx, other_start_idx)} :
\item \verb{$cast(target_type, safe = TRUE, options = cast_options(safe))}: Alter the
data in the array to change its type.
\item \verb{$View(type)}: Construct a zero-copy view of this array with the given type.
\item \verb{$Validate()} : Perform any validation checks to determine obvious inconsistencies
within the array's internal data. This can be an expensive check, potentially \code{O(length)}
}
}
\examples{
\dontshow{if (arrow_available()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
my_array <- Array$create(1:10)
my_array$type
my_array$cast(int8())
# Check if value is null; zero-indexed
na_array <- Array$create(c(1:5, NA))
na_array$IsNull(0)
na_array$IsNull(5)
na_array$IsValid(5)
na_array$null_count
# zero-copy slicing; the offset of the new Array will be the same as the index passed to $Slice
new_array <- na_array$Slice(5)
new_array$offset
# Compare 2 arrays
na_array2 = na_array
na_array2 == na_array # element-wise comparison
na_array2$Equals(na_array) # overall comparison
\dontshow{\}) # examplesIf}
}
| /r/man/array.Rd | permissive | Sebastiaan-Alvarez-Rodriguez/arrow | R | false | true | 4,092 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/array.R, R/scalar.R
\docType{class}
\name{array}
\alias{array}
\alias{Array}
\alias{DictionaryArray}
\alias{StructArray}
\alias{ListArray}
\alias{LargeListArray}
\alias{FixedSizeListArray}
\alias{StructScalar}
\title{Arrow Arrays}
\description{
An \code{Array} is an immutable data array with some logical type
and some length. Most logical types are contained in the base
\code{Array} class; there are also subclasses for \code{DictionaryArray}, \code{ListArray},
and \code{StructArray}.
}
\section{Factory}{
The \code{Array$create()} factory method instantiates an \code{Array} and
takes the following arguments:
\itemize{
\item \code{x}: an R vector, list, or \code{data.frame}
\item \code{type}: an optional \link[=data-type]{data type} for \code{x}. If omitted, the type
will be inferred from the data.
}
\code{Array$create()} will return the appropriate subclass of \code{Array}, such as
\code{DictionaryArray} when given an R factor.
To compose a \code{DictionaryArray} directly, call \code{DictionaryArray$create()},
which takes two arguments:
\itemize{
\item \code{x}: an R vector or \code{Array} of integers for the dictionary indices
\item \code{dict}: an R vector or \code{Array} of dictionary values (like R factor levels
but not limited to strings only)
}
}
\section{Usage}{
\preformatted{a <- Array$create(x)
length(a)
print(a)
a == a
}
}
\section{Methods}{
\itemize{
\item \verb{$IsNull(i)}: Return true if value at index is null. Does not boundscheck
\item \verb{$IsValid(i)}: Return true if value at index is valid. Does not boundscheck
\item \verb{$length()}: Size in the number of elements this array contains
\item \verb{$offset}: A relative position into another array's data, to enable zero-copy slicing
\item \verb{$null_count}: The number of null entries in the array
\item \verb{$type}: logical type of data
\item \verb{$type_id()}: type id
\item \verb{$Equals(other)} : is this array equal to \code{other}
\item \verb{$ApproxEquals(other)} :
\item \verb{$Diff(other)} : return a string expressing the difference between two arrays
\item \verb{$data()}: return the underlying \link{ArrayData}
\item \verb{$as_vector()}: convert to an R vector
\item \verb{$ToString()}: string representation of the array
\item \verb{$Slice(offset, length = NULL)}: Construct a zero-copy slice of the array
with the indicated offset and length. If length is \code{NULL}, the slice goes
until the end of the array.
\item \verb{$Take(i)}: return an \code{Array} with values at positions given by integers
(R vector or Arrow Array) \code{i}.
\item \verb{$Filter(i, keep_na = TRUE)}: return an \code{Array} with values at positions where logical
vector (or Arrow boolean Array) \code{i} is \code{TRUE}.
\item \verb{$SortIndices(descending = FALSE)}: return an \code{Array} of integer positions that can be
used to rearrange the \code{Array} in ascending or descending order
\item \verb{$RangeEquals(other, start_idx, end_idx, other_start_idx)} :
\item \verb{$cast(target_type, safe = TRUE, options = cast_options(safe))}: Alter the
data in the array to change its type.
\item \verb{$View(type)}: Construct a zero-copy view of this array with the given type.
\item \verb{$Validate()} : Perform any validation checks to determine obvious inconsistencies
within the array's internal data. This can be an expensive check, potentially \code{O(length)}
}
}
\examples{
\dontshow{if (arrow_available()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
my_array <- Array$create(1:10)
my_array$type
my_array$cast(int8())
# Check if value is null; zero-indexed
na_array <- Array$create(c(1:5, NA))
na_array$IsNull(0)
na_array$IsNull(5)
na_array$IsValid(5)
na_array$null_count
# zero-copy slicing; the offset of the new Array will be the same as the index passed to $Slice
new_array <- na_array$Slice(5)
new_array$offset
# Compare 2 arrays
na_array2 = na_array
na_array2 == na_array # element-wise comparison
na_array2$Equals(na_array) # overall comparison
\dontshow{\}) # examplesIf}
}
|
#!/usr/bin/R
# Read the test-accuracy CSV exported for one model.
#
# Args:
#   folder: variant subdirectory under "out/" (e.g. "base/", "paper/", or ""
#           for the CNN baseline); expected to already end with "/".
#   m:      model name; both the directory and the file are named after it.
# Returns: a data.frame of the CSV contents (no header row).
get_test_data <- function(folder, m) {
  # Path layout: out/<folder><model>/<model>_test_data.csv
  path <- paste0("out/", folder, m, "/", m, "_test_data.csv")
  read.csv(path, header = FALSE, sep = ",", fill = TRUE)
}
# Compare test accuracies of all GNN variants against a CNN baseline.
# For every model the function plots mean +/- standard deviation of the
# overall ("total") run and of each of 4 cross-validation folds, side by
# side for the "base/" and "paper/" feature variants.
plot_Results = function()
{
# pdf(file="Results_all_GNNs.pdf", height=10/2.54, width=20/2.54)
# Two feature/graph variants; every model was evaluated on both.
folders = list("base/", "paper/")
models = list("GraphSAGE", "GCN", "GraphSAGEWithJK", "GATNet", "GCNWithJK","OwnGraphNN2")#,"OwnGraphNN", "NMP")
num_models = length(models)
# Preallocated result vectors: entries 1..num_models hold "base/" results,
# entries (num_models+1)..2*num_models hold "paper/" results.
total_sds = vector(length=2*num_models)
total_avgs = vector(length=2*num_models)
fold0_sds = vector(length=2*num_models)
fold1_sds = vector(length=2*num_models)
fold2_sds = vector(length=2*num_models)
fold3_sds = vector(length=2*num_models)
fold0_avgs = vector(length=2*num_models)
fold1_avgs = vector(length=2*num_models)
fold2_avgs = vector(length=2*num_models)
fold3_avgs = vector(length=2*num_models)
c = 0
# Fill the vectors: each model CSV has rows = (total, fold0..fold3) and
# columns = (sd, mean).
for(folder in folders)
{
for(m in models)
{
c=c+1
df = get_test_data(folder,m)
total_sds[c]=df[1,1]
total_avgs[c]=df[1,2]
fold0_sds[c]=df[2,1]
fold0_avgs[c]=df[2,2]
fold1_sds[c]=df[3,1]
fold1_avgs[c]=df[3,2]
fold2_sds[c]=df[4,1]
fold2_avgs[c]=df[4,2]
fold3_sds[c]=df[5,1]
fold3_avgs[c]=df[5,2]
}
}
# CNN baseline lives directly under out/CNN/ (empty folder component).
df = get_test_data("", "CNN")
cnn_sds = c(df[1,1],df[2,1],df[3,1],df[4,1],df[5,1])
cnn_avgs = c(df[1,2],df[2,2],df[3,2],df[4,2],df[5,2])
# x positions: one slot per GNN model, plus one extra slot (x2) for the CNN.
x = 1:num_models
x2 = 1:(num_models+1)
x_max= num_models+2
# Horizontal spacing between the total/fold0..fold3 sub-columns of a slot.
offset = 0.1
# Warm colors mark the "base/" variant, cool colors the "paper/" variant.
colors_base="medium violet red"
colors_base_0 = "red"
colors_base_1 = "dark orange"
colors_base_2 = "gold"
colors_base_3 = "yellow"
colors_paper="medium blue"
colors_paper_0 = "dodger blue"
colors_paper_1 = "cyan"
colors_paper_2 = "medium turquoise"
colors_paper_3 = "dark cyan"
# draw plot
# Empty canvas plus background shading and grid lines.
plot(NULL, xlim=c(0.7, x_max-0.3), ylim=c(0.65, 1), xaxt="n", xlab="", ylab="test accuracy", cex.axis=0.5, cex.lab=0.5)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "white smoke")
rect(xleft=x2-0.05, xright=x2+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey", lwd=0.5)
rect(xleft=x2+2*offset-0.05, xright=x2+2*offset+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey", lwd=0.5)
rect(xleft=x2+4*offset-0.05, xright=x2+4*offset+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey",lwd=0.5)
abline(h=seq(0.65,1,0.01), lty="dotted", col="grey",lwd=0.5)
abline(h=seq(0.65,1,0.05), lty="dashed", col="grey",lwd=0.5)
abline(v=seq(1.7, num_models-0.3,1), col="grey",lwd=0.5)
# draw total sd of every model
# Error bars via arrows(code=3, angle=90): base/ drawn slightly left (-0.01),
# paper/ slightly right (+0.01) of each slot.
# NOTE(review): `len=` partially matches arrows()'s `length` argument;
# spelling it out as `length=` would be more robust.
arrows(x0=x-0.01, x1=x-0.01, y0=head(total_avgs,num_models)-head(total_sds,num_models), y1=head(total_avgs,num_models)+head(total_sds,num_models), code=3, angle=90, len=0.02, col=colors_base, lwd=1)
arrows(x0=x+0.01, x1=x+0.01, y0=tail(total_avgs,num_models)-tail(total_sds,num_models), y1=tail(total_avgs,num_models)+tail(total_sds,num_models), code=3, angle=90, len=0.02, col=colors_paper, lwd=1)
# draw sd of CNN
for(i in 1:5)
{
arrows(x0=tail(x2,1)+(i-1)*offset, x1=tail(x2,1)+(i-1)*offset, y0=cnn_avgs[i]-cnn_sds[i],y1=cnn_avgs[i]+cnn_sds[i], code=3, angle=90, len=0.02, lwd=1)
}
# draw sd for every fold and every model
arrows(x0=x+offset-0.01, x1=x+offset-0.01, y0=head(fold0_avgs,num_models)-head(fold0_sds,num_models), y1=head(fold0_avgs,num_models)+head(fold0_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_0, lwd=1)
arrows(x0=x+offset+0.01, x1=x+offset+0.01, y0=tail(fold0_avgs,num_models)-tail(fold0_sds,num_models), y1=tail(fold0_avgs,num_models)+tail(fold0_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_0, lwd=1)
arrows(x0=x+2*offset-0.01, x1=x+2*offset-0.01, y0=head(fold1_avgs,num_models)-head(fold1_sds,num_models), y1=head(fold1_avgs,num_models)+head(fold1_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_1, lwd=1)
arrows(x0=x+2*offset+0.01, x1=x+2*offset+0.01, y0=tail(fold1_avgs,num_models)-tail(fold1_sds,num_models), y1=tail(fold1_avgs,num_models)+tail(fold1_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_1, lwd=1)
arrows(x0=x+3*offset-0.01, x1=x+3*offset-0.01, y0=head(fold2_avgs,num_models)-head(fold2_sds,num_models), y1=head(fold2_avgs,num_models)+head(fold2_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_2, lwd=1)
arrows(x0=x+3*offset+0.01, x1=x+3*offset+0.01, y0=tail(fold2_avgs,num_models)-tail(fold2_sds,num_models), y1=tail(fold2_avgs,num_models)+tail(fold2_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_2, lwd=1)
arrows(x0=x+4*offset-0.01, x1=x+4*offset-0.01, y0=head(fold3_avgs,num_models)-head(fold3_sds,num_models), y1=head(fold3_avgs,num_models)+head(fold3_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_3, lwd=1)
arrows(x0=x+4*offset+0.01, x1=x+4*offset+0.01, y0=tail(fold3_avgs,num_models)-tail(fold3_sds,num_models), y1=tail(fold3_avgs,num_models)+tail(fold3_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_3, lwd=1)
# draw total mean of every model
# Filled circle = base/, filled triangle = paper/; value printed to the left.
points(x-0.01, head(total_avgs, num_models), col= colors_base, pch=16, cex=0.6)
text(x-0.23, head(total_avgs, num_models), col= colors_base, label=round(head(total_avgs, num_models), digits=3), cex=0.6)
points(x+0.01, tail(total_avgs, num_models), col= colors_paper, pch=17, cex=0.6)
text(x-0.21, tail(total_avgs, num_models), col= colors_paper, label=round(tail(total_avgs, num_models), digits=3),cex=0.6)
# draw means of every fold and every model
points(x+offset-0.01, head(fold0_avgs,num_models), col=colors_base_0, pch=16,cex=0.6)
points(x+offset+0.01, tail(fold0_avgs,num_models), col= colors_paper_0, pch=17,cex=0.6)
points(x+2*offset-0.01, head(fold1_avgs,num_models), col=colors_base_1, pch=16,cex=0.6)
points(x+2*offset+0.01, tail(fold1_avgs,num_models), col= colors_paper_1, pch=17,cex=0.6)
points(x+3*offset-0.01, head(fold2_avgs,num_models), col=colors_base_2, pch=16,cex=0.6)
points(x+3*offset+0.01, tail(fold2_avgs,num_models), col= colors_paper_2, pch=17,cex=0.6)
points(x+4*offset-0.01, head(fold3_avgs,num_models), col=colors_base_3, pch=16,cex=0.6)
points(x+4*offset+0.01, tail(fold3_avgs,num_models), col= colors_paper_3, pch=17,cex=0.6)
# draw means of CNN
for(i in 1:5)
{
points(tail(x2,1)+(i-1)*offset, cnn_avgs[i], pch=16, cex=0.6)
}
text(tail(x2,1)-0.2, cnn_avgs[1], label=round(cnn_avgs[1], digits=3), cex=0.6)
par(cex=0.5)
# label axis
# Bottom axis: one "total/fold 0..3" label group per slot (GNNs + CNN).
# NOTE(review): label_location is grown with c() inside the loop; fine for
# this size, but preallocation would be the idiomatic form.
splits = list("total", "fold 0", "fold 1", "fold 2", "fold 3")
label_location=c()
for(i in x2)
{
label_location = c(label_location,seq(i, i+0.4, 0.1))
}
axis(1, at=label_location, labels=rep(splits, num_models+1) , las=3,lwd.ticks=0.5)
par(cex=0.5)
# Top axis: model names centered over their slots.
axis(3, at=(1:(num_models+1))+0.2, labels=c(models, "CNN"), cex=0.1, lwd.ticks=0.5)
par(cex=1)
# dev.copy(pdf, "Results_all_GNNs.pdf")
# dev.off()
}
# setwd("/home/admin1/Desktop/MasterProject/GNNpT1/GNNpT1")
# Script entry point: draw the comparison figure when this file is run.
plot_Results()
# Rscript plotResults.r
# Load one model's exported test-accuracy table.
# `folder` is the variant subdirectory under "out/" (already "/"-terminated,
# or "" for the CNN baseline); `m` names both the directory and the file.
get_test_data <- function(folder, m) {
  csv_path <- sprintf("out/%s%s/%s_test_data.csv", folder, m, m)
  read.csv(csv_path, header = FALSE, sep = ",", fill = TRUE)
}
plot_Results = function()
{
# pdf(file="Results_all_GNNs.pdf", height=10/2.54, width=20/2.54)
folders = list("base/", "paper/")
models = list("GraphSAGE", "GCN", "GraphSAGEWithJK", "GATNet", "GCNWithJK","OwnGraphNN2")#,"OwnGraphNN", "NMP")
num_models = length(models)
total_sds = vector(length=2*num_models)
total_avgs = vector(length=2*num_models)
fold0_sds = vector(length=2*num_models)
fold1_sds = vector(length=2*num_models)
fold2_sds = vector(length=2*num_models)
fold3_sds = vector(length=2*num_models)
fold0_avgs = vector(length=2*num_models)
fold1_avgs = vector(length=2*num_models)
fold2_avgs = vector(length=2*num_models)
fold3_avgs = vector(length=2*num_models)
c = 0
for(folder in folders)
{
for(m in models)
{
c=c+1
df = get_test_data(folder,m)
total_sds[c]=df[1,1]
total_avgs[c]=df[1,2]
fold0_sds[c]=df[2,1]
fold0_avgs[c]=df[2,2]
fold1_sds[c]=df[3,1]
fold1_avgs[c]=df[3,2]
fold2_sds[c]=df[4,1]
fold2_avgs[c]=df[4,2]
fold3_sds[c]=df[5,1]
fold3_avgs[c]=df[5,2]
}
}
df = get_test_data("", "CNN")
cnn_sds = c(df[1,1],df[2,1],df[3,1],df[4,1],df[5,1])
cnn_avgs = c(df[1,2],df[2,2],df[3,2],df[4,2],df[5,2])
x = 1:num_models
x2 = 1:(num_models+1)
x_max= num_models+2
offset = 0.1
colors_base="medium violet red"
colors_base_0 = "red"
colors_base_1 = "dark orange"
colors_base_2 = "gold"
colors_base_3 = "yellow"
colors_paper="medium blue"
colors_paper_0 = "dodger blue"
colors_paper_1 = "cyan"
colors_paper_2 = "medium turquoise"
colors_paper_3 = "dark cyan"
# draw plot
plot(NULL, xlim=c(0.7, x_max-0.3), ylim=c(0.65, 1), xaxt="n", xlab="", ylab="test accuracy", cex.axis=0.5, cex.lab=0.5)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "white smoke")
rect(xleft=x2-0.05, xright=x2+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey", lwd=0.5)
rect(xleft=x2+2*offset-0.05, xright=x2+2*offset+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey", lwd=0.5)
rect(xleft=x2+4*offset-0.05, xright=x2+4*offset+0.05, ybottom=par("usr")[3], ytop=par("usr")[4], col="white", border="grey",lwd=0.5)
abline(h=seq(0.65,1,0.01), lty="dotted", col="grey",lwd=0.5)
abline(h=seq(0.65,1,0.05), lty="dashed", col="grey",lwd=0.5)
abline(v=seq(1.7, num_models-0.3,1), col="grey",lwd=0.5)
# draw total sd of every model
arrows(x0=x-0.01, x1=x-0.01, y0=head(total_avgs,num_models)-head(total_sds,num_models), y1=head(total_avgs,num_models)+head(total_sds,num_models), code=3, angle=90, len=0.02, col=colors_base, lwd=1)
arrows(x0=x+0.01, x1=x+0.01, y0=tail(total_avgs,num_models)-tail(total_sds,num_models), y1=tail(total_avgs,num_models)+tail(total_sds,num_models), code=3, angle=90, len=0.02, col=colors_paper, lwd=1)
# draw sd of CNN
for(i in 1:5)
{
arrows(x0=tail(x2,1)+(i-1)*offset, x1=tail(x2,1)+(i-1)*offset, y0=cnn_avgs[i]-cnn_sds[i],y1=cnn_avgs[i]+cnn_sds[i], code=3, angle=90, len=0.02, lwd=1)
}
# draw sd for every fold and every model
arrows(x0=x+offset-0.01, x1=x+offset-0.01, y0=head(fold0_avgs,num_models)-head(fold0_sds,num_models), y1=head(fold0_avgs,num_models)+head(fold0_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_0, lwd=1)
arrows(x0=x+offset+0.01, x1=x+offset+0.01, y0=tail(fold0_avgs,num_models)-tail(fold0_sds,num_models), y1=tail(fold0_avgs,num_models)+tail(fold0_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_0, lwd=1)
arrows(x0=x+2*offset-0.01, x1=x+2*offset-0.01, y0=head(fold1_avgs,num_models)-head(fold1_sds,num_models), y1=head(fold1_avgs,num_models)+head(fold1_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_1, lwd=1)
arrows(x0=x+2*offset+0.01, x1=x+2*offset+0.01, y0=tail(fold1_avgs,num_models)-tail(fold1_sds,num_models), y1=tail(fold1_avgs,num_models)+tail(fold1_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_1, lwd=1)
arrows(x0=x+3*offset-0.01, x1=x+3*offset-0.01, y0=head(fold2_avgs,num_models)-head(fold2_sds,num_models), y1=head(fold2_avgs,num_models)+head(fold2_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_2, lwd=1)
arrows(x0=x+3*offset+0.01, x1=x+3*offset+0.01, y0=tail(fold2_avgs,num_models)-tail(fold2_sds,num_models), y1=tail(fold2_avgs,num_models)+tail(fold2_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_2, lwd=1)
arrows(x0=x+4*offset-0.01, x1=x+4*offset-0.01, y0=head(fold3_avgs,num_models)-head(fold3_sds,num_models), y1=head(fold3_avgs,num_models)+head(fold3_sds,num_models), code=3, angle=90, len=0.02, col=colors_base_3, lwd=1)
arrows(x0=x+4*offset+0.01, x1=x+4*offset+0.01, y0=tail(fold3_avgs,num_models)-tail(fold3_sds,num_models), y1=tail(fold3_avgs,num_models)+tail(fold3_sds,num_models), code=3, angle=90, len=0.02, col= colors_paper_3, lwd=1)
# draw total mean of every model
points(x-0.01, head(total_avgs, num_models), col= colors_base, pch=16, cex=0.6)
text(x-0.23, head(total_avgs, num_models), col= colors_base, label=round(head(total_avgs, num_models), digits=3), cex=0.6)
points(x+0.01, tail(total_avgs, num_models), col= colors_paper, pch=17, cex=0.6)
text(x-0.21, tail(total_avgs, num_models), col= colors_paper, label=round(tail(total_avgs, num_models), digits=3),cex=0.6)
# draw means of every fold and every model
points(x+offset-0.01, head(fold0_avgs,num_models), col=colors_base_0, pch=16,cex=0.6)
points(x+offset+0.01, tail(fold0_avgs,num_models), col= colors_paper_0, pch=17,cex=0.6)
points(x+2*offset-0.01, head(fold1_avgs,num_models), col=colors_base_1, pch=16,cex=0.6)
points(x+2*offset+0.01, tail(fold1_avgs,num_models), col= colors_paper_1, pch=17,cex=0.6)
points(x+3*offset-0.01, head(fold2_avgs,num_models), col=colors_base_2, pch=16,cex=0.6)
points(x+3*offset+0.01, tail(fold2_avgs,num_models), col= colors_paper_2, pch=17,cex=0.6)
points(x+4*offset-0.01, head(fold3_avgs,num_models), col=colors_base_3, pch=16,cex=0.6)
points(x+4*offset+0.01, tail(fold3_avgs,num_models), col= colors_paper_3, pch=17,cex=0.6)
# draw means of CNN
for(i in 1:5)
{
points(tail(x2,1)+(i-1)*offset, cnn_avgs[i], pch=16, cex=0.6)
}
text(tail(x2,1)-0.2, cnn_avgs[1], label=round(cnn_avgs[1], digits=3), cex=0.6)
par(cex=0.5)
# label axis
splits = list("total", "fold 0", "fold 1", "fold 2", "fold 3")
label_location=c()
for(i in x2)
{
label_location = c(label_location,seq(i, i+0.4, 0.1))
}
axis(1, at=label_location, labels=rep(splits, num_models+1) , las=3,lwd.ticks=0.5)
par(cex=0.5)
axis(3, at=(1:(num_models+1))+0.2, labels=c(models, "CNN"), cex=0.1, lwd.ticks=0.5)
par(cex=1)
# dev.copy(pdf, "Results_all_GNNs.pdf")
# dev.off()
}
# setwd("/home/admin1/Desktop/MasterProject/GNNpT1/GNNpT1")
plot_Results()
# Rscript plotResults.r |
library(MCMCglmm)
### Name: commutation
### Title: Commutation Matrix
### Aliases: commutation
### Keywords: array
### ** Examples
commutation(2,2)
| /data/genthat_extracted_code/MCMCglmm/examples/commutation.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 155 | r | library(MCMCglmm)
### Name: commutation
### Title: Commutation Matrix
### Aliases: commutation
### Keywords: array
### ** Examples
commutation(2,2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wbt.R
\name{wbt_list_tools}
\alias{wbt_list_tools}
\title{All available tools in WhiteboxTools.}
\usage{
wbt_list_tools(keywords = NULL)
}
\arguments{
\item{keywords}{Keywords may be used to search available tools.}
}
\value{
Return all available tools in WhiteboxTools that contain the keywords.
}
\description{
All available tools in WhiteboxTools.
}
\examples{
\dontrun{
wbt_list_tools("lidar")
}
}
| /man/wbt_list_tools.Rd | permissive | bkielstr/whiteboxR | R | false | true | 480 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wbt.R
\name{wbt_list_tools}
\alias{wbt_list_tools}
\title{All available tools in WhiteboxTools.}
\usage{
wbt_list_tools(keywords = NULL)
}
\arguments{
\item{keywords}{Keywords may be used to search available tools.}
}
\value{
Return all available tools in WhiteboxTools that contain the keywords.
}
\description{
All available tools in WhiteboxTools.
}
\examples{
\dontrun{
wbt_list_tools("lidar")
}
}
|
# Weighted proportion table: percentage version of wtable().
#
# Args:
#   var1, var2: variables to (cross-)tabulate; var2 is optional.
#   w: numeric vector of case weights (defaults to unweighted, all 1).
#   dir: percentaging direction for two-way tables:
#        0 = cell % of the grand total, 1 = row %, 2 = column %.
#   digits: number of decimal places kept in the result.
#   mar: keep the margin (totals) row/column in the output?
#   na: include a missing-values category (passed through to wtable)?
# Returns: a matrix/table of percentages.
prop.wtable <- function(var1,var2=NULL,w=rep.int(1,length(var1)),dir=0,digits=1,mar=TRUE,na=TRUE) {
# wtable() is always called with mar=TRUE so the margin can be dropped later.
# Because margins are included, sum(t) counts every observation twice in the
# one-way case and four times in the two-way case -- the 2x/4x factors below
# compensate for that. NOTE(review): assumes wtable() margins are plain
# row/column totals -- confirm against wtable()'s definition.
t <- wtable(var1,var2,w=w,digits=10,mar=TRUE,na=na)
if(is.null(var2)) {
wtab <- 100*2*t/sum(t)
rownames(wtab) <- rownames(t)
# drop the trailing margin entry when mar=FALSE
if(mar==FALSE) wtab <- as.matrix(wtab[-length(wtab),])
} else {
if(dir==0) wtab <- 100*4*t/sum(t)
# row percentages: each row divided by its (doubled, margin-inclusive) row sum
if(dir==1) wtab <- apply(t,2,function(x) 100*2*x/rowSums(t))
# column percentages: each column divided by its (doubled) column sum;
# note the local `t` shadows base::t here, but t(...) as a call still
# resolves to the transpose function
if(dir==2) wtab <- t(apply(t,1,function(x) 100*2*x/colSums(t)))
dimnames(wtab) <- dimnames(t)
# drop the margin row and column when mar=FALSE
if(mar==FALSE) wtab <- wtab[-nrow(wtab),-ncol(wtab)]
}
wtab <- round(wtab,digits)
return(wtab)
}
| /GDAtools/R/prop.wtable.R | no_license | ingted/R-Examples | R | false | false | 623 | r | prop.wtable <- function(var1,var2=NULL,w=rep.int(1,length(var1)),dir=0,digits=1,mar=TRUE,na=TRUE) {
t <- wtable(var1,var2,w=w,digits=10,mar=TRUE,na=na)
if(is.null(var2)) {
wtab <- 100*2*t/sum(t)
rownames(wtab) <- rownames(t)
if(mar==FALSE) wtab <- as.matrix(wtab[-length(wtab),])
} else {
if(dir==0) wtab <- 100*4*t/sum(t)
if(dir==1) wtab <- apply(t,2,function(x) 100*2*x/rowSums(t))
if(dir==2) wtab <- t(apply(t,1,function(x) 100*2*x/colSums(t)))
dimnames(wtab) <- dimnames(t)
if(mar==FALSE) wtab <- wtab[-nrow(wtab),-ncol(wtab)]
}
wtab <- round(wtab,digits)
return(wtab)
}
|
#' Use the bulk API to create, index, update, or delete documents.
#'
#' @export
#' @param x A data.frame or path to a file to load in the bulk API
#' @param index (character) The index name to use. Required for data.frame input, but
#' optional for file inputs.
#' @param type (character) The type name to use. If left as NULL, will be same name as index.
#' @param chunk_size (integer) Size of each chunk. If your data.frame is smaller
#' than \code{chunk_size}, this parameter is essentially ignored. We write in chunks because
#' at some point, depending on size of each document, and Elasticsearch setup, writing a very
#' large number of documents in one go becomes slow, so chunking can help. This parameter
#' is ignored if you pass a file name. Default: 1000
#' @param doc_ids An optional vector (character or numeric/integer) of document ids to use.
#' This vector has to equal the size of the documents you are passing in, and will error
#' if not. If you pass a factor we convert to character. Default: not passed
#' @param raw (logical) Get raw JSON back or not.
#' @param ... Pass on curl options to \code{\link[httr]{POST}}
#' @details More on the Bulk API:
#' \url{https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html}.
#'
#' This function dispatches on data.frame or character input. Character input has
#' to be a file name or the function stops with an error message.
#'
#' If you pass a data.frame to this function, we by default to an index operation,
#' that is, create the record in the index and type given by those parameters to the
#' function. Down the road perhaps we will try to support other operations on the
#' bulk API. if you pass a file, of course in that file, you can specify any
#' operations you want.
#'
#' Row names are dropped from data.frame, and top level names for a list are dropped
#' as well.
#'
#' A progress bar gives the progress for data.frames and lists
#'
#' @section Large numbers for document IDs:
#' Until recently, if you had very large integers for document IDs, \code{docs_bulk}
#' failed. It should be fixed now. Let us know if not.
#'
#' @examples \dontrun{
#' plosdat <- system.file("examples", "plos_data.json", package = "elastic")
#' docs_bulk(plosdat)
#' aliases_get()
#' index_delete(index='plos')
#' aliases_get()
#'
#' # Curl options
#' library("httr")
#' plosdat <- system.file("examples", "plos_data.json", package = "elastic")
#' docs_bulk(plosdat, config=verbose())
#'
#' # From a data.frame
#' docs_bulk(mtcars, index = "hello", type = "world")
#' docs_bulk(iris, "iris", "flowers")
#' ## type can be missing, but index can not
#' docs_bulk(iris, "flowers")
#' ## big data.frame, 53K rows, load ggplot2 package first
#' # res <- docs_bulk(diamonds, "diam")
#' # Search("diam")$hits$total
#'
#' # From a list
#' docs_bulk(apply(iris, 1, as.list), index="iris", type="flowers")
#' docs_bulk(apply(USArrests, 1, as.list), index="arrests")
#' # dim_list <- apply(diamonds, 1, as.list)
#' # out <- docs_bulk(dim_list, index="diamfromlist")
#'
#' # When using in a loop
#' ## We internally get last _id counter to know where to start on next bulk insert
#' ## but you need to sleep in between docs_bulk calls, longer the bigger the data is
#' files <- c(system.file("examples", "test1.csv", package = "elastic"),
#' system.file("examples", "test2.csv", package = "elastic"),
#' system.file("examples", "test3.csv", package = "elastic"))
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs")
#' Sys.sleep(1)
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' # You can include your own document id numbers
#' ## Either pass in as an argument
#' index_create("testes")
#' files <- c(system.file("examples", "test1.csv", package = "elastic"),
#' system.file("examples", "test2.csv", package = "elastic"),
#' system.file("examples", "test3.csv", package = "elastic"))
#' tt <- vapply(files, function(z) NROW(read.csv(z)), numeric(1))
#' ids <- list(1:tt[1],
#' (tt[1] + 1):(tt[1] + tt[2]),
#' (tt[1] + tt[2] + 1):sum(tt))
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs", doc_ids = ids[[i]])
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' ## or include in the input data
#' ### from data.frame's
#' index_create("testes")
#' files <- c(system.file("examples", "test1_id.csv", package = "elastic"),
#' system.file("examples", "test2_id.csv", package = "elastic"),
#' system.file("examples", "test3_id.csv", package = "elastic"))
#' readLines(files[[1]])
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs")
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' ### from lists via file inputs
#' index_create("testes")
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' d <- apply(d, 1, as.list)
#' docs_bulk(d, index = "testes", type = "docs")
#' }
#' count("testes", "docs")
#' index_delete("testes")
#' }
docs_bulk <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw=FALSE, ...) {
# S3 generic: dispatches on class(x) to the data.frame, list, or character
# (file path) method defined below.
UseMethod("docs_bulk")
}
#' @export
docs_bulk.data.frame <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw = FALSE, ...) {
# Bulk-index a data.frame: rows are split into chunks of `chunk_size`, each
# chunk is serialized to bulk-format NDJSON by make_bulk(), and the resulting
# temp file is POSTed by dispatching back through the character method.
checkconn()
if (is.null(index)) {
stop("index can't be NULL when passing a data.frame",
call. = FALSE)
}
# type defaults to the index name when not supplied
if (is.null(type)) type <- index
check_doc_ids(x, doc_ids)
if (is.factor(doc_ids)) doc_ids <- as.character(doc_ids)
# row names are dropped (documented behavior, see roxygen block above)
row.names(x) <- NULL
rws <- seq_len(NROW(x))
# chunk i covers rows ((i-1)*chunk_size, i*chunk_size]
data_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
# Document ids, in order of precedence:
#   1. an explicit `doc_ids` argument,
#   2. an `id` column present in `x`,
#   3. auto-increment continuing after the index's current document count
#      (shift_start), so repeated calls don't overwrite earlier documents.
if (!is.null(doc_ids)) {
id_chks <- split(doc_ids, ceiling(seq_along(doc_ids) / chunk_size))
} else if (has_ids(x)) {
rws <- x$id
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
} else {
rws <- shift_start(rws, index, type)
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
}
# progress bar tracks chunks (bulk requests), not individual documents
pb <- txtProgressBar(min = 0, max = length(data_chks), initial = 0, style = 3)
on.exit(close(pb))
for (i in seq_along(data_chks)) {
setTxtProgressBar(pb, i)
docs_bulk(make_bulk(x[data_chks[[i]], ], index, type, id_chks[[i]]), ...)
}
}
#' @export
docs_bulk.list <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw = FALSE, ...) {
# Bulk-index a list of documents: same chunking and id-resolution strategy as
# the data.frame method; each list element becomes one document.
checkconn()
if (is.null(index)) {
stop("index can't be NULL when passing a list",
call. = FALSE)
}
if (is.null(type)) type <- index
check_doc_ids(x, doc_ids)
if (is.factor(doc_ids)) doc_ids <- as.character(doc_ids)
x <- unname(x)  # top-level names are dropped (documented behavior)
x <- check_named_vectors(x)  # coerce named vectors to lists so toJSON emits objects
rws <- seq_len(length(x))
data_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
# id precedence: explicit doc_ids > per-document "id" fields > auto-increment
# continuing after the index's current doc count (shift_start)
if (!is.null(doc_ids)) {
id_chks <- split(doc_ids, ceiling(seq_along(doc_ids) / chunk_size))
} else if (has_ids(x)) {
rws <- as.numeric(sapply(x, "[[", "id"))
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
} else {
rws <- shift_start(rws, index, type)
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
}
# progress bar tracks chunks (bulk requests), not individual documents
pb <- txtProgressBar(min = 0, max = length(data_chks), initial = 0, style = 3)
on.exit(close(pb))
for (i in seq_along(data_chks)) {
setTxtProgressBar(pb, i)
docs_bulk(make_bulk(x[data_chks[[i]]], index, type, id_chks[[i]]), ...)
}
}
#' @export
docs_bulk.character <- function(x, index = NULL, type = NULL, chunk_size = 1000,
                                doc_ids = NULL, raw = FALSE, ...) {
  # Bulk-load a file already in Elasticsearch bulk NDJSON format.
  # `index`, `type`, `chunk_size` and `doc_ids` are ignored here: the file
  # itself carries the action/metadata line for every document.
  #
  # Fix: the original nested a second `if (tt$status_code > 202)` inside the
  # first, so the ERROR/500 check after it was unreachable dead code; a single
  # stop() is behaviorally equivalent.
  on.exit(close_conns())  # release connections left open on make_bulk() tempfiles
  checkconn()
  stopifnot(file.exists(x))
  conn <- es_get_auth()
  url <- paste0(conn$base, ":", conn$port, '/_bulk')
  tt <- POST(url, make_up(), ...,
             body = upload_file(x, type = "application/json"), encode = "json")
  if (tt$status_code > 202) {
    stop(content(tt)$error)
  }
  res <- content(tt, as = "text")
  res <- structure(res, class = "bulk_make")
  # raw JSON string, or parsed response
  if (raw) res else es_parse(res)
}
make_bulk <- function(df, index, type, counter) {
# Serialize one chunk to Elasticsearch bulk NDJSON: for each document, an
# action/metadata line ({"index":{...}}) followed by the document's JSON.
# Writes to a tempfile (prefix "elastic__") and returns its path invisibly;
# the caller POSTs it via docs_bulk.character.
#
# Args:
#   df: data.frame (or list) chunk of documents.
#   index, type: target Elasticsearch index/type for the metadata lines.
#   counter: per-document ids -- character ids are used verbatim (quoted);
#            numeric ids are shifted to 0-based.
if (!is.character(counter)) {
# Very large numeric ids would be rendered in scientific notation by
# sprintf("%s", ...); temporarily raise scipen to force fixed notation,
# restoring the user's option on exit.
if (max(counter) >= 10000000000) {
scipen <- getOption("scipen")
options(scipen = 100)
on.exit(options(scipen = scipen))
}
}
# character ids are quoted in the JSON metadata; numeric ids are bare
metadata_fmt <- if (is.character(counter)) {
'{"index":{"_index":"%s","_type":"%s","_id":"%s"}}'
} else {
'{"index":{"_index":"%s","_type":"%s","_id":%s}}'
}
metadata <- sprintf(
metadata_fmt,
index,
type,
if (is.numeric(counter)) {
# R sequences are 1-based; document ids here start at 0
counter - 1L
} else {
counter
}
)
# collapse = FALSE: one JSON object per row, interleaved with metadata lines
data <- jsonlite::toJSON(df, collapse = FALSE)
tmpf <- tempfile("elastic__")
writeLines(paste(metadata, data, sep = "\n"), tmpf)
invisible(tmpf)
}
shift_start <- function(vals, index, type = NULL) {
  # Offset auto-generated ids by the number of documents already in the
  # index/type, so successive bulk calls don't reuse ids. If the count
  # cannot be obtained (e.g. the index does not exist yet), return the
  # sequence unchanged.
  doc_count <- tryCatch(count(index, type), error = function(e) e)
  if (inherits(doc_count, "error")) {
    return(vals)
  }
  vals + doc_count
}
check_doc_ids <- function(x, ids) {
  # Validate user-supplied document ids against the documents in `x`.
  # A NULL `ids` means "no explicit ids" and is always acceptable.
  #
  # Fixes vs. the original:
  #  - class(ids) %in% c(...) yields a length-2 condition for multi-class
  #    objects (e.g. ordered factors); inherits() handles those correctly.
  #  - 1:NROW(x) == 1:length(ids) recycled (with a warning) on mismatched
  #    lengths; a direct length comparison is equivalent and silent.
  if (is.null(ids)) {
    return(invisible(NULL))
  }
  # check class type
  if (!inherits(ids, c("character", "factor", "numeric", "integer"))) {
    stop("doc_ids must be of class character, numeric or integer", call. = FALSE)
  }
  # check appropriate length: exactly one id per document
  if (NROW(x) != length(ids)) {
    stop("doc_ids length must equal number of documents", call. = FALSE)
  }
  invisible(NULL)
}
has_ids <- function(x) {
  # Report whether every document in `x` carries its own "id" field.
  # data.frame: TRUE iff an "id" column exists. list: TRUE iff every element
  # has an "id" entry; errors if only some do, or if `x` is neither type.
  if (is(x, "data.frame")) {
    return("id" %in% names(x))
  }
  if (!is(x, "list")) {
    stop("input must be list or data.frame", call. = FALSE)
  }
  ids <- ec(sapply(x, "[[", "id"))
  if (length(ids) == 0) {
    return(FALSE)
  }
  if (length(ids) != length(x)) {
    stop("id field not in every document", call. = FALSE)
  }
  TRUE
}
close_conns <- function() {
  # Close any connections still open on our "elastic__" tempfiles
  # (created by make_bulk), identified by description.
  all_cons <- showConnections()
  is_ours <- grepl("/elastic__", all_cons[, "description"], fixed = TRUE)
  for (con_id in as.integer(rownames(all_cons)[is_ours])) {
    close(getConnection(con_id))
  }
}
check_named_vectors <- function(x) {
  # Normalize each element of `x` to a list: named vectors become lists
  # (so jsonlite serializes them as JSON objects); lists pass through.
  lapply(x, function(elem) {
    if (is(elem, "list")) elem else as.list(elem)
  })
}
# make_bulk_plos(index_name='plosmore', fields=c('id','journal','title','abstract','author'), filename="inst/examples/plos_more_data.json")
make_bulk_plos <- function(n = 1000, index='plos', type='article', fields=c('id','title'), filename = "~/plos_data.json"){
# Dev helper (not exported): query the PLOS search API for n article records
# and write them to `filename` in Elasticsearch bulk NDJSON format, one
# action/metadata line plus one document line per article.
unlink(filename)  # start fresh; the cat() calls below append
args <- ec(list(q = "*:*", rows=n, fl=paste0(fields, collapse = ","), fq='doc_type:full', wt='json'))
res <- GET("http://api.plos.org/search", query=args)
stop_for_status(res)
tt <- jsonlite::fromJSON(content(res, as = "text"), FALSE)
docs <- tt$response$docs
# normalize each document: empty fields become the string "null";
# multi-valued fields are collapsed to a single comma-separated string
docs <- lapply(docs, function(x){
x[sapply(x, length)==0] <- "null"
lapply(x, function(y) if(length(y) > 1) paste0(y, collapse = ",") else y)
})
for(i in seq_along(docs)){
# action line with a 0-based id, then the document itself
dat <- list(index = list(`_index` = index, `_type` = type, `_id` = i-1))
cat(proc_doc(dat), sep = "\n", file = filename, append = TRUE)
cat(proc_doc(docs[[i]]), sep = "\n", file = filename, append = TRUE)
}
message(sprintf("File written to %s", filename))
}
proc_doc <- function(x){
  # Serialize `x` to a single-line JSON string and strip square brackets,
  # so scalar values are not wrapped as one-element JSON arrays.
  json <- jsonlite::toJSON(x, auto_unbox = TRUE)
  gsub("\\[|\\]", "", as.character(json))
}
# make_bulk_gbif(900, filename="inst/examples/gbif_data.json")
# make_bulk_gbif(600, "gbifgeo", filename="inst/examples/gbif_geo.json", add_coordinates = TRUE)
make_bulk_gbif <- function(n = 600, index='gbif', type='record', filename = "~/gbif_data.json", add_coordinates=FALSE){
# Dev helper (not exported): page through the GBIF occurrence API (300
# records per request) and write n records to `filename` in Elasticsearch
# bulk NDJSON format.
unlink(filename)  # start fresh; the cat() calls below append
res <- lapply(seq(1, n, 300), getgbif)
res <- do.call(c, res)  # flatten pages into one list of records
# normalize: empty fields become "null"; multi-valued fields are collapsed
# to a single comma-separated string
res <- lapply(res, function(x){
x[sapply(x, length)==0] <- "null"
lapply(x, function(y) if(length(y) > 1) paste0(y, collapse = ",") else y)
})
# optionally add a "[lon,lat]" string field for geo-enabled mappings
if(add_coordinates) res <- lapply(res, function(x) c(x, coordinates = sprintf("[%s,%s]", x$decimalLongitude, x$decimalLatitude)))
for(i in seq_along(res)){
# action line with a 0-based id, then the record itself
dat <- list(index = list(`_index` = index, `_type` = type, `_id` = i-1))
cat(proc_doc(dat), sep = "\n", file = filename, append = TRUE)
cat(proc_doc(res[[i]]), sep = "\n", file = filename, append = TRUE)
}
message(sprintf("File written to %s", filename))
}
getgbif <- function(x){
  # Fetch one page (300 records) of GBIF occurrences starting at offset `x`.
  resp <- GET("http://api.gbif.org/v1/occurrence/search",
    query = list(limit = 300, offset = x))
  jsonlite::fromJSON(content(resp, "text"), FALSE)$results
}
| /elastic/R/docs_bulk.r | no_license | ingted/R-Examples | R | false | false | 12,536 | r | #' Use the bulk API to create, index, update, or delete documents.
#'
#' @export
#' @param x A data.frame or path to a file to load in the bulk API
#' @param index (character) The index name to use. Required for data.frame input, but
#' optional for file inputs.
#' @param type (character) The type name to use. If left as NULL, will be same name as index.
#' @param chunk_size (integer) Size of each chunk. If your data.frame is smaller
#' than \code{chunk_size}, this parameter is essentially ignored. We write in chunks because
#' at some point, depending on size of each document, and Elasticsearch setup, writing a very
#' large number of documents in one go becomes slow, so chunking can help. This parameter
#' is ignored if you pass a file name. Default: 1000
#' @param doc_ids An optional vector (character or numeric/integer) of document ids to use.
#' This vector has to equal the size of the documents you are passing in, and will error
#' if not. If you pass a factor we convert to character. Default: not passed
#' @param raw (logical) Get raw JSON back or not.
#' @param ... Pass on curl options to \code{\link[httr]{POST}}
#' @details More on the Bulk API:
#' \url{https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html}.
#'
#' This function dispatches on data.frame or character input. Character input has
#' to be a file name or the function stops with an error message.
#'
#' If you pass a data.frame to this function, we by default to an index operation,
#' that is, create the record in the index and type given by those parameters to the
#' function. Down the road perhaps we will try to support other operations on the
#' bulk API. if you pass a file, of course in that file, you can specify any
#' operations you want.
#'
#' Row names are dropped from data.frame, and top level names for a list are dropped
#' as well.
#'
#' A progress bar gives the progress for data.frames and lists
#'
#' @section Large numbers for document IDs:
#' Until recently, if you had very large integers for document IDs, \code{docs_bulk}
#' failed. It should be fixed now. Let us know if not.
#'
#' @examples \dontrun{
#' plosdat <- system.file("examples", "plos_data.json", package = "elastic")
#' docs_bulk(plosdat)
#' aliases_get()
#' index_delete(index='plos')
#' aliases_get()
#'
#' # Curl options
#' library("httr")
#' plosdat <- system.file("examples", "plos_data.json", package = "elastic")
#' docs_bulk(plosdat, config=verbose())
#'
#' # From a data.frame
#' docs_bulk(mtcars, index = "hello", type = "world")
#' docs_bulk(iris, "iris", "flowers")
#' ## type can be missing, but index can not
#' docs_bulk(iris, "flowers")
#' ## big data.frame, 53K rows, load ggplot2 package first
#' # res <- docs_bulk(diamonds, "diam")
#' # Search("diam")$hits$total
#'
#' # From a list
#' docs_bulk(apply(iris, 1, as.list), index="iris", type="flowers")
#' docs_bulk(apply(USArrests, 1, as.list), index="arrests")
#' # dim_list <- apply(diamonds, 1, as.list)
#' # out <- docs_bulk(dim_list, index="diamfromlist")
#'
#' # When using in a loop
#' ## We internally get last _id counter to know where to start on next bulk insert
#' ## but you need to sleep in between docs_bulk calls, longer the bigger the data is
#' files <- c(system.file("examples", "test1.csv", package = "elastic"),
#' system.file("examples", "test2.csv", package = "elastic"),
#' system.file("examples", "test3.csv", package = "elastic"))
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs")
#' Sys.sleep(1)
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' # You can include your own document id numbers
#' ## Either pass in as an argument
#' index_create("testes")
#' files <- c(system.file("examples", "test1.csv", package = "elastic"),
#' system.file("examples", "test2.csv", package = "elastic"),
#' system.file("examples", "test3.csv", package = "elastic"))
#' tt <- vapply(files, function(z) NROW(read.csv(z)), numeric(1))
#' ids <- list(1:tt[1],
#' (tt[1] + 1):(tt[1] + tt[2]),
#' (tt[1] + tt[2] + 1):sum(tt))
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs", doc_ids = ids[[i]])
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' ## or include in the input data
#' ### from data.frame's
#' index_create("testes")
#' files <- c(system.file("examples", "test1_id.csv", package = "elastic"),
#' system.file("examples", "test2_id.csv", package = "elastic"),
#' system.file("examples", "test3_id.csv", package = "elastic"))
#' readLines(files[[1]])
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' docs_bulk(d, index = "testes", type = "docs")
#' }
#' count("testes", "docs")
#' index_delete("testes")
#'
#' ### from lists via file inputs
#' index_create("testes")
#' for (i in seq_along(files)) {
#' d <- read.csv(files[[i]])
#' d <- apply(d, 1, as.list)
#' docs_bulk(d, index = "testes", type = "docs")
#' }
#' count("testes", "docs")
#' index_delete("testes")
#' }
docs_bulk <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw=FALSE, ...) {
UseMethod("docs_bulk")
}
#' @export
docs_bulk.data.frame <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw = FALSE, ...) {
checkconn()
if (is.null(index)) {
stop("index can't be NULL when passing a data.frame",
call. = FALSE)
}
if (is.null(type)) type <- index
check_doc_ids(x, doc_ids)
if (is.factor(doc_ids)) doc_ids <- as.character(doc_ids)
row.names(x) <- NULL
rws <- seq_len(NROW(x))
data_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
if (!is.null(doc_ids)) {
id_chks <- split(doc_ids, ceiling(seq_along(doc_ids) / chunk_size))
} else if (has_ids(x)) {
rws <- x$id
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
} else {
rws <- shift_start(rws, index, type)
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
}
pb <- txtProgressBar(min = 0, max = length(data_chks), initial = 0, style = 3)
on.exit(close(pb))
for (i in seq_along(data_chks)) {
setTxtProgressBar(pb, i)
docs_bulk(make_bulk(x[data_chks[[i]], ], index, type, id_chks[[i]]), ...)
}
}
#' @export
docs_bulk.list <- function(x, index = NULL, type = NULL, chunk_size = 1000,
doc_ids = NULL, raw = FALSE, ...) {
checkconn()
if (is.null(index)) {
stop("index can't be NULL when passing a list",
call. = FALSE)
}
if (is.null(type)) type <- index
check_doc_ids(x, doc_ids)
if (is.factor(doc_ids)) doc_ids <- as.character(doc_ids)
x <- unname(x)
x <- check_named_vectors(x)
rws <- seq_len(length(x))
data_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
if (!is.null(doc_ids)) {
id_chks <- split(doc_ids, ceiling(seq_along(doc_ids) / chunk_size))
} else if (has_ids(x)) {
rws <- as.numeric(sapply(x, "[[", "id"))
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
} else {
rws <- shift_start(rws, index, type)
id_chks <- split(rws, ceiling(seq_along(rws) / chunk_size))
}
pb <- txtProgressBar(min = 0, max = length(data_chks), initial = 0, style = 3)
on.exit(close(pb))
for (i in seq_along(data_chks)) {
setTxtProgressBar(pb, i)
docs_bulk(make_bulk(x[data_chks[[i]]], index, type, id_chks[[i]]), ...)
}
}
#' @export
docs_bulk.character <- function(x, index = NULL, type = NULL, chunk_size = 1000,
                                doc_ids = NULL, raw = FALSE, ...) {
  # Bulk-load a file already in Elasticsearch bulk NDJSON format.
  # `index`, `type`, `chunk_size` and `doc_ids` are ignored here: the file
  # itself carries the action/metadata line for every document.
  #
  # Fix: the original nested a second `if (tt$status_code > 202)` inside the
  # first, so the ERROR/500 check after it was unreachable dead code; a single
  # stop() is behaviorally equivalent.
  on.exit(close_conns())  # release connections left open on make_bulk() tempfiles
  checkconn()
  stopifnot(file.exists(x))
  conn <- es_get_auth()
  url <- paste0(conn$base, ":", conn$port, '/_bulk')
  tt <- POST(url, make_up(), ...,
             body = upload_file(x, type = "application/json"), encode = "json")
  if (tt$status_code > 202) {
    stop(content(tt)$error)
  }
  res <- content(tt, as = "text")
  res <- structure(res, class = "bulk_make")
  # raw JSON string, or parsed response
  if (raw) res else es_parse(res)
}
make_bulk <- function(df, index, type, counter) {
if (!is.character(counter)) {
if (max(counter) >= 10000000000) {
scipen <- getOption("scipen")
options(scipen = 100)
on.exit(options(scipen = scipen))
}
}
metadata_fmt <- if (is.character(counter)) {
'{"index":{"_index":"%s","_type":"%s","_id":"%s"}}'
} else {
'{"index":{"_index":"%s","_type":"%s","_id":%s}}'
}
metadata <- sprintf(
metadata_fmt,
index,
type,
if (is.numeric(counter)) {
counter - 1L
} else {
counter
}
)
data <- jsonlite::toJSON(df, collapse = FALSE)
tmpf <- tempfile("elastic__")
writeLines(paste(metadata, data, sep = "\n"), tmpf)
invisible(tmpf)
}
shift_start <- function(vals, index, type = NULL) {
num <- tryCatch(count(index, type), error = function(e) e)
if (is(num, "error")) {
vals
} else {
vals + num
}
}
check_doc_ids <- function(x, ids) {
  # Validate user-supplied document ids against the documents in `x`.
  # A NULL `ids` means "no explicit ids" and is always acceptable.
  #
  # Fixes vs. the original:
  #  - class(ids) %in% c(...) yields a length-2 condition for multi-class
  #    objects (e.g. ordered factors); inherits() handles those correctly.
  #  - 1:NROW(x) == 1:length(ids) recycled (with a warning) on mismatched
  #    lengths; a direct length comparison is equivalent and silent.
  if (is.null(ids)) {
    return(invisible(NULL))
  }
  # check class type
  if (!inherits(ids, c("character", "factor", "numeric", "integer"))) {
    stop("doc_ids must be of class character, numeric or integer", call. = FALSE)
  }
  # check appropriate length: exactly one id per document
  if (NROW(x) != length(ids)) {
    stop("doc_ids length must equal number of documents", call. = FALSE)
  }
  invisible(NULL)
}
has_ids <- function(x) {
if (is(x, "data.frame")) {
"id" %in% names(x)
} else if (is(x, "list")) {
ids <- ec(sapply(x, "[[", "id"))
if (length(ids) > 0) {
tmp <- length(ids) == length(x)
if (tmp) TRUE else stop("id field not in every document", call. = FALSE)
} else {
FALSE
}
} else {
stop("input must be list or data.frame", call. = FALSE)
}
}
close_conns <- function() {
cons <- showConnections()
ours <- as.integer(rownames(cons)[grepl("/elastic__", cons[, "description"], fixed = TRUE)])
for (i in ours) {
close(getConnection(i))
}
}
check_named_vectors <- function(x) {
lapply(x, function(z) {
if (!is(z, "list")) {
as.list(z)
} else {
z
}
})
}
# make_bulk_plos(index_name='plosmore', fields=c('id','journal','title','abstract','author'), filename="inst/examples/plos_more_data.json")
make_bulk_plos <- function(n = 1000, index='plos', type='article', fields=c('id','title'), filename = "~/plos_data.json"){
unlink(filename)
args <- ec(list(q = "*:*", rows=n, fl=paste0(fields, collapse = ","), fq='doc_type:full', wt='json'))
res <- GET("http://api.plos.org/search", query=args)
stop_for_status(res)
tt <- jsonlite::fromJSON(content(res, as = "text"), FALSE)
docs <- tt$response$docs
docs <- lapply(docs, function(x){
x[sapply(x, length)==0] <- "null"
lapply(x, function(y) if(length(y) > 1) paste0(y, collapse = ",") else y)
})
for(i in seq_along(docs)){
dat <- list(index = list(`_index` = index, `_type` = type, `_id` = i-1))
cat(proc_doc(dat), sep = "\n", file = filename, append = TRUE)
cat(proc_doc(docs[[i]]), sep = "\n", file = filename, append = TRUE)
}
message(sprintf("File written to %s", filename))
}
proc_doc <- function(x){
b <- jsonlite::toJSON(x, auto_unbox = TRUE)
gsub("\\[|\\]", "", as.character(b))
}
# make_bulk_gbif(900, filename="inst/examples/gbif_data.json")
# make_bulk_gbif(600, "gbifgeo", filename="inst/examples/gbif_geo.json", add_coordinates = TRUE)
make_bulk_gbif <- function(n = 600, index='gbif', type='record', filename = "~/gbif_data.json", add_coordinates=FALSE){
unlink(filename)
res <- lapply(seq(1, n, 300), getgbif)
res <- do.call(c, res)
res <- lapply(res, function(x){
x[sapply(x, length)==0] <- "null"
lapply(x, function(y) if(length(y) > 1) paste0(y, collapse = ",") else y)
})
if(add_coordinates) res <- lapply(res, function(x) c(x, coordinates = sprintf("[%s,%s]", x$decimalLongitude, x$decimalLatitude)))
for(i in seq_along(res)){
dat <- list(index = list(`_index` = index, `_type` = type, `_id` = i-1))
cat(proc_doc(dat), sep = "\n", file = filename, append = TRUE)
cat(proc_doc(res[[i]]), sep = "\n", file = filename, append = TRUE)
}
message(sprintf("File written to %s", filename))
}
getgbif <- function(x){
res <- GET("http://api.gbif.org/v1/occurrence/search", query=list(limit=300, offset=x))
jsonlite::fromJSON(content(res, "text"), FALSE)$results
}
|
\name{parameters}
\alias{parameters}
\alias{kr}
\title{
Central probability
}
\description{
Probability of observing r NN distances at distance c, all previous NN distances at distance < c and all following NN distances at a distance > c
}
\usage{
parameters(r, i0, c, N)
kr(r, i0, c)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{r}{
the number of points that are at the same distance c
}
\item{i0}{
which i0-th nearest neighbour we are considering.
}
\item{c}{
the distance of the i-th nearest neighbour
}
\item{N}{
sample size
}
}
\value{
For \code{kr}, the number of ways to place r points at the same distance when i0 points have already been observed at a smaller distance.
For \code{parameters}, the probability of observing r NN distances at distance c, with all previous NN distances smaller than c and all following NN distances greater than c.
}
\author{
Sebastian Dümcke \email{duemcke@mpipz.mpg.de}
}
\examples{
knnIndep:::kr(3,5,6)
knnIndep:::parameters(3,5,6,20)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/parameters.Rd | no_license | cran/knnIndep | R | false | false | 1,204 | rd | \name{parameters}
\alias{parameters}
\alias{kr}
\title{
Central probability
}
\description{
Probability of observing r NN distances at distance c, all previous NN distances at distance < c and all following NN distances at a distance > c
}
\usage{
parameters(r, i0, c, N)
kr(r, i0, c)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{r}{
the number of points that are at the same distance c
}
\item{i0}{
which i0-th nearest neighbour we are considering.
}
\item{c}{
the distance of the i-th nearest neighbour
}
\item{N}{
sample size
}
}
\value{
For \code{kr}, the number of ways to place r points at the same distance when i0 points have already been observed at a smaller distance.
For \code{parameters}, the probability of observing r NN distances at distance c, with all previous NN distances smaller than c and all following NN distances greater than c.
}
\author{
Sebastian Dümcke \email{duemcke@mpipz.mpg.de}
}
\examples{
knnIndep:::kr(3,5,6)
knnIndep:::parameters(3,5,6,20)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize.names.R
\name{standardize.names}
\alias{standardize.names}
\title{Standardize taxonomic names}
\usage{
standardize.names(taxon)
}
\arguments{
\item{taxon}{a character vector containing a single name}
}
\value{
a character vector
}
\description{
This function standardizes taxa names. It is used mainly internally, but might be
helpful to the end user in some situations.
}
\examples{
\dontrun{
standardize.names("Miconia sp 01")
standardize.names("Miconia Sp 2")
standardize.names("Sp18")
}
}
| /man/standardize.names.Rd | no_license | gustavobio/flora | R | false | true | 583 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize.names.R
\name{standardize.names}
\alias{standardize.names}
\title{Standardize taxonomic names}
\usage{
standardize.names(taxon)
}
\arguments{
\item{taxon}{a character vector containing a single name}
}
\value{
a character vector
}
\description{
This function standardizes taxa names. It is used mainly internally, but might be
helpful to the end user in some situations.
}
\examples{
\dontrun{
standardize.names("Miconia sp 01")
standardize.names("Miconia Sp 2")
standardize.names("Sp18")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/countrycode.R
\name{countrycode}
\alias{countrycode}
\title{Convert Country Codes}
\usage{
countrycode(sourcevar, origin, destination, warn = FALSE, dictionary = NULL,
extra = NULL)
}
\arguments{
\item{sourcevar}{Vector which contains the codes or country names to be converted}
\item{origin}{Coding scheme of origin (name enclosed in quotes "")}
\item{destination}{Coding scheme of destination (name enclosed in quotes "")}
\item{warn}{Prints unique elements from sourcevar for which no match was found}
\item{dictionary}{A data frame which supplies custom country codes.
Variables correspond to country codes, observations must refer to unique
countries. When countrycode uses a user-supplied dictionary, no sanity
checks are conducted. The data frame format must resemble
countrycode::countrycode_data. Custom dictionaries only work with strings
(no regexes).}
\item{extra}{A data frame which supplies additional country codes in the scheme
chosen by origin/destination, to supplement the official list. Must be a
two-column data frame or a list of two vectors. Column names must match if used.
Warnings will be suppressed if a match is returned from this data frame.
Regexes not supported.}
}
\description{
Converts long country names into one of many different coding schemes.
Translates from one scheme to another. Converts country name or coding
scheme to the official short English country name. Creates a new variable
with the name of the continent or region to which each country belongs.
}
\note{
Supports the following coding schemes: Correlates of War character,
CoW-numeric, ISO3-character, ISO3-numeric, ISO2-character, IMF numeric, International
Olympic Committee, FIPS 10-4, FAO numeric, United Nations numeric,
World Bank character, official English short country names (ISO), continent, region.
The following strings can be used as arguments for \code{origin} or
\code{destination}: "cowc", "cown", "iso3c", "iso3n", "iso2c", "imf",
"fips104", "fao", "ioc", "un", "wb", "country.name". The following strings can be
used as arguments for \code{destination} \emph{only}: "continent", "region",
"eu28", "ar5"
}
\examples{
codes.of.origin <- countrycode::countrycode_data$cowc # Vector of values to be converted
countrycode(codes.of.origin, "cowc", "iso3c")
two_letter <- c("AU", "US", "XK") # World Bank uses user-assigned XK for Kosovo
countrycode(two_letter, "iso2c", "country.name", warn=TRUE)
countrycode(two_letter, "iso2c", "country.name", warn=TRUE,
extra=list(c("XK", "JG"),c("Kosovo", "Channel Islands")))
}
\keyword{countrycode}
| /man/countrycode.Rd | no_license | econandrew/countrycode | R | false | true | 2,676 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/countrycode.R
\name{countrycode}
\alias{countrycode}
\title{Convert Country Codes}
\usage{
countrycode(sourcevar, origin, destination, warn = FALSE, dictionary = NULL,
extra = NULL)
}
\arguments{
\item{sourcevar}{Vector which contains the codes or country names to be converted}
\item{origin}{Coding scheme of origin (name enclosed in quotes "")}
\item{destination}{Coding scheme of destination (name enclosed in quotes "")}
\item{warn}{Prints unique elements from sourcevar for which no match was found}
\item{dictionary}{A data frame which supplies custom country codes.
Variables correspond to country codes, observations must refer to unique
countries. When countrycode uses a user-supplied dictionary, no sanity
checks are conducted. The data frame format must resemble
countrycode::countrycode_data. Custom dictionaries only work with strings
(no regexes).}
\item{extra}{A data frame which supplies additional country codes in the scheme
chosen by origin/destination, to supplement the official list. Must be a
two-column data frame or a list of two vectors. Column names must match if used.
Warnings will be suppressed if a match is returned from this data frame.
Regexes not supported.}
}
\description{
Converts long country names into one of many different coding schemes.
Translates from one scheme to another. Converts country name or coding
scheme to the official short English country name. Creates a new variable
with the name of the continent or region to which each country belongs.
}
\note{
Supports the following coding schemes: Correlates of War character,
CoW-numeric, ISO3-character, ISO3-numeric, ISO2-character, IMF numeric, International
Olympic Committee, FIPS 10-4, FAO numeric, United Nations numeric,
World Bank character, official English short country names (ISO), continent, region.
The following strings can be used as arguments for \code{origin} or
\code{destination}: "cowc", "cown", "iso3c", "iso3n", "iso2c", "imf",
"fips104", "fao", "ioc", "un", "wb", "country.name". The following strings can be
used as arguments for \code{destination} \emph{only}: "continent", "region",
"eu28", "ar5"
}
\examples{
codes.of.origin <- countrycode::countrycode_data$cowc # Vector of values to be converted
countrycode(codes.of.origin, "cowc", "iso3c")
two_letter <- c("AU", "US", "XK") # World Bank uses user-assigned XK for Kosovo
countrycode(two_letter, "iso2c", "country.name", warn=TRUE)
countrycode(two_letter, "iso2c", "country.name", warn=TRUE,
extra=list(c("XK", "JG"),c("Kosovo", "Channel Islands")))
}
\keyword{countrycode}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recipes-step_box_cox.R
\name{step_box_cox}
\alias{step_box_cox}
\alias{tidy.step_box_cox}
\title{Box-Cox Transformation using Forecast Methods}
\usage{
step_box_cox(
recipe,
...,
method = c("guerrero", "loglik"),
limits = c(-1, 2),
role = NA,
trained = FALSE,
lambdas_trained = NULL,
skip = FALSE,
id = rand_id("box_cox")
)
\method{tidy}{step_box_cox}(x, ...)
}
\arguments{
\item{recipe}{A \code{recipe} object. The step will be added to the sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables are affected by the step. See \code{\link[=selections]{selections()}}
for more details. For the \code{tidy} method, these are not
currently used.}
\item{method}{One of "guerrero" or "loglik"}
\item{limits}{A length 2 numeric vector defining the range to
compute the transformation parameter lambda.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.}
\item{lambdas_trained}{A numeric vector of transformation values. This
is \code{NULL} until computed by \code{prep()}.}
\item{skip}{A logical. Should the step be skipped when the recipe
is baked by \code{bake.recipe()}? While all operations are baked when \code{prep.recipe()} is run,
some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_box_cox} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
selectors or variables selected) and \code{value} (the
lambda estimate).
}
\description{
\code{step_box_cox} creates a \emph{specification} of a recipe
step that will transform data using a Box-Cox
transformation. This function differs from
\code{recipes::step_BoxCox} by adding multiple methods
including Guerrero lambda optimization and handling for
negative data used in the Forecast R Package.
}
\details{
The \code{step_box_cox()} function is designed specifically to handle time series
using methods implemented in the Forecast R Package.
\strong{Negative Data}
This function can be applied to Negative Data.
\strong{Lambda Optimization Methods}
This function uses 2 methods for optimizing the lambda selection
from the Forecast R Package:
\enumerate{
\item \code{method = "guerrero"}: Guerrero's (1993) method is used, where lambda minimizes
the coefficient of variation for subseries of x.
\item \code{method = loglik}: the value of lambda is chosen to maximize the profile
log likelihood of a linear model fitted to x. For non-seasonal data, a
linear time trend is fitted while for seasonal data, a linear time trend
with seasonal dummy variables is used.
}
}
\examples{
library(dplyr)
library(tidyr)
library(recipes)
library(timetk)
FANG_wide <- FANG \%>\%
select(symbol, date, adjusted) \%>\%
pivot_wider(names_from = symbol, values_from = adjusted)
recipe_box_cox <- recipe(~ ., data = FANG_wide) \%>\%
step_box_cox(FB, AMZN, NFLX, GOOG) \%>\%
prep()
recipe_box_cox \%>\% bake(FANG_wide)
recipe_box_cox \%>\% tidy(1)
}
\references{
\enumerate{
\item Guerrero, V.M. (1993) Time-series analysis supported by power transformations. \emph{Journal of Forecasting}, \strong{12}, 37–48.
\item Box, G. E. P. and Cox, D. R. (1964) An analysis of transformations. \emph{JRSS} B \strong{26} 211–246.
}
}
\seealso{
Time Series Analysis:
\itemize{
\item Engineered Features: \code{\link[=step_timeseries_signature]{step_timeseries_signature()}}, \code{\link[=step_holiday_signature]{step_holiday_signature()}}, \code{\link[=step_fourier]{step_fourier()}}
\item Diffs & Lags \code{\link[=step_diff]{step_diff()}}, \code{recipes::step_lag()}
\item Smoothing: \code{\link[=step_slidify]{step_slidify()}}, \code{\link[=step_smooth]{step_smooth()}}
\item Variance Reduction: \code{\link[=step_box_cox]{step_box_cox()}}
\item Imputation: \code{\link[=step_ts_impute]{step_ts_impute()}}, \code{\link[=step_ts_clean]{step_ts_clean()}}
\item Padding: \code{\link[=step_ts_pad]{step_ts_pad()}}
}
Transformations to reduce variance:
\itemize{
\item \code{recipes::step_log()} - Log transformation
\item \code{recipes::step_sqrt()} - Square-Root Power Transformation
}
Recipe Setup and Application:
\itemize{
\item \code{recipes::recipe()}
\item \code{recipes::prep()}
\item \code{recipes::bake()}
}
}
| /man/step_box_cox.Rd | no_license | business-science/timetk | R | false | true | 4,736 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recipes-step_box_cox.R
\name{step_box_cox}
\alias{step_box_cox}
\alias{tidy.step_box_cox}
\title{Box-Cox Transformation using Forecast Methods}
\usage{
step_box_cox(
recipe,
...,
method = c("guerrero", "loglik"),
limits = c(-1, 2),
role = NA,
trained = FALSE,
lambdas_trained = NULL,
skip = FALSE,
id = rand_id("box_cox")
)
\method{tidy}{step_box_cox}(x, ...)
}
\arguments{
\item{recipe}{A \code{recipe} object. The step will be added to the sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose which
variables are affected by the step. See \code{\link[=selections]{selections()}}
for more details. For the \code{tidy} method, these are not
currently used.}
\item{method}{One of "guerrero" or "loglik"}
\item{limits}{A length 2 numeric vector defining the range to
compute the transformation parameter lambda.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.}
\item{lambdas_trained}{A numeric vector of transformation values. This
is \code{NULL} until computed by \code{prep()}.}
\item{skip}{A logical. Should the step be skipped when the recipe
is baked by \code{bake.recipe()}? While all operations are baked when \code{prep.recipe()} is run,
some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_box_cox} object.}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
selectors or variables selected) and \code{value} (the
lambda estimate).
}
\description{
\code{step_box_cox} creates a \emph{specification} of a recipe
step that will transform data using a Box-Cox
transformation. This function differs from
\code{recipes::step_BoxCox} by adding multiple methods
including Guerrero lambda optimization and handling for
negative data used in the Forecast R Package.
}
\details{
The \code{step_box_cox()} function is designed specifically to handle time series
using methods implemented in the Forecast R Package.
\strong{Negative Data}
This function can be applied to Negative Data.
\strong{Lambda Optimization Methods}
This function uses 2 methods for optimizing the lambda selection
from the Forecast R Package:
\enumerate{
\item \code{method = "guerrero"}: Guerrero's (1993) method is used, where lambda minimizes
the coefficient of variation for subseries of x.
\item \code{method = loglik}: the value of lambda is chosen to maximize the profile
log likelihood of a linear model fitted to x. For non-seasonal data, a
linear time trend is fitted while for seasonal data, a linear time trend
with seasonal dummy variables is used.
}
}
\examples{
library(dplyr)
library(tidyr)
library(recipes)
library(timetk)
FANG_wide <- FANG \%>\%
select(symbol, date, adjusted) \%>\%
pivot_wider(names_from = symbol, values_from = adjusted)
recipe_box_cox <- recipe(~ ., data = FANG_wide) \%>\%
step_box_cox(FB, AMZN, NFLX, GOOG) \%>\%
prep()
recipe_box_cox \%>\% bake(FANG_wide)
recipe_box_cox \%>\% tidy(1)
}
\references{
\enumerate{
\item Guerrero, V.M. (1993) Time-series analysis supported by power transformations. \emph{Journal of Forecasting}, \strong{12}, 37–48.
\item Box, G. E. P. and Cox, D. R. (1964) An analysis of transformations. \emph{JRSS} B \strong{26} 211–246.
}
}
\seealso{
Time Series Analysis:
\itemize{
\item Engineered Features: \code{\link[=step_timeseries_signature]{step_timeseries_signature()}}, \code{\link[=step_holiday_signature]{step_holiday_signature()}}, \code{\link[=step_fourier]{step_fourier()}}
\item Diffs & Lags \code{\link[=step_diff]{step_diff()}}, \code{recipes::step_lag()}
\item Smoothing: \code{\link[=step_slidify]{step_slidify()}}, \code{\link[=step_smooth]{step_smooth()}}
\item Variance Reduction: \code{\link[=step_box_cox]{step_box_cox()}}
\item Imputation: \code{\link[=step_ts_impute]{step_ts_impute()}}, \code{\link[=step_ts_clean]{step_ts_clean()}}
\item Padding: \code{\link[=step_ts_pad]{step_ts_pad()}}
}
Transformations to reduce variance:
\itemize{
\item \code{recipes::step_log()} - Log transformation
\item \code{recipes::step_sqrt()} - Square-Root Power Transformation
}
Recipe Setup and Application:
\itemize{
\item \code{recipes::recipe()}
\item \code{recipes::prep()}
\item \code{recipes::bake()}
}
}
|
# ================
# tokenize strings
# ================
#' Tokenize strings into graphemes using an orthography profile
#'
#' Splits each input string into graphemes (tokens) according to an
#' orthography profile, optionally transliterating each matched grapheme.
#' Two parsing strategies are available: "global" (rules are applied over
#' the whole data in profile order, larger/contextual graphemes first) and
#' "linear" (left-to-right, transducer-like: at each position the first
#' matching rule wins).
#'
#' @param strings character vector of strings to be tokenized.
#' @param profile orthography profile: NULL (a profile is created from the
#'   data via write.profile), a character vector of graphemes, the path of
#'   a tab-separated profile file, or a data frame with at least a
#'   "Grapheme" column (optionally "Left", "Right", "Class" and
#'   transliteration columns).
#' @param transliterate name of the profile column used for transliteration,
#'   or NULL for no transliteration.
#' @param method "global" or "linear" (partial matching allowed).
#' @param ordering criteria for ordering the profile rules; any subset of
#'   "size", "context", "reverse", "frequency". NULL keeps profile order.
#' @param sep separator inserted between tokens in the output.
#' @param sep.replace optional replacement for user separators occurring
#'   inside the strings (only applied by the global method).
#' @param missing symbol inserted for characters not covered by the profile.
#' @param normalize Unicode normalization applied to data and profile:
#'   "NFC" or "NFD"; any other value disables normalization (with warning).
#' @param regex should Grapheme/Left/Right be interpreted as regexes?
#' @param silent suppress the warning about unmatched characters?
#' @param file.out basename for writing results to tab-separated files;
#'   when NULL (default) results are returned as a list.
#' @return a list with elements "strings" (originals plus tokenization and
#'   optional transliteration), "profile" (reordered profile with per-rule
#'   match counts), "errors" and "missing" (both NULL when everything
#'   matched).
tokenize <- function(strings
                     , profile = NULL
                     , transliterate = NULL
                     , method = "global"
                     , ordering = c("size","context","reverse")
                     , sep = " "
                     , sep.replace = NULL
                     , missing = "\u2047"
                     , normalize = "NFC"
                     , regex = FALSE
                     , silent = FALSE
                     , file.out = NULL
                     ) {

  # ---------------
  # preprocess data
  # ---------------

  strings <- as.character(strings)

  # option gives errors, so hard-wired off for now
  case.insensitive <- FALSE

  # separators: a private-use codepoint is used internally so it cannot
  # clash with real data; the user-chosen separator is applied at the end
  internal_sep <- intToUtf8(1110000)
  user_sep <- sep

  # normalization: select the Unicode normal form for data and profile
  if (normalize == "NFC") {
    transcode <- stri_trans_nfc
  } else if (normalize == "NFD") {
    transcode <- stri_trans_nfd
  } else {
    warning("Only the normalization-options NFC and NFD are implemented. No normalization will be performed.")
    transcode <- identity
  }

  # keep original strings, and normalize NFC everything by default
  originals <- as.vector(strings)
  strings <- transcode(originals)

  # collapse strings for doing everything at once;
  # NAs are temporarily turned into empty strings and restored at the end
  NAs <- which(is.na(strings))
  strings[NAs] <- ""
  all <- paste(strings, collapse = internal_sep)
  all <- paste0(internal_sep, all, internal_sep)

  # --------------------
  # read or make profile
  # --------------------

  # read orthography profile (or make new one)
  if (is.null(profile)) {

    # make new orthography profile from the data itself
    if (normalize == "NFC") {
      profile <- write.profile(strings
                               , normalize = normalize
                               , sep = NULL
                               , info = FALSE
                               )
    } else {
      profile <- write.profile(strings
                               , normalize = normalize
                               , sep = ""
                               , info = FALSE
                               )
    }

  } else if (is.null(dim(profile))) {

    # use the provided profile
    if (length(profile) > 1) {
      # assume that the strings are graphemes
      profile <- data.frame(Grapheme = profile
                            , stringsAsFactors = FALSE
                            )
    } else {
      # read profile from file
      profile <- read.table(profile
                            , sep = "\t"
                            , quote = ""
                            , header = TRUE
                            , fill = TRUE
                            , colClasses = "character"
                            )
    }

  } else {
    # assume the profile is a suitable R object (data frame)
    profile <- profile
  }

  # first-pass reordering, only getting larger graphemes on top
  # ordering by grapheme size, if specified
  # necessary to get regexes in right order
  if (sum(!is.na(pmatch(ordering,"size"))) > 0) {
    # size is measured on NFD codepoints so combining marks count
    size <- nchar(stri_trans_nfd(profile[,"Grapheme"]))
    profile <- profile[order(-size), ,drop = FALSE]
  }

  # normalise characters in profile, just to be sure
  graphs <- transcode(profile[,"Grapheme"])
  if (!is.null(transliterate)) {
    trans <- transcode(profile[,transliterate])
  }

  # is there contextual information?
  l_exists <- sum(colnames(profile) == "Left") == 1
  r_exists <- sum(colnames(profile) == "Right") == 1
  c_exists <- sum(colnames(profile) == "Class") == 1

  # then normalise them too
  if (l_exists) {
    left <- transcode(profile[,"Left"])
  } else {
    left <- ""
  }
  if (r_exists) {
    right <- transcode(profile[,"Right"])
  } else {
    right <- ""
  }

  # -----------------------------------------
  # prepare regexes with context from profile
  # -----------------------------------------

  if (!regex) {
    contexts <- graphs
  } else {

    # replace regex anchors (^, $) with the internal separator, so they
    # match string boundaries inside the collapsed "all" string;
    # a second private-use codepoint protects escaped literal "\$"
    tmp <- intToUtf8(1110001)
    right <- gsub("\\$", tmp, right, fixed = TRUE)
    right <- gsub("\\$$", internal_sep, right)
    right <- gsub(tmp, "\\$", right, fixed = TRUE)
    left <- gsub("^\\^", internal_sep, left)
    left <- gsub("([^\\[])\\^", paste0("\\1",internal_sep), left)

    graphs <- gsub("\\$", tmp, graphs, fixed = TRUE)
    graphs <- gsub("\\$$", internal_sep, graphs)
    graphs <- gsub(tmp, "\\$", graphs, fixed = TRUE)
    graphs <- gsub("^\\^", internal_sep, graphs)
    # a leading dot must not match the internal separator
    graphs <- gsub("^\\.", paste0("[^", internal_sep, "]"), graphs)

    # expand named grapheme classes into alternation groups
    if (c_exists && sum(profile[,"Class"] != "") > 0) {
      classes <- unique(profile[,"Class"])
      classes <- classes[classes != ""]
      groups <- sapply(classes,function(x){
        graphs[profile[,"Class"] == x]
      })
      classes.regex <- sapply(groups,function(x){
        paste( "((", paste( x, collapse = ")|(" ), "))", sep = "")
      })
      for (i in classes) {
        left <- gsub(i, classes.regex[i], left, fixed = TRUE)
        right <- gsub(i, classes.regex[i], right, fixed = TRUE)
        graphs <- gsub(i, classes.regex[i], graphs, fixed = TRUE)
      }
    }

    # add lookahead/lookbehind syntax and combine everything together
    left[left != ""] <- paste("(?<=", left[left != ""], ")", sep = "")
    right[right != ""] <- paste("(?=", right[right != ""], ")", sep = "")

    # a dot in context must not cross the internal separator either
    left <- gsub("(?<=."
                 , paste0("(?<!", internal_sep, ")(?<=" )
                 , left
                 , fixed = TRUE
                 )
    right <- gsub("(?=."
                  , paste0("(?!", internal_sep, ")(?=" )
                  , right
                  , fixed = TRUE
                  )

    contexts <- paste0(left, graphs, right)
  }

  # -----------------
  # reorder graphemes
  # -----------------

  if (is.null(ordering)) {
    graph_order <- seq_along(graphs)
  } else {

    # ordering by grapheme size
    if (sum(!is.na(pmatch(ordering,"size"))) > 0) {
      size <- nchar(stri_trans_nfd(graphs))
    } else {
      # neutral key: TRUE for every rule, so this criterion does not sort
      size <- rep(TRUE, times = length(graphs))
    }

    # ordering by existence of context
    if (regex && (l_exists || r_exists)) {
      context <- (left != "" | right != "")
    } else {
      context <- rep(TRUE, times = length(graphs))
    }

    # reverse ordering
    if (sum(!is.na(pmatch(ordering,"reverse"))) > 0) {
      reverse <- rev(seq_along(graphs))
    } else {
      reverse <- seq_along(graphs)
    }

    # ordering by frequency of occurrence in the data
    if (sum(!is.na(pmatch(ordering,"frequency"))) > 0) {
      frequency <- stri_count_regex(all
                                    , pattern = contexts
                                    , literal = !regex
                                    , case_insensitive = case.insensitive
                                    )
    } else {
      frequency <- rep(TRUE, times = length(graphs))
    }

    # order according to dimensions chosen by user in "ordering"
    dimensions <- list( size = - size       # largest size first
                        , context = - context   # with context first
                        , reverse = reverse     # reverse first
                        , frequency = frequency # lowest frequency first
                        )
    graph_order <- do.call(order, dimensions[ordering])
  }

  # change order
  graphs <- graphs[graph_order]
  contexts <- contexts[graph_order]
  if (!is.null(transliterate)) {
    trans <- trans[graph_order]
  }

  # --------------
  # regex matching
  # --------------

  # locate and extract every candidate match for every rule at once
  if (!regex) {
    matches <- stri_locate_all_fixed(
      all
      , pattern = contexts
      , overlap = TRUE
      , case_insensitive = case.insensitive
      )
    matched_parts <- stri_extract_all_fixed(
      all
      , pattern = contexts
      , overlap = TRUE
      , case_insensitive = case.insensitive
      )
  } else {
    matches <- stri_locate_all_regex(
      all
      , pattern = contexts
      , case_insensitive = case.insensitive
      )
    matched_parts <- stri_extract_all_regex(
      all
      , pattern = contexts
      , case_insensitive = case.insensitive
      )
  }

  # --------------------------------------
  # tokenize data, either global or linear
  # --------------------------------------

  if (!is.na(pmatch(method,"global"))) {

    # =================
    # function to check whether the match is still free
    # and insert graph into "taken" when free;
    # note: mutates "taken" in the enclosing scope via <<-
    test_match <- function(context_nr) {

      m <- matches[[context_nr]]

      # check whether match is not yet taken
      not.already.taken <- apply(m, 1, function(x) {
        if (is.na(x[1])) { NA } else {
          prod(is.na(taken[x[1]:x[2]])) == 1
        }})
      free <- which(not.already.taken)

      # drop matches of this rule that overlap a previous match of
      # the same rule
      if (length(free) > 0) {
        no.self.overlap <- c(TRUE
                             , head(m[free,,drop = FALSE][,2],-1) <
                               tail(m[free,,drop = FALSE][,1],-1)
                             )
        free <- free[no.self.overlap]
      }

      # check whether graph is regex with multiple matches
      different_graphs <- unique(matched_parts[[context_nr]])
      is.regex <- length(unique(different_graphs)) > 1

      # take possible matches
      for (x in free) {
        r <- m[x,]
        if (!is.regex) {
          taken[r[1]:r[2]] <<- different_graphs
        } else {
          taken[r[1]:r[2]] <<- matched_parts[[context_nr]][x]
        }
      }

      return(m[free, , drop = FALSE])
    }
    # =================

    # preparation: one slot per character of the collapsed data
    taken <- rep(NA, times = nchar(all))

    # select matches, rule by rule in profile order
    selected <- lapply(seq_along(matches), test_match)

    # count number of matches per rule (type-stable extraction)
    matched_rules <- vapply(selected, nrow, integer(1))

    # insert internal separator
    where_sep <- stri_locate_all_fixed(all, internal_sep)[[1]][,1]
    taken[where_sep] <- internal_sep

    # remaining NAs are missing parts: remember the characters, then
    # replace them with the "missing" marker
    na_pos <- which(is.na(taken))
    missing_chars <- stri_sub(all, from = na_pos, to = na_pos)
    taken[is.na(taken)] <- missing

    # transliteration: write the transliterated value over every
    # position claimed by each rule (side effect via <<-)
    if (!is.null(transliterate)) {
      transliterated <- taken
      sapply(seq_along(selected), function(x) {
        apply(selected[[x]], 1, function(y) {
          transliterated[y[1]:y[2]] <<- trans[x]
        })
      })
    }

    # =================
    # functions to turn matches into tokenized strings

    reduce <- function(taken) {
      # replace continuation slots of longer graphs with NA, then na.omit
      sapply(selected, function(x) {
        apply(x, 1, function(y) {
          if (y[1] < y[2]) {
            taken[(y[1]+1) : y[2]] <<- NA
          }
        })
      })
      result <- na.omit(taken)
      return(result)
    }

    postprocess <- function(taken) {
      # replace separator
      if (!is.null(sep.replace)) {
        taken[taken == user_sep] <- sep.replace
      }
      # bind together tokenized parts with user separator
      taken <- paste(taken, collapse = user_sep)
      # remove multiple internal user separators
      taken <- gsub(paste0(user_sep,"{2,10}"), user_sep, taken)
      # Split string by internal separator
      result <- strsplit(taken, split = internal_sep)[[1]][-1]
      # remove user_sep at start and end
      result <- substr(x = result
                       , start = nchar(user_sep)+1
                       , stop = nchar(result)-nchar(user_sep)
                       )
      return(result)
    }
    # =================

    # make one string of the parts selected
    tokenized <- postprocess(reduce(taken))
    # make one string of transliterations
    if (!is.null(transliterate)) {
      transliterated <- postprocess(reduce(transliterated))
    }

  # ---------------------------------------------------------
  # finite-state transducer behaviour when parsing = "linear"
  # ---------------------------------------------------------

  } else if (!is.na(pmatch(method,"linear"))) {

    # preparations: flatten matches into parallel lookup vectors
    all.matches <- do.call(rbind, matches)[,1]
    position <- 1
    n_all <- nchar(all)  # loop-invariant, hoisted
    tokenized <- c()
    transliterated <- c()
    missing_chars <- c()
    matched_rules <- rep.int(x = 0, times = length(contexts))
    where_sep <- stri_locate_all_fixed(all, internal_sep)[[1]][,1]
    graphs_match_list <- unlist(matched_parts)
    contexts_match_list <- rep(seq_along(matches)
                               , times = vapply(matches, nrow, integer(1))
                               )
    if (!is.null(transliterate)) {
      trans_match_list <- rep(trans
                              , times = vapply(matches, nrow, integer(1))
                              )
    }

    # walk through all positions; at each one take the first match
    # (rules were flattened in profile order, so "first" = highest rule)
    while (position <= n_all) {

      if (position %in% where_sep) {
        # boundary between input strings
        tokenized <- c(tokenized, internal_sep)
        if (!is.null(transliterate)) {
          transliterated <- c(transliterated, internal_sep)
        }
        position <- position + 1
      } else {
        hit <- which(all.matches == position)[1]
        if (is.na(hit)) {
          # no rule matches here: emit missing marker, advance one char
          tokenized <- c(tokenized, missing)
          missing_chars <- c(missing_chars
                             , substr(all, position, position)
                             )
          if (!is.null(transliterate)) {
            transliterated <- c(transliterated, missing)
          }
          position <- position + 1
        } else {
          # consume the matched grapheme and record the rule used
          tokenized <- c(tokenized, graphs_match_list[hit])
          if (!is.null(transliterate)) {
            transliterated <- c(transliterated, trans_match_list[hit])
          }
          position <- position + nchar(graphs_match_list[hit])
          rule <- contexts_match_list[hit]
          matched_rules[rule] <- matched_rules[rule] + 1
        }
      }
    }

    # =============
    postprocess <- function(taken) {
      # bind together tokenized parts with user separator
      taken <- paste(taken, collapse = user_sep)
      # Split string by internal separator
      result <- strsplit(taken, split = internal_sep)[[1]]
      # remove user_sep at start and end
      result <- substr(result, 2, nchar(result)-1)
      result <- result[-1]
      return(result)
    }
    # =============

    # postprocessing
    tokenized <- postprocess(tokenized)
    if (!is.null(transliterate)) {
      transliterated <- postprocess(transliterated)
    }

  } else {
    stop(paste0("The tokenization method \"",method,"\" is not defined"))
  }

  # ----------------------
  # preparation of results
  # ----------------------

  # restore NA for inputs that were NA
  tokenized[NAs] <- NA
  if (is.null(transliterate)) {
    strings.out <- data.frame(
      cbind(originals = originals
            , tokenized = tokenized
            )
      , stringsAsFactors = FALSE
      )
  } else {
    transliterated[NAs] <- NA
    strings.out <- data.frame(
      cbind(originals = originals
            , tokenized = tokenized
            , transliterated = transliterated
            )
      , stringsAsFactors = FALSE
      )
  }

  # Make a list of missing and throw warning;
  # fixed = TRUE: the missing marker is a literal, not a regex
  whichProblems <- grep(pattern = missing, x = tokenized, fixed = TRUE)
  problems <- strings.out[whichProblems, c(1,2)]
  colnames(problems) <- c("originals", "errors")

  if ( nrow(problems) > 0) {
    # make a profile for missing characters
    problemChars <- write.profile(missing_chars)
    if ( !silent ) {
      warning("\nThere were unknown characters found in the input data.\nCheck output$errors for a table with all problematic strings.")
    }
  } else {
    problems <- NULL
    problemChars <- NULL
  }

  # Reorder profile according to order and add frequency of rule-use
  profile.out <- data.frame(profile[graph_order,]
                            , stringsAsFactors = FALSE
                            )
  # a one-column profile loses its column name through subsetting
  if (ncol(profile.out) == 1) {colnames(profile.out) <- "Grapheme"}
  profile.out <- cbind(matched_rules, profile.out)

  # --------------
  # output as list
  # --------------

  result <- list(strings = strings.out
                 , profile = profile.out
                 , errors = problems
                 , missing = problemChars
                 )

  if (is.null(file.out)) {
    return(result)
  } else {

    # ---------------
    # output to files
    # ---------------

    # file with tokenization is always returned
    write.table( strings.out
                 , file = paste(file.out, "_strings.tsv", sep = "")
                 , quote = FALSE, sep = "\t", row.names = FALSE)
    # file with orthography profile
    write.table( profile.out
                 , file = paste(file.out, "_profile.tsv", sep="")
                 , quote = FALSE, sep = "\t", row.names = FALSE)
    # additionally write tables with errors when they exist
    if ( !is.null(problems) ) {
      write.table( problems
                   , file = paste(file.out, "_errors.tsv", sep = "")
                   , quote = FALSE, sep = "\t", row.names = TRUE)
      write.table( problemChars
                   , file = paste(file.out, "_missing.tsv", sep = "")
                   , quote = FALSE, sep = "\t", row.names = FALSE)
    }
    return(invisible(result))
  }
}
| /R/tokenize.R | no_license | cysouw/qlcTokenize | R | false | false | 17,695 | r | # ================
# tokenize strings
# ================
tokenize <- function(strings
, profile = NULL
, transliterate = NULL
, method = "global"
, ordering = c("size","context","reverse")
, sep = " "
, sep.replace = NULL
, missing = "\u2047"
, normalize = "NFC"
, regex = FALSE
, silent = FALSE
, file.out = NULL
) {
# ---------------
# preprocess data
# ---------------
strings <- as.character(strings)
# option gives errors, so removed for now
case.insensitive = FALSE
# separators
internal_sep <- intToUtf8(1110000)
user_sep <- sep
# normalization
if (normalize == "NFC") {
transcode <- stri_trans_nfc
} else if (normalize == "NFD") {
transcode <- stri_trans_nfd
} else {
warning("Only the normalization-options NFC and NFD are implemented. No normalization will be performed.")
transcode <- identity
}
# keep original strings, and normalize NFC everything by default
originals <- as.vector(strings)
strings <- transcode(originals)
# collapse strings for doing everything at once
NAs <- which(is.na(strings))
strings[NAs] <- ""
all <- paste(strings, collapse = internal_sep)
all <- paste0(internal_sep, all, internal_sep)
# --------------------
# read or make profile
# --------------------
# read orthography profile (or make new one)
if (is.null(profile)) {
# make new orthography profile
if (normalize == "NFC") {
profile <- write.profile(strings
, normalize = normalize
, sep = NULL
, info = FALSE
)
} else {
profile <- write.profile(strings
, normalize = normalize
, sep = ""
, info = FALSE
)
}
} else if (is.null(dim(profile))) {
# use the provided profile
if (length(profile) > 1) {
# assume that the strings are graphemes
profile <- data.frame(Grapheme = profile
, stringsAsFactors = FALSE
)
} else {
# read profile from file
profile <- read.table(profile
, sep = "\t"
, quote = ""
, header = TRUE
, fill = TRUE
, colClasses = "character"
)
}
} else {
# assume the profile is a suitable R object
profile <- profile
}
# first-pass reordering, only getting larger graphemes on top
# ordering by grapheme size, if specified
# necessary to get regexes in right order
if (sum(!is.na(pmatch(ordering,"size"))) > 0) {
size <- nchar(stri_trans_nfd(profile[,"Grapheme"]))
profile <- profile[order(-size), ,drop = FALSE]
}
# normalise characters in profile, just to be sure
graphs <- transcode(profile[,"Grapheme"])
if (!is.null(transliterate)) {
trans <- transcode(profile[,transliterate])
}
# is there contextual information?
l_exists <- sum(colnames(profile) == "Left") == 1
r_exists <- sum(colnames(profile) == "Right") == 1
c_exists <- sum(colnames(profile) == "Class") == 1
# then normalise them too
if (l_exists) {
left <- transcode(profile[,"Left"])
} else {
left <- ""
}
if (r_exists) {
right <- transcode(profile[,"Right"])
} else {
right <- ""
}
# -----------------------------------------
# prepare regexes with context from profile
# -----------------------------------------
if (!regex) {
contexts <- graphs
} else {
# replace regex boundaries with internal separator
tmp <- intToUtf8(1110001)
right <- gsub("\\$", tmp, right, fixed = TRUE)
right <- gsub("\\$$", internal_sep, right)
right <- gsub(tmp, "\\$", right, fixed = TRUE)
left <- gsub("^\\^", internal_sep, left)
left <- gsub("([^\\[])\\^", paste0("\\1",internal_sep), left)
graphs <- gsub("\\$", tmp, graphs, fixed = TRUE)
graphs <- gsub("\\$$", internal_sep, graphs)
graphs <- gsub(tmp, "\\$", graphs, fixed = TRUE)
graphs <- gsub("^\\^", internal_sep, graphs)
graphs <- gsub("^\\.", paste0("[^", internal_sep, "]"), graphs)
# make classes if there is anything there
if (c_exists && sum(profile[,"Class"] != "") > 0) {
classes <- unique(profile[,"Class"])
classes <- classes[classes != ""]
groups <- sapply(classes,function(x){
graphs[profile[,"Class"] == x]
})
classes.regex <- sapply(groups,function(x){
paste( "((", paste( x, collapse = ")|(" ), "))", sep = "")
})
for (i in classes) {
left <- gsub(i, classes.regex[i], left, fixed = TRUE)
right <- gsub(i, classes.regex[i], right, fixed = TRUE)
graphs <- gsub(i, classes.regex[i], graphs, fixed = TRUE)
}
}
# add lookahead/lookbehind syntax and combine everything together
left[left != ""] <- paste("(?<=", left[left != ""], ")", sep = "")
right[right != ""] <- paste("(?=", right[right != ""], ")", sep = "")
# replace dot in context with internal separator
left <- gsub("(?<=."
, paste0("(?<!", internal_sep, ")(?<=" )
, left
, fixed = TRUE
)
right <- gsub("(?=."
, paste0("(?!", internal_sep, ")(?=" )
, right
, fixed = TRUE
)
contexts <- paste0(left, graphs, right)
}
# -----------------
# reorder graphemes
# -----------------
if (is.null(ordering)) {
graph_order <- 1:length(graphs)
} else {
# ordering by grapheme size
if (sum(!is.na(pmatch(ordering,"size"))) > 0) {
size <- nchar(stri_trans_nfd(graphs))
} else {
size <- rep(T, times = length(graphs))
}
# ordering by existing of context
if (regex && (l_exists || r_exists)) {
context <- (left != "" | right != "")
} else {
context <- rep(T, times = length(graphs))
}
# reverse ordering
if (sum(!is.na(pmatch(ordering,"reverse"))) > 0) {
reverse <- length(graphs):1
} else {
reverse <- 1:length(graphs)
}
# ordering by frequency of occurrence
if (sum(!is.na(pmatch(ordering,"frequency"))) > 0) {
frequency <- stri_count_regex(all
, pattern = contexts
, literal = !regex
, case_insensitive = case.insensitive
)
} else {
frequency <- rep(T, times = length(graphs))
}
# order according to dimensions chosen by user in "ordering"
dimensions <- list( size = - size # largest size first
, context = - context # with context first
, reverse = reverse # reverse first
, frequency = frequency # lowest frequency first
)
graph_order <- do.call(order, dimensions[ordering])
}
# change order
graphs <- graphs[graph_order]
contexts <- contexts[graph_order]
if (!is.null(transliterate)) {
trans <- trans[graph_order]
}
# --------------
# regex matching
# --------------
if (!regex) {
matches <- stri_locate_all_fixed(
all
, pattern = contexts
, overlap = TRUE
, case_insensitive = case.insensitive
)
matched_parts <- stri_extract_all_fixed(
all
, pattern = contexts
, overlap = TRUE
, case_insensitive = case.insensitive
)
} else {
matches <- stri_locate_all_regex(
all
, pattern = contexts
, case_insensitive = case.insensitive
)
matched_parts <- stri_extract_all_regex(
all
, pattern = contexts
, case_insensitive = case.insensitive
)
}
# --------------------------------------
# tokenize data, either global or linear
# --------------------------------------
if (!is.na(pmatch(method,"global"))) {
# =================
# function to check whether the match is still free
# and insert graph into "taken" when free
# Greedy selection step for the "global" tokenization method.
# For all matches of rule `context_nr`, keep those whose character span in
# the input string is not yet claimed in the closure vector `taken`, drop
# matches of this same rule that overlap an earlier kept match, and mark the
# surviving spans in `taken` (via <<-) with the matched grapheme.
# Returns the kept matches as a two-column (start, end) matrix.
# Relies on closure variables: `matches`, `matched_parts`, `taken`.
test_match <- function(context_nr) {
m <- matches[[context_nr]]
# check whether match is not yet taken
# (a span is free only when every position in it is still NA in `taken`)
not.already.taken <- apply(m, 1, function(x) {
if (is.na(x[1])) { NA } else {
prod(is.na(taken[x[1]:x[2]])) == 1
}})
free <- which(not.already.taken)
if (length(free) > 0) {
# drop matches of this rule whose start lies inside the previous kept match
no.self.overlap <- c(TRUE
, head(m[free,,drop = FALSE][,2],-1) <
tail(m[free,,drop = FALSE][,1],-1)
)
free <- free[no.self.overlap]
}
# check whether graph is regex with multiple matches
# (a literal grapheme always yields a single unique matched string)
different_graphs <- unique(matched_parts[[context_nr]])
is.regex <- length(unique(different_graphs)) > 1
# take possible matches: claim every position of each kept span in `taken`
for (x in free) {
r <- m[x,]
if (!is.regex) {
taken[r[1]:r[2]] <<- different_graphs
} else {
taken[r[1]:r[2]] <<- matched_parts[[context_nr]][x]
}
}
return(m[free, , drop = FALSE])
}
# =================
# preparation
taken <- rep(NA, times = nchar(all))
# select matches
selected <- sapply(1:length(matches), test_match, simplify = FALSE)
# count number of matches per rule
matched_rules <- sapply(selected, dim)[1,]
# insert internal separator
where_sep <- stri_locate_all_fixed(all, internal_sep)[[1]][,1]
taken[where_sep] <- internal_sep
# remaining NAs are missing parts
missing_chars <- sapply(which(is.na(taken))
, function(x) { stri_sub(all, x, x) }
)
taken[is.na(taken)] <- missing
# transliteration
if (!is.null(transliterate)) {
transliterated <- taken
sapply(1:length(selected), function(x) {
apply(selected[[x]], 1, function(y) {
transliterated[y[1]:y[2]] <<- trans[x]
})
})
}
# =================
# functions to turn matches into tokenized strings
# Collapse the per-character vector `taken` to one entry per grapheme:
# for every kept match that spans more than one character, blank the
# trailing positions with NA (via <<-), then drop all NAs.
# Relies on the closure variable `selected` (list of kept match matrices).
reduce <- function(taken) {
# replace longer graphs with NA, then na.omit
sapply(selected, function(x) {
apply(x, 1, function(y) {
if (y[1] < y[2]) {
taken[(y[1]+1) : y[2]] <<- NA
}
})
})
result <- na.omit(taken)
return(result)
}
# Turn the reduced grapheme vector into the final per-string output:
# optionally substitute the user separator, join everything with `user_sep`,
# squeeze runs of separators, split on the internal string separator, and
# trim the separator that flanks each resulting string.
# Relies on closure variables: `sep.replace`, `user_sep`, `internal_sep`.
postprocess <- function(taken) {
# replace separator
if (!is.null(sep.replace)) {
taken[taken == user_sep] <- sep.replace
}
# bind together tokenized parts with user separator
taken <- paste(taken, collapse = user_sep)
# remove multiple internal user separators
# (assumes at most 10 consecutive separators occur -- TODO confirm)
taken <- gsub(paste0(user_sep,"{2,10}"), user_sep, taken)
# Split string by internal separator
# ([-1] drops the empty piece before the leading separator)
result <- strsplit(taken, split = internal_sep)[[1]][-1]
# remove user_sep at start and end
result <- substr(x = result
, start = nchar(user_sep)+1
, stop = nchar(result)-nchar(user_sep)
)
return(result)
}
# =================
# make one string of the parts selected
tokenized <- postprocess(reduce(taken))
# make one string of transliterations
if (!is.null(transliterate)) {
transliterated <- postprocess(reduce(transliterated))
}
# ---------------------------------------------------------
# finite-state transducer behaviour when parsing = "linear"
# ---------------------------------------------------------
} else if (!is.na(pmatch(method,"linear"))) {
# preparations
all.matches <- do.call(rbind,matches)[,1]
position <- 1
tokenized <- c()
transliterated <- c()
missing_chars <- c()
matched_rules <- rep.int(x = 0, times = length(contexts))
where_sep <- stri_locate_all_fixed(all, internal_sep)[[1]][,1]
graphs_match_list <- unlist(matched_parts)
contexts_match_list <- rep(1:length(matches)
, times = sapply(matches, dim)[1,]
)
if (!is.null(transliterate)) {
trans_match_list <- rep(trans
, times = sapply(matches, dim)[1,]
)
}
# loop through all positions and take first match
while(position <= nchar(all)) {
if (position %in% where_sep) {
tokenized <- c(tokenized, internal_sep)
if (!is.null(transliterate)) {
transliterated <- c(transliterated, internal_sep)
}
position <- position +1
} else {
hit <- which(all.matches == position)[1]
if (is.na(hit)) {
tokenized <- c(tokenized, missing)
missing_chars <- c(missing_chars
, substr(all, position, position)
)
if (!is.null(transliterate)) {
transliterated <- c(transliterated, missing)
}
position <- position + 1
} else {
tokenized <- c(tokenized, graphs_match_list[hit])
if (!is.null(transliterate)) {
transliterated <- c(transliterated, trans_match_list[hit])
}
position <- position + nchar(graphs_match_list[hit])
rule <- contexts_match_list[hit]
matched_rules[rule] <- matched_rules[rule] + 1
}
}
}
# =============
# Linear-method variant of postprocess: join the token vector with the user
# separator, split on the internal separator, and strip one leading and one
# trailing character (the flanking separator) from each string; [-1] drops
# the empty piece before the first separator.
# Relies on closure variables: `user_sep`, `internal_sep`.
postprocess <- function(taken) {
# bind together tokenized parts with user separator
taken <- paste(taken, collapse = user_sep)
# Split string by internal separator
result <- strsplit(taken, split = internal_sep)[[1]]
# remove user_sep at start and end
result <- substr(result, 2, nchar(result)-1)
result <- result[-1]
return(result)
}
# =============
# postprocessing
tokenized <- postprocess(tokenized)
if (!is.null(transliterate)) {
transliterated <- postprocess(transliterated)
}
} else {
stop(paste0("The tokenization method \"",method,"\" is not defined"))
}
# ----------------------
# preparation of results
# ----------------------
tokenized[NAs] <- NA
if (is.null(transliterate)) {
strings.out <- data.frame(
cbind(originals = originals
, tokenized = tokenized
)
, stringsAsFactors = FALSE
)
} else {
transliterated[NAs] <- NA
strings.out <- data.frame(
cbind(originals = originals
, tokenized = tokenized
, transliterated = transliterated
)
, stringsAsFactors = FALSE
)
}
# Make a list of missing and throw warning
whichProblems <- grep(pattern = missing, x = tokenized)
problems <- strings.out[whichProblems, c(1,2)]
colnames(problems) <- c("originals", "errors")
if ( nrow(problems) > 0) {
# make a profile for missing characters
problemChars <- write.profile(missing_chars)
if ( !silent ) {
warning("\nThere were unknown characters found in the input data.\nCheck output$errors for a table with all problematic strings.")
}
} else {
problems <- NULL
problemChars <- NULL
}
# Reorder profile according to order and add frequency of rule-use
# frequency <- head(frequency, -1)
profile.out <- data.frame(profile[graph_order,]
, stringsAsFactors = FALSE
)
if (ncol(profile.out) == 1) {colnames(profile.out) <- "Grapheme"}
profile.out <- cbind(matched_rules, profile.out)
# --------------
# output as list
# --------------
result <- list(strings = strings.out
, profile = profile.out
, errors = problems
, missing = problemChars
)
if (is.null(file.out)) {
return(result)
} else {
# ---------------
# output to files
# ---------------
# file with tokenization is always returned
write.table( strings.out
, file = paste(file.out, "_strings.tsv", sep = "")
, quote = FALSE, sep = "\t", row.names = FALSE)
# file with orthography profile
write.table( profile.out
, file = paste(file.out, "_profile.tsv", sep="")
, quote = FALSE, sep = "\t", row.names = FALSE)
# additionally write tables with errors when they exist
if ( !is.null(problems) ) {
write.table( problems
, file = paste(file.out, "_errors.tsv", sep = "")
, quote = FALSE, sep = "\t", row.names = TRUE)
write.table( problemChars
, file = paste(file.out, "_missing.tsv", sep = "")
, quote = FALSE, sep = "\t", row.names = FALSE)
}
return(invisible(result))
}
}
|
library(xlsx)
library(ggplot2)
library(tidyverse)
library(dplyr)
library(corrplot)
library(FactoMineR)
library(factoextra)
library(Hmisc)
# Import the individuals table from the clipboard
# (expects tab-separated data with a header row).
individus <- read.table(file = "clipboard", sep = "\t", header=TRUE)
colnames(individus)<- c('Subject','age','gender','classe_age')
print(head(individus))
# Import the genus-level data and keep only the columns of interest
# (drops columns 1-5 and 76-77).
table_espece <- read.csv("table_age_1_sex_espece_diversite_VF.csv", sep=",", dec=".", header=TRUE)
table_espece<- select(table_espece, -c(1,2,3, 4, 5, 76, 77))
# Prepend the age-class column (column 4 of `individus`)
classe_age<-select(individus, 4)
table_acp<-cbind(classe_age, table_espece)
# PCA, keeping the first 50 components
res_acp <- PCA(table_acp, ncp=50, graph=FALSE)
# Save the fitted PCA object for later reuse
saveRDS(res_acp, "res_acp_classesage_10_species.rds")
# Display the results and the eigenvalues
print(res_acp)
eig_val <- get_eigenvalue(res_acp)
eig_val
# Plots: variable map, and individuals coloured by age class
fviz_pca_var(res_acp, col.var = "black")
fviz_pca_ind (res_acp, habillage="classe_age", label=FALSE)
fviz_pca_ind (res_acp, select.ind=list(classe_age=c(1,2,3,4,5)), habillage="classe_age", label=FALSE)
# Select the genera contributing most to the first fifty components:
# retrieve the PCA variable statistics (`contrib` holds contributions).
var <- get_pca_var(res_acp)
variables_ <- as.data.frame(var$contrib)
# For each of the first 50 principal components, keep the variables (genera)
# whose contribution to that component exceeds 1.
#
# BUG FIX: the original code repeated select()/filter() fifty times
# (dimension1 .. dimension50) and then built liste_dimension with
# c(list(row.names(dimension1)), row.names(dimension2), ...), which kept a
# character VECTOR only for dimension 1 and flattened dimensions 2..50 into
# one element per row name, so liste_dimension[[i]] for i >= 2 was a single
# string.  In addition, dplyr::filter() resets data.frame row names, so
# row.names(dimensionN) no longer returned the genus names at all.
# Indexing the row names of `variables_` directly fixes both problems and
# yields a proper list of 50 character vectors of genus names.
typeof(variables_)
# List of the genera contributing > 1 to each of the 50 dimensions of interest
liste_dimension <- lapply(1:50, function(i) {
contrib <- variables_[[paste0("Dim.", i)]]
row.names(variables_)[contrib > 1]
})
# Empty container that will accumulate the selected genera.
species_selectionne <- NULL
# Infix helper: TRUE for each element of x that is absent from `table`
# (the logical negation of the built-in %in% operator).
`%not in%` <- function(x, table) !(x %in% table)
# Walk the 50 dimension lists and collect every contributing genus exactly
# once (no repetitions), using the %not in% helper defined above.
#
# BUG FIX: in the original nested loops the inner index `l` leaked out of
# its loop, so only the LAST element of each dimension list was ever tested
# for insertion, and `presence` was always >= 1 because every element
# trivially occurs in its own list.  The final line also tried to `+` two
# character vectors, which raises an error in R.
for (i in 1:50) {
for (l in seq_along(liste_dimension[[i]])) {
genre <- liste_dimension[[i]][l]
if (genre %not in% species_selectionne) {
species_selectionne[length(species_selectionne)+1] <- genre
}
}
}
# Final display
species_selectionne
| /Programs/4_1_1_ACP_Selection_variables_especes.R | no_license | AlQatrum/ProjetFilRouge | R | false | false | 8,121 | r | library(xlsx)
library(ggplot2)
library(tidyverse)
library(dplyr)
library(corrplot)
library(FactoMineR)
library(factoextra)
library(Hmisc)
# Import the individuals table from the clipboard
# (expects tab-separated data with a header row).
individus <- read.table(file = "clipboard", sep = "\t", header=TRUE)
colnames(individus)<- c('Subject','age','gender','classe_age')
print(head(individus))
# Import the genus-level data and keep only the columns of interest
# (drops columns 1-5 and 76-77).
table_espece <- read.csv("table_age_1_sex_espece_diversite_VF.csv", sep=",", dec=".", header=TRUE)
table_espece<- select(table_espece, -c(1,2,3, 4, 5, 76, 77))
# Prepend the age-class column (column 4 of `individus`)
classe_age<-select(individus, 4)
table_acp<-cbind(classe_age, table_espece)
# PCA, keeping the first 50 components
res_acp <- PCA(table_acp, ncp=50, graph=FALSE)
# Save the fitted PCA object for later reuse
saveRDS(res_acp, "res_acp_classesage_10_species.rds")
# Display the results and the eigenvalues
print(res_acp)
eig_val <- get_eigenvalue(res_acp)
eig_val
# Plots: variable map, and individuals coloured by age class
fviz_pca_var(res_acp, col.var = "black")
fviz_pca_ind (res_acp, habillage="classe_age", label=FALSE)
fviz_pca_ind (res_acp, select.ind=list(classe_age=c(1,2,3,4,5)), habillage="classe_age", label=FALSE)
# Select the genera contributing most to the first fifty components:
# retrieve the PCA variable statistics (`contrib` holds contributions).
var <- get_pca_var(res_acp)
variables_ <- as.data.frame(var$contrib)
# For each of the first 50 principal components, keep the variables (genera)
# whose contribution to that component exceeds 1.
#
# BUG FIX: the original code repeated select()/filter() fifty times
# (dimension1 .. dimension50) and then built liste_dimension with
# c(list(row.names(dimension1)), row.names(dimension2), ...), which kept a
# character VECTOR only for dimension 1 and flattened dimensions 2..50 into
# one element per row name, so liste_dimension[[i]] for i >= 2 was a single
# string.  In addition, dplyr::filter() resets data.frame row names, so
# row.names(dimensionN) no longer returned the genus names at all.
# Indexing the row names of `variables_` directly fixes both problems and
# yields a proper list of 50 character vectors of genus names.
typeof(variables_)
# List of the genera contributing > 1 to each of the 50 dimensions of interest
liste_dimension <- lapply(1:50, function(i) {
contrib <- variables_[[paste0("Dim.", i)]]
row.names(variables_)[contrib > 1]
})
# Empty container that will accumulate the selected genera.
species_selectionne <- NULL
# Infix helper: TRUE for each element of x that is absent from `table`
# (the logical negation of the built-in %in% operator).
`%not in%` <- function(x, table) !(x %in% table)
# Walk the 50 dimension lists and collect every contributing genus exactly
# once (no repetitions), using the %not in% helper defined above.
#
# BUG FIX: in the original nested loops the inner index `l` leaked out of
# its loop, so only the LAST element of each dimension list was ever tested
# for insertion, and `presence` was always >= 1 because every element
# trivially occurs in its own list.  The final line also tried to `+` two
# character vectors, which raises an error in R.
for (i in 1:50) {
for (l in seq_along(liste_dimension[[i]])) {
genre <- liste_dimension[[i]][l]
if (genre %not in% species_selectionne) {
species_selectionne[length(species_selectionne)+1] <- genre
}
}
}
# Final display
species_selectionne
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/main.R
\name{estimate}
\alias{estimate}
\title{wrapper around RM function from eRm}
\usage{
estimate(n, items, model_sim, ...)
}
\arguments{
\item{n, items}{numeric}
\item{model_sim}{function to simulate Rasch data}
\item{...}{additional arguments to \code{model_sim}}
}
\value{
returns the estimated eRm object
}
\description{
tryCatch until the simulated data matrix is neither ill-conditioned
nor has a participant with all 0 or all 1
}
\examples{
estimate(400, 30, sim.2pl, .5)
}
| /man/estimate.Rd | no_license | fdabl/simrasch | R | false | false | 569 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/main.R
\name{estimate}
\alias{estimate}
\title{wrapper around RM function from eRm}
\usage{
estimate(n, items, model_sim, ...)
}
\arguments{
\item{n, items}{numeric}
\item{model_sim}{function to simulate Rasch data}
\item{...}{additional arguments to \code{model_sim}}
}
\value{
returns the estimated eRm object
}
\description{
tryCatch until the simulated data matrix is neither ill-conditioned
nor has a participant with all 0 or all 1
}
\examples{
estimate(400, 30, sim.2pl, .5)
}
|
# Auto-extracted example script for qdap::is.global ("Test If Environment
# is Global"); each call below probes is.global() from a different scope.
library(qdap)
### Name: is.global
### Title: Test If Environment is Global
### Aliases: is.global
### ** Examples
is.global() # called from the global environment
lapply(1:3, function(i) is.global()) # called from an anonymous function
FUN <- function() is.global(); FUN() # called from inside a named function
FUN2 <- function(x = is.global(2)) x # called while evaluating a default argument; presumably 2 selects a frame -- see ?is.global
FUN2()
FUN3 <- function() FUN2(); FUN3() # nested function calls
| /data/genthat_extracted_code/qdap/examples/is.global.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 286 | r | library(qdap)
# Auto-extracted example script for qdap's is.global(); each call below
# probes is.global() from a different scope.
### Name: is.global
### Title: Test If Environment is Global
### Aliases: is.global
### ** Examples
is.global() # called from the global environment
lapply(1:3, function(i) is.global()) # called from an anonymous function
FUN <- function() is.global(); FUN() # called from inside a named function
FUN2 <- function(x = is.global(2)) x # called while evaluating a default argument; presumably 2 selects a frame -- see ?is.global
FUN2()
FUN3 <- function() FUN2(); FUN3() # nested function calls
|
#Linear Regression lab
# Read in data: lab02.csv is expected to provide the columns Height and
# Selfesteem used throughout this script.
selfesteem.data <- read.csv(".\\lab02.csv")
selfesteem.data
#Basic Scatterplot
?plot
plot(selfesteem.data$Height,selfesteem.data$Selfesteem)
# Scatterplot with labels and explicitly controlled axes
plot(selfesteem.data$Height,selfesteem.data$Selfesteem,
main="Scatterplot of Person Height versus Self Esteem",
xlab = "Height", ylab="Self Esteem",
xlim=c(55, 75), ylim=c(2.5, 5.5), pch = 8, col="seagreen3",
cex=1.5, cex.lab = 1.5, cex.main = 1.5)
# Sample means and standard deviations of both variables
x.mean <- mean(selfesteem.data$Height)
x.mean
y.mean <- mean(selfesteem.data$Selfesteem)
y.mean
x.sd <- sd(selfesteem.data$Height)
x.sd
y.sd <- sd(selfesteem.data$Selfesteem)
y.sd
# Sample correlation; pairwise.complete.obs ignores pairs containing NA
cor(selfesteem.data$Height,selfesteem.data$Selfesteem, use="pairwise.complete.obs")
#Simple Linear Regression
# BUG FIX: the original lines fit lm(housedata$HousePrice ~ housedata$Size),
# but no `housedata` object exists in this lab -- it was copy-pasted from a
# house-price example.  This script works with selfesteem.data (Height vs
# Selfesteem, plotted above), so regress Selfesteem on Height instead; the
# abline(m, ...) that follows then matches the scatterplot.
lm(Selfesteem ~ Height, data = selfesteem.data)
m <- lm(Selfesteem ~ Height, data = selfesteem.data)
#Adding regression line to the current plot
abline(m,col="red") | /Labs/Lab02/Lab02.R | no_license | abdelrady/BigDataAnalytics-Labs | R | false | false | 970 | r | #Linear Regression lab
# Read in data: lab02.csv is expected to provide the columns Height and
# Selfesteem used throughout this script.
selfesteem.data <- read.csv(".\\lab02.csv")
selfesteem.data
#Basic Scatterplot
?plot
plot(selfesteem.data$Height,selfesteem.data$Selfesteem)
# Scatterplot with labels and explicitly controlled axes
plot(selfesteem.data$Height,selfesteem.data$Selfesteem,
main="Scatterplot of Person Height versus Self Esteem",
xlab = "Height", ylab="Self Esteem",
xlim=c(55, 75), ylim=c(2.5, 5.5), pch = 8, col="seagreen3",
cex=1.5, cex.lab = 1.5, cex.main = 1.5)
# Sample means and standard deviations of both variables
x.mean <- mean(selfesteem.data$Height)
x.mean
y.mean <- mean(selfesteem.data$Selfesteem)
y.mean
x.sd <- sd(selfesteem.data$Height)
x.sd
y.sd <- sd(selfesteem.data$Selfesteem)
y.sd
# Sample correlation; pairwise.complete.obs ignores pairs containing NA
cor(selfesteem.data$Height,selfesteem.data$Selfesteem, use="pairwise.complete.obs")
#Simple Linear Regression
# BUG FIX: the original lines fit lm(housedata$HousePrice ~ housedata$Size),
# but no `housedata` object exists in this lab -- it was copy-pasted from a
# house-price example.  This script works with selfesteem.data (Height vs
# Selfesteem, plotted above), so regress Selfesteem on Height instead; the
# abline(m, ...) that follows then matches the scatterplot.
lm(Selfesteem ~ Height, data = selfesteem.data)
m <- lm(Selfesteem ~ Height, data = selfesteem.data)
#Adding regression line to the current plot
abline(m,col="red") |
#assume no NA values
#moved to myscale.r
# myscale <- function(x){
# (x - mean(x)) / sd(x)
# }
# Replace file-name-unsafe characters and whitespace in `txt` with single
# underscores, squeezing any resulting runs of '_' down to one.
# Character list from
# http://gavinmiller.io/2016/creating-a-secure-sanitization-function/
sanitize <- function(txt) {
# unsafe characters in regex-escaped form
unsafe <- c('/', '\\+', '\\\\','\\?','%','\\*',':','\\|','\\"','<','>','\\.')
# fold them into a single regex character class
unsafe_class <- paste0('[', paste(unsafe, collapse = ''), ']')
cleaned <- gsub(unsafe_class, '_', txt) # unsafe characters -> '_'
cleaned <- gsub('\\s', '_', cleaned) # whitespace -> '_' (handled outside the class)
gsub('__+', '_', cleaned) # squeeze repeated underscores
}
# Write `df` to a date-stamped CSV ("<prefix>_<date>.csv") in the session
# temp directory, then open it with the platform's default application.
# NOTE(review): if write.csv() or the open command errors, the working
# directory is NOT restored (the final setwd(wd) is skipped);
# consider on.exit(setwd(wd)).
writeDF <- function(df,prefix) {
filename <- paste(prefix,"_",Sys.Date(),".csv",sep="")
wd<-getwd() #store the cwd for housekeeping activity at the end - just so your script plays nice
setwd (tempdir())
write.csv(df, file=filename, row.names=FALSE)
if(.Platform$OS.type=='unix') { #also returns "unix" for mac
system(paste("open", filename))
} else if(.Platform$OS.type=='windows') { #haven't tested this on windows
shell.exec(filename) #opens the file in excel
}
setwd(wd) #return to original cwd when done
} | /R/hfns.r | no_license | benscarlson/bencmisc | R | false | false | 1,287 | r | #assume no NA values
#moved to myscale.r
# myscale <- function(x){
# (x - mean(x)) / sd(x)
# }
# Replace file-name-unsafe characters and whitespace in `txt` with single
# underscores, squeezing any resulting runs of '_' down to one.
# Character list from
# http://gavinmiller.io/2016/creating-a-secure-sanitization-function/
sanitize <- function(txt) {
# unsafe characters in regex-escaped form
unsafe <- c('/', '\\+', '\\\\','\\?','%','\\*',':','\\|','\\"','<','>','\\.')
# fold them into a single regex character class
unsafe_class <- paste0('[', paste(unsafe, collapse = ''), ']')
cleaned <- gsub(unsafe_class, '_', txt) # unsafe characters -> '_'
cleaned <- gsub('\\s', '_', cleaned) # whitespace -> '_' (handled outside the class)
gsub('__+', '_', cleaned) # squeeze repeated underscores
}
# Write a data frame to a date-stamped CSV in tempdir() and open it with the
# OS default application. NOTE(review): restore of the working directory is
# skipped if write.csv() errors; on.exit(setwd(wd), add = TRUE) would be safer.
writeDF <- function(df,prefix) {
filename <- paste(prefix,"_",Sys.Date(),".csv",sep="")
wd<-getwd() #store the cwd for housekeeping activity at the end - just so your script plays nice
setwd (tempdir())
write.csv(df, file=filename, row.names=FALSE)
if(.Platform$OS.type=='unix') { #also returns "unix" for mac
system(paste("open", filename))
} else if(.Platform$OS.type=='windows') { #haven't tested this on windows
shell.exec(filename) #opens the file in excel
}
setwd(wd) #return to original cwd when done
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iam_operations.R
\name{get_instance_profile}
\alias{get_instance_profile}
\title{Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role}
\usage{
get_instance_profile(InstanceProfileName)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to get information about.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: \_+=,.@-}
}
\description{
Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html}{About Instance Profiles} in the \emph{IAM User Guide}.
}
\section{Accepted Parameters}{
\preformatted{get_instance_profile(
InstanceProfileName = "string"
)
}
}
\examples{
# The following command gets information about the instance profile named
# ExampleInstanceProfile.
\donttest{get_instance_profile(
InstanceProfileName = "ExampleInstanceProfile"
)}
}
| /service/paws.iam/man/get_instance_profile.Rd | permissive | CR-Mercado/paws | R | false | true | 1,327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iam_operations.R
\name{get_instance_profile}
\alias{get_instance_profile}
\title{Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role}
\usage{
get_instance_profile(InstanceProfileName)
}
\arguments{
\item{InstanceProfileName}{[required] The name of the instance profile to get information about.
This parameter allows (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: \_+=,.@-}
}
\description{
Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html}{About Instance Profiles} in the \emph{IAM User Guide}.
}
\section{Accepted Parameters}{
\preformatted{get_instance_profile(
InstanceProfileName = "string"
)
}
}
\examples{
# The following command gets information about the instance profile named
# ExampleInstanceProfile.
\donttest{get_instance_profile(
InstanceProfileName = "ExampleInstanceProfile"
)}
}
|
################# Homework 10 ###################
setwd("/Users/vincentcholewa/Documents/GAT/ISYE/isye_wd")
# # Data Set Information:
# # Samples arrive periodically as Dr. Wolberg reports his clinical cases. The database therefore reflects this chronological
# #grouping of the data. This grouping information appears immediately below, having been removed from the data itself:
# # Group 1: 367 instances (January 1989)
# # Group 2: 70 instances (October 1989)
# # Group 3: 31 instances (February 1990)
# # Group 4: 17 instances (April 1990)
# # Group 5: 48 instances (August 1990)
# # Group 6: 49 instances (Updated January 1991)
# # Group 7: 31 instances (June 1991)
# # Group 8: 86 instances (November 1991)
#
# 1. Sample code number id number
# 2. Clump Thickness 1 - 10
# 3. Uniformity of Cell Size 1 - 10
# 4. Uniformity of Cell Shape 1 - 10
# 5. Marginal Adhesion 1 - 10
# 6. Single Epithelial Cell Size 1 - 10
# 7. Bare Nuclei 1 - 10
# 8. Bland Chromatin 1 - 10
# 9. Normal Nucleoli 1 - 10
# 10. Mitoses 1 - 10
# 11. Class: (2 for benign, 4 for malignant)
# Load the Wisconsin breast-cancer data (no header row; "?" marks missing
# values, which are handled further below).
bc_col_names <- c('code_num', 'thickness', 'uniformity_size', 'uniformity_shape',
                  'adhesion', 'epithelial_size',
                  'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'class')
bc <- read.table(file = "breast_cancer.data", header = FALSE, sep = ",",
                 col.names = bc_col_names)
# Keep an untouched copy for locating the original "?" entries later.
# Copying in R avoids re-reading the same file from disk (the original code
# called read.table() twice with an identical, duplicated column-name vector).
bc_missing <- bc
#bc
bc_q <- bc == "?"
# replace elements with NA
is.na(bc) = bc_q
colSums(is.na(bc))
# > colSums(is.na(bc))
# code_num thickness uniformity_size uniformity_shape
# 0 0 0 0
# adhesion epithelial_size nuclei chromatin
# 0 0 16 0
# nucleoli mitoses class
# 0 0 0
# Bare nuclei appears to be the only variable that contains values we need to impute for
# and has a total of 16 instances that reflect NA.
## There are two types of missing data:
# 1. MCAR: missing completely at random. This is the desirable scenario in case of missing data.
# 2. MNAR: missing not at random. Missing not at random data is a more serious issue and in this case it
# might be wise to check the data gathering process further and try to understand why the information is missing.
# Let's have a look at the bare nuclei dataset
bc$nuclei
# When looking at the data set a large portion of NAs are covered from rows 136 to rows 316.
# This would be something to examine in more detail to see if a trend occurred during a survey
# that is creating the missing fields
length((bc$nuclei))
# 699
# Per the lecture videos, the amount of n/a should not exceed 5%. As you will see below, this dataset's
# NA count amounts to only 2.2%
sum(is.na(bc$nuclei))/length((bc$nuclei))*100
# 2.288984%
# Let's review by looking at the summary statistics and charting a pairwise visual to assess correlations.
summary(bc)
pairs(bc)
library(mice)
library(VIM)
# The package MICE is a good library to handle missing data.
# The package creates multiple imputations (replacement values) for multivariate missing data.
# The method is based on Fully Conditional Specification, where each incomplete variable is imputed by a separate model.
# The MICE algorithm can impute mixes of continuous, binary, unordered categorical and ordered categorical data.
# In addition, MICE can impute continuous two-level data, and maintain consistency between imputations by means of passive imputation.
# Many diagnostic plots are implemented to inspect the quality of the imputations.
# Main Functions of Mice
# mice() Impute the missing data *m* times
# with() Analyze completed data sets
# pool() Combine parameter estimates
# complete() Export imputed data
# ampute() Generate missing data
md.pairs(bc)
md.pattern(bc)
# > md.pattern(bc)
# code_num thickness uniformity_size uniformity_shape adhesion epithelial_size chromatin nucleoli mitoses class nuclei
# 683 1 1 1 1 1 1 1 1 1 1 1 0
# 16 1 1 1 1 1 1 1 1 1 1 0 1
# 0 0 0 0
# The MICE pattern function first states that we have 683 complete variables and 16 missing. It then delineates which
# variables are missing information.
# I found this from a tutorial that went through the MICE library. What this plot shows is the graphical representation of
# missing data. In our data set Bare Nuclei is the only variable that is missing data and accounts for a little over 2%.
aggr_plot = aggr(bc$nuclei, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(bc),
ylab=c("Histogram of missing data","Pattern"))
# Convert dataset into data frame
bc_as_data = as.data.frame(bc)
# Plot pbox using pos = 7 (nuclei)
pbox(x = bc_as_data, pos = 7)
###################### Mean/Mode Imputation #############
# 14.1.1 Use the mean/mode imputation method to impute values for the missing data
bc_mean_data = mice(bc,m=5,maxit=5,method ='pmm',seed=500)
#### imputs ####
# m -> number of multiple imputations, maxit -> scaler giving the number of iterations,
# pmm -> predictive mean matching
summary(bc_mean_data)
# Let's check to determine if the mean formula above worked correctly to replace NAs with
# the mean.
bc_mean_data$imp$nuclei
# > bc_data$imp$nuclei
# 1 2 3 4 5
# 24 10 4 10 5 10
# 41 1 1 3 1 1
# 140 1 1 1 1 1
# 146 1 5 1 3 1
# 159 1 1 1 1 1
# 165 1 1 1 1 3
# 236 1 1 1 1 1
# 250 1 1 1 2 1
# 276 1 1 1 1 1
# 293 4 10 3 1 1
# 295 1 3 1 1 1
# 298 1 1 5 1 1
# 316 5 1 1 3 10
# 322 5 1 1 1 1
# 412 1 1 1 1 1
# 618 1 1 1 1 1
bc_clean = complete(bc_mean_data, 1)
#bc_clean
md.pattern(bc_clean)
# > md.pattern(bc_clean)
# /\ /\
# { `---' }
# { O O }
# ==> V <== No need for mice. This data set is completely observed.
# \ \|/ /
# `-----'
#
# code_num thickness uniformity_size uniformity_shape adhesion epithelial_size nuclei chromatin
# 699 1 1 1 1 1 1 1 1
# 0 0 0 0 0 0 0 0
# nucleoli mitoses class
# 699 1 1 1 0
# 0 0 0 0
################## 14.1.2 ########################################
#### 2. Use regression to impute values for the missing data ####
bc_ln_table = bc[1:10]
#bc_ln_table
bc_ln_data = mice(bc_ln_table,m=4, maxit = 5 ,method ='norm.predict',seed=50)
summary(bc_ln_data)
bc_ln_data$imp$nuclei
## This approach results in filling the missing values with NA as opposed to values derive
## from a linear regression (I tried both linear predictive and linear ignoring model errors).
## I will now look to solve for the missing variables manually using the approach discussed
## in office hours.
# Let's create a variable, missing, that holds the instances of ?.
missing = which(bc_missing$nuclei == "?",arr.ind = TRUE)
missing
# Let's now create a variable that removes the categorical column, class, and missing data.
continuous_data = bc[-missing,2:10]
continuous_data$nuclei = as.integer(continuous_data$nuclei)
# let's now build the linear model
lm_mod = lm(nuclei~thickness+uniformity_size+
uniformity_shape+adhesion+epithelial_size+
chromatin+nucleoli+mitoses,
data = continuous_data)
summary(lm_mod)
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.862817 0.162497 11.464 < 2e-16 ***
# continuous_data$thickness 0.068118 0.034746 1.960 0.05035 .
# continuous_data$uniformity_size 0.087939 0.063482 1.385 0.16643
# continuous_data$uniformity_shape 0.110046 0.061190 1.798 0.07255 .
# continuous_data$adhesion -0.076950 0.038270 -2.011 0.04475 *
# continuous_data$epithelial_size 0.043216 0.052123 0.829 0.40733
# continuous_data$chromatin 0.044536 0.049211 0.905 0.36579
# continuous_data$nucleoli 0.119422 0.037076 3.221 0.00134 **
# continuous_data$mitoses 0.001405 0.049448 0.028 0.97733
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 1.896 on 674 degrees of freedom
# Multiple R-squared: 0.2326, Adjusted R-squared: 0.2235
# F-statistic: 25.54 on 8 and 674 DF, p-value: < 2.2e-16
## As you can see, the robustness of the model is questionable (R-Squared of 0.23).
## Let's remove the insignificant variables and re-run the model
lm_mod1 = lm(nuclei~thickness + uniformity_shape + adhesion +
nucleoli, data = continuous_data)
summary(lm_mod1)
# This results in very little improvement but is more parsimonious so let's continue
# with this model.
nuclei_pred = predict(lm_mod1, newdata = bc[missing,])
# Let's look at our predicted values for the missing data. As you will see below,
# all figures are floating point and need to be rounded to integers.
nuclei_pred
# > nuclei_pred
# 24 41 140 146 159 165 236 250 276 293 295
# 3.967619 4.322290 2.322981 2.723996 2.523488 2.642191 3.084108 2.482586 2.883601 5.563110 2.322981
# 298 316 322 412 618
# 3.327197 4.252752 2.482586 2.322981 2.322981
round_nuclei_pred = round(nuclei_pred)
round_nuclei_pred
# > round_nuclei_pred
# 24 41 140 146 159 165 236 250 276 293 295 298 316 322 412 618
# 4 4 2 3 3 3 3 2 3 6 2 3 4 2 2 2
# Now, let's impute the rounded predicted values into a new dataset we'll create specifically for
# this linear imputation method.
bc_data_imputaton = bc
bc_data_imputaton[missing,]$nuclei = round_nuclei_pred
bc_data_imputaton$nuclei = as.numeric(bc_data_imputaton$nuclei)
# Let's view our imputed data as a sanity check to ensure there are no missing data or decimals.
bc_data_imputaton$nuclei
# Let's also make sure our data set is completely observed using mice pattern.
md.pattern(bc_data_imputaton)
# No need for mice. This data set is completely observed.
################## 14.1.3 ########################################
#### 3. Use regression with perturbation to impute values for the missing data
# What is perturbation? The definition states: a deviation of a system, moving object,
# or process from its regular or normal state or path, caused by an outside influence.
# Using regression (above) is more complex but leads to less biased data.
# With that said, regression also has the disadvantage of using the same data twice which could
# lead to overfitting the data.
# Ultimately, This doesn't capture all the variability in said data rows.
# An approach to solve this is perturbation – adding a random amount up or down for each imputed estimate.
# One final note, professor acknowledges that this approach often leads to less accuracy...
mu = mean(nuclei_pred)
# 3.096715
sd_hat = sd(nuclei_pred)
# 0.9522
pertub_val = rnorm(n = length(nuclei_pred), mean = mu, sd = sd_hat)
pertub_val
# As before we need to round these figures
round_pertub_val = round(pertub_val)
bc_data_pertub = bc
bc_data_pertub[missing,]$nuclei = round_pertub_val
bc_data_pertub$nuclei = as.numeric(bc_data_pertub$nuclei)
# Data check
md.pattern(bc_data_pertub)
################## 15.1 ########################################
# Describe a situation or problem from your job, everyday life, current events, etc., for which optimization
# would be appropriate. What data would you need?
# I work in the asset owner community building asset allocation models through numerous optimization methods.
# My work was described in class (essentially picking the index/universe you want to select securities from,
# adding constraints to ensure no position is too little, too large, or unattainable (when there is float issues)
# and then solving using mean & variance). Since that was described in class notes, I will pivot to a hobby of mine,
# nutrition. How do you optimize your diet? This was described using the army's dilemma but it was more a
# method of providing the soldiers just enough to accomplish their stated missions. The question I'd be looking to
# solve pertains to sports science - specifically what can you eat to improve your optimal performance within a
# specific sports competition.
# I'd need numerous randomized trials involving student athletes. I'd examine their gut biome, total caloric exertion
# on a day of an event, and allergens. I'd likely start with a quantifiable event, say sprinting, and design the
# experiment around a trial of 15-20 races using at least 5 athletes. The variables in my experiment would be composition sources
# (i.e. protein, carbohydrates, minerates, vitamins, etc). I'd add constraints to appease certain allergies,
# min/max intake (to prevent illness), and limit the intake to only natural foods (i.e. no synthetics).
# My optimization function will solve for the sum of each food source that minimizes the race time. I'd need to control
# for externalities such as sleep, tests, social life etc.
| /isye_hw_10.R | no_license | vinnycholewa/ISYE-Modeling | R | false | false | 13,816 | r | ################# Homework 10 ###################
setwd("/Users/vincentcholewa/Documents/GAT/ISYE/isye_wd")
# # Data Set Information:
# # Samples arrive periodically as Dr. Wolberg reports his clinical cases. The database therefore reflects this chronological
# #grouping of the data. This grouping information appears immediately below, having been removed from the data itself:
# # Group 1: 367 instances (January 1989)
# # Group 2: 70 instances (October 1989)
# # Group 3: 31 instances (February 1990)
# # Group 4: 17 instances (April 1990)
# # Group 5: 48 instances (August 1990)
# # Group 6: 49 instances (Updated January 1991)
# # Group 7: 31 instances (June 1991)
# # Group 8: 86 instances (November 1991)
#
# 1. Sample code number id number
# 2. Clump Thickness 1 - 10
# 3. Uniformity of Cell Size 1 - 10
# 4. Uniformity of Cell Shape 1 - 10
# 5. Marginal Adhesion 1 - 10
# 6. Single Epithelial Cell Size 1 - 10
# 7. Bare Nuclei 1 - 10
# 8. Bland Chromatin 1 - 10
# 9. Normal Nucleoli 1 - 10
# 10. Mitoses 1 - 10
# 11. Class: (2 for benign, 4 for malignant)
bc = read.table(file = "breast_cancer.data", header = FALSE, sep = ",",
col.names = c('code_num','thickness','uniformity_size', 'uniformity_shape', 'adhesion',
'epithelial_size',
'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'class' ))
bc_missing = read.table(file = "breast_cancer.data", header = FALSE, sep = ",",
col.names = c('code_num','thickness','uniformity_size', 'uniformity_shape', 'adhesion',
'epithelial_size',
'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'class' ))
#bc
bc_q <- bc == "?"
# replace elements with NA
is.na(bc) = bc_q
colSums(is.na(bc))
# > colSums(is.na(bc))
# code_num thickness uniformity_size uniformity_shape
# 0 0 0 0
# adhesion epithelial_size nuclei chromatin
# 0 0 16 0
# nucleoli mitoses class
# 0 0 0
# Bare nuclei appears to be the only variable that contains values we need to impute for
# and has a total of 16 instances that reflect NA.
## There are two types of missing data:
# 1. MCAR: missing completely at random. This is the desirable scenario in case of missing data.
# 2. MNAR: missing not at random. Missing not at random data is a more serious issue and in this case it
# might be wise to check the data gathering process further and try to understand why the information is missing.
# Let's have a look at the bare nuclei dataset
bc$nuclei
# When looking at the data set a large portion of NAs are covered from rows 136 to rows 316.
# This would be something to examine in more detail to see if a trend occured during a survey
# that is creating the missing fields
length((bc$nuclei))
# 699
# Per the lecture videos, the amount of n/a should not exceed 5%. As you will see below, this dataset's
# NA count amounts to only 2.2%
sum(is.na(bc$nuclei))/length((bc$nuclei))*100
# 2.288984%
# Let's review by looking at the summary statstics and charting a pairwise visual to assess correlations.
summary(bc)
pairs(bc)
library(mice)
library(VIM)
#T he package MICE is a good library to handle missing data.
# The package creates multiple imputations (replacement values) for multivariate missing data.
# The method is based on Fully Conditional Specification, where each incomplete variable is imputed by a separate model.
# The MICE algorithm can impute mixes of continuous, binary, unordered categorical and ordered categorical data.
# In addition, MICE can impute continuous two-level data, and maintain consistency between imputations by means of passive imputation.
# Many diagnostic plots are implemented to inspect the quality of the imputations.
# Main Functions of Mice
# mice() Impute the missing data *m* times
# with() Analyze completed data sets
# pool() Combine parameter estimates
# complete() Export imputed data
# ampute() Generate missing data
md.pairs(bc)
md.pattern(bc)
# > md.pattern(bc)
# code_num thickness uniformity_size uniformity_shape adhesion epithelial_size chromatin nucleoli mitoses class nuclei
# 683 1 1 1 1 1 1 1 1 1 1 1 0
# 16 1 1 1 1 1 1 1 1 1 1 0 1
# 0 0 0 0
# The MICE pattern function first states that we have 683 complete variables and 16 missing. It then delineates which
# variables are missing information.
# I found this from a tutorial that went through the MICE library. What this plot shows is the graphical representation of
# missing data. In our data set Bare Nuclei is the only variable that is missing data and accounts for a litte over 2%.
aggr_plot = aggr(bc$nuclei, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(bc),
ylab=c("Histogram of missing data","Pattern"))
# Convert dataset into data frame
bc_as_data = as.data.frame(bc)
# Plot pbox using pos = 7 (nuclei)
pbox(x = bc_as_data, pos = 7)
###################### Mean/Mode Imputation #############
# 14.1.1 Use the mean/mode imputation method to impute values for the missing data
bc_mean_data = mice(bc,m=5,maxit=5,method ='pmm',seed=500)
#### imputs ####
# m -> number of multiple imputations, maxit -> scaler giving the number of iterations,
# pmm -> predictive mean matching
summary(bc_mean_data)
# Let's check to determine if the mean forumula above worked correctly to replace NAs with
# the mean.
bc_mean_data$imp$nuclei
# > bc_data$imp$nuclei
# 1 2 3 4 5
# 24 10 4 10 5 10
# 41 1 1 3 1 1
# 140 1 1 1 1 1
# 146 1 5 1 3 1
# 159 1 1 1 1 1
# 165 1 1 1 1 3
# 236 1 1 1 1 1
# 250 1 1 1 2 1
# 276 1 1 1 1 1
# 293 4 10 3 1 1
# 295 1 3 1 1 1
# 298 1 1 5 1 1
# 316 5 1 1 3 10
# 322 5 1 1 1 1
# 412 1 1 1 1 1
# 618 1 1 1 1 1
bc_clean = complete(bc_mean_data, 1)
#bc_clean
md.pattern(bc_clean)
# > md.pattern(bc_clean)
# /\ /\
# { `---' }
# { O O }
# ==> V <== No need for mice. This data set is completely observed.
# \ \|/ /
# `-----'
#
# code_num thickness uniformity_size uniformity_shape adhesion epithelial_size nuclei chromatin
# 699 1 1 1 1 1 1 1 1
# 0 0 0 0 0 0 0 0
# nucleoli mitoses class
# 699 1 1 1 0
# 0 0 0 0
################## 14.1.2 ########################################
#### 2. Use regression to impute values for the missing data ####
bc_ln_table = bc[1:10]
#bc_ln_table
bc_ln_data = mice(bc_ln_table,m=4, maxit = 5 ,method ='norm.predict',seed=50)
summary(bc_ln_data)
bc_ln_data$imp$nuclei
## This approach results in filling the missing values with NA as opposed to values derive
## from a linear regression (I tried both linear predictive and linear ignoring model errors).
## I will now look to solve for the missing variables manually using the approach discussed
## in office hours.
# Let's create a variable, missing, that holds the instances of ?.
missing = which(bc_missing$nuclei == "?",arr.ind = TRUE)
missing
# Let's now create a variable that removes the categorical column, class, and missing data.
continuous_data = bc[-missing,2:10]
continuous_data$nuclei = as.integer(continuous_data$nuclei)
# let's now build the linear model
lm_mod = lm(nuclei~thickness+uniformity_size+
uniformity_shape+adhesion+epithelial_size+
chromatin+nucleoli+mitoses,
data = continuous_data)
summary(lm_mod)
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.862817 0.162497 11.464 < 2e-16 ***
# continuous_data$thickness 0.068118 0.034746 1.960 0.05035 .
# continuous_data$uniformity_size 0.087939 0.063482 1.385 0.16643
# continuous_data$uniformity_shape 0.110046 0.061190 1.798 0.07255 .
# continuous_data$adhesion -0.076950 0.038270 -2.011 0.04475 *
# continuous_data$epithelial_size 0.043216 0.052123 0.829 0.40733
# continuous_data$chromatin 0.044536 0.049211 0.905 0.36579
# continuous_data$nucleoli 0.119422 0.037076 3.221 0.00134 **
# continuous_data$mitoses 0.001405 0.049448 0.028 0.97733
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 1.896 on 674 degrees of freedom
# Multiple R-squared: 0.2326, Adjusted R-squared: 0.2235
# F-statistic: 25.54 on 8 and 674 DF, p-value: < 2.2e-16
## As you can see, the robustness of the model is questionable (R-Squared of 0.23).
## Let's remove the insignificant variables and re-run the model
lm_mod1 = lm(nuclei~thickness + uniformity_shape + adhesion +
nucleoli, data = continuous_data)
summary(lm_mod1)
# This results in very little improvement but is more parsimonous so let's continue
# with this model.
nuclei_pred = predict(lm_mod1, newdata = bc[missing,])
# Let's look at our predicted values for the missing data. As you will see below,
# all figures are floating point and need to be rounded to integers.
nuclei_pred
# > nuclei_pred
# 24 41 140 146 159 165 236 250 276 293 295
# 3.967619 4.322290 2.322981 2.723996 2.523488 2.642191 3.084108 2.482586 2.883601 5.563110 2.322981
# 298 316 322 412 618
# 3.327197 4.252752 2.482586 2.322981 2.322981
round_nuclei_pred = round(nuclei_pred)
round_nuclei_pred
# > round_nuclei_pred
# 24 41 140 146 159 165 236 250 276 293 295 298 316 322 412 618
# 4 4 2 3 3 3 3 2 3 6 2 3 4 2 2 2
# Now, let's impute the rounded predicted values into a new dataset we'll create specifically for
# this linear imputation method.
bc_data_imputaton = bc
bc_data_imputaton[missing,]$nuclei = round_nuclei_pred
bc_data_imputaton$nuclei = as.numeric(bc_data_imputaton$nuclei)
# Let's view our imputed data as a sanity check to ensure there are no missing data or decimals.
bc_data_imputaton$nuclei
# Let's also make sure our data set is completely observed using mice pattern.
md.pattern(bc_data_imputaton)
# No need for mice. This data set is completely observed.
################## 14.1.3 ########################################
#### 3. Use regression with perturbation to impute values for the missing data
# What is perturbation? The definition states: a deviation of a system, moving object,
# or process from its regular or normal state or path, caused by an outside influence.
# Using regression (above) is more complex but leads to less biased data.
# With that said, regression also has the disadvantage of using the same data twice which could
# lead to overfitting the data.
# Ultimately, This doesn't capture all the variability in said data rows.
# An approach to solve this is perturbation – adding a random amount up or down for each imputed estimate.
# One final note, professor acknowledges that this approach often leads to less accuracy...
mu = mean(nuclei_pred)
# 3.096715
sd_hat = sd(nuclei_pred)
# 0.9522
pertub_val = rnorm(n = length(nuclei_pred), mean = mu, sd = sd_hat)
pertub_val
# As before we need to round these figures
round_pertub_val = round(pertub_val)
bc_data_pertub = bc
bc_data_pertub[missing,]$nuclei = round_pertub_val
bc_data_pertub$nuclei = as.numeric(bc_data_pertub$nuclei)
# Data check
md.pattern(bc_data_pertub)
################## 15.1 ########################################
# Describe a situation or problem from your job, everyday life, current events, etc., for which optimization
# would be appropriate. What data would you need?
# I work in the asset owner community building asset allocation models through numerous optimization methods.
# My work was described in class (essentially picking the index/universe you want to select securities from,
# adding constraints to ensure no position is too little, too large, or unattainable (when there is float issues)
# and then solving using mean & variance). Since that was described in class notes, I will pivot to a hobby of mine,
# nutrition. How do you optimize your diet? This was described using the army's dillemnia but it was more a
# method of providing the soldiers just enough to accomplish their stated missions. The questin I'd be looking to
# solve pertains to sports science - specifically what can you eat to improve your optimal performance within a
# specific sports competition.
# I'd need numerous randomized trials involving student athletes. I'd examine their gut biome, total caloric exertion
# on a day of an event, and allergens. I'd likely start with a quantifiable event, say sprinting, and design the
# experiment around a trial of 15-20 races using at least 5 athletes. The variables in my experiment would be composition sources
# (i.e. protein, carbohydrates, minerates, vitamins, etc). I'd add constraints to appease certain allergies,
# min/max intake (to prevent illness), and limit the intake to only natural foods (i.e. no synethetics).
# My optimization function will solve for the sum of each food source that minimizes the race time. I'd need to control
# for externalities such as sleep, tests, social life etc.
|
# Simulate n draws from a discrete distribution over the symbols a-d with
# probabilities p1..p4 (which sum to 1), printing each drawn symbol and
# counting how often each occurs.
vector <- c()  # BUG FIX: '[]' is not valid R syntax for an empty vector
w1 <- 'a'
w2 <- 'b'
w3 <- 'c'
w4 <- 'd'
p1 <- 0.1
p2 <- 0.2
p3 <- 0.3
p4 <- 0.4 # set to a = p mod 5, where p is the last significant digit of my roll number (9 mod 5 = 4)
n <- 10   # number of simulated draws
w <- 0    # counter variables
x <- 0
y <- 0
z <- 0
for (i in seq_len(n)) {
  u <- runif(1, 0, 1)
  # BUG FIX: the original used four independent 'if's with overlapping
  # boundaries (u exactly equal to p1 satisfied both the first and second
  # test); an if/else chain partitions [0, 1] cleanly.
  if (u <= p1) {
    print(w1)
    vector <- c(vector, w1)
    w <- w + 1  # frequency of 'a' (BUG FIX: increments were commented out,
                # so the cat() reports below always showed 0)
  } else if (u <= p1 + p2) {
    print(w2)
    vector <- c(vector, w2)
    x <- x + 1  # frequency of 'b'
  } else if (u <= p1 + p2 + p3) {
    print(w3)
    vector <- c(vector, w3)
    y <- y + 1  # frequency of 'c'
  } else {  # p1 + p2 + p3 < u <= 1, since the probabilities sum to 1
    print(w4)
    vector <- c(vector, w4)
    z <- z + 1  # frequency of 'd'
  }
  # uncomment the below statement if you want to watch the sample grow
  # print(vector)
}
cat("frequency of a: ", w)
cat("frequency of b: ", x)
cat("frequency of c: ", y)
cat("frequency of d: ", z) | /probabilitySimulator.R | no_license | vijaylingam/Cryptography | R | false | false | 1,037 | r | vector <- []
w1 <- 'a'
w2 <- 'b'
w3 <- 'c'
w4 <- 'd'
p1 <- 0.1
p2 <- 0.2
p3 <- 0.3
p4 <- 0.4 # set to a = pmod5; where is the last significant digit of my roll number. In this case a = 9mod5 = 4.
n<- 10 #set the counter here
w <- 0 #counter variables
x <- 0
y <- 0
z <- 0
for(i in 1:n){
u<-runif(1,0,1)
if(u <= p1){
print(w1)
#vector <- c(vector, w1)
#w = w + 1 # w stores the frequncy of occurence of 'a'
}
if(p1 <= u && u <= p1+p2){
print(w2)
#vector <- c(vector, w2)
#x = x + 1 # x stores the frequncy of occurence of 'b'
}
if(p1+p2<= u && u <= p1+p2+p3){
print(w3)
#vector <- c(vector, w3)
#y = y + 1 # y stores the frequncy of occurence of 'c'
}
if(p1+p2+p3 <= u && u <= 1){ #because p1+p2+p3+p4 = 1
print(w4)
#vector <- c(vector, w4)
#z = z + 1 # z stores the frequncy of occurence of 'd'
}
# uncomment the below statement if you want to see the output of the simulation
#print(vector)
}
cat("frequency of a: ", w)
cat("frequency of b: ", x)
cat("frequency of c: ", y)
cat("frequency of d: ", z) |
# MDS Plot of Species Composition
## written by Alice Linder and Dan Flynn
### updated by Alice on 22 Dec. 2016
library(vegan)
library(dplyr)
library(tidyr)
library(reshape)
library(plyr)
library(reshape2)
library(ggplot2)
rm(list = ls())
setwd("~/GitHub/senior-moment/data")
# setwd("~/Documents/git/senior-moment/data") # For Dan
# MDS overstory
d <- read.csv("all.species.dbh.csv", row.names = NULL)
d <- d[,1:3]
#d <- d2[,-2]
# put data into correct format
overstory <- distinct(d)
overstory <- rename(overstory, c("Comp.Species" = "Species"))
# check
names(overstory)
# SOMETHING WRONG HERE
d <- melt(overstory, id = "Individual", measure.vars = "Species" )
over.all <- as.data.frame(acast(d, Individual ~ value, length))
head(over.all)
over.all <- t(over.all)
head(over.all)
# Analysis and summarizing richness of the overstory
richness <- apply(over.all, 2, sum)
?metaMDS
mds1 <- metaMDS(t(over.all), try = 100) # use t() to change it so that the communities are rows, and species are columns, which is the format that vegan uses
plot(mds1) # ok, lots of scatter, good ordination
overcomp <- data.frame(mds1$points)
overcomp$s <- richness # add our species richness calculations to this data frame
overcomp$sp <- substr(rownames(overcomp), 1, 6)
# Get the site by getting the last two characters of the overcomp rownames
overcomp$site <- unlist(
lapply(strsplit(rownames(overcomp), "_"),
function(x) x[[2]]))
# For each species, plot the species richness by site. Order sites by south -> north
overcomp$site <- as.factor(overcomp$site)
levels(overcomp$site) <- c(3, 1, 4, 2)
overcomp$site <- factor(as.numeric(as.character(overcomp$site)), labels = c("HF", "WM", "GR", "SH"))
# Clear differences with site, changing space along MDS1
colz = alpha(c("#E7298A", "#1B9E77", "#D95F02", "#7570B3"), 0.5)
# plot MDS overstory
plot(mds1, type = "n",
xlim = c(-2, 2),
ylim = c(-1.2, 2),
cex.lab = 2)
count = 1
for(i in unique(overcomp$site)){
ordihull(mds1, group = overcomp$site, label = F,
draw = "polygon", col = colz[count],
show.groups = i)
count = count + 1
}
legend("topleft",
fill = colz,
legend = c("Harvard Forest", "White Mountains", "Grant", "St. Hippolyte"),
bty = "n",
cex = 2)
title("Overstory", cex.main = 3)
?'x.lab'
rm(list = ls())
# plot MDS understory
d2 <- read.csv("understory.csv")
head(d2)
# Data cleaning
rownames(d2) = d2[,1] # move species names into rows
d2 <- d2[,-1]
head(d2)
# Analysis
# Summarizing the richness of the understory
summary(d2)
richness <- apply(d2, 2, sum)
mds2 <- metaMDS(t(d2), try = 100) # use t() to change it so that the communities are rows, and species are columns, which is the format that vegan uses
plot(mds2) # ok, lots of scatter, good ordination
undercomp <- data.frame(mds2$points)
undercomp$s <- richness # add our species richness calculations to this data frame
undercomp$sp <- substr(rownames(undercomp), 1, 6)
# Get the site by getting the last two characters of the undercomp rownames
undercomp$site <- unlist(
lapply(strsplit(rownames(undercomp), "_"),
function(x) x[[2]]))
# For each species, plot the species richness by site. Order sites by south -> north
undercomp$site <- as.factor(undercomp$site)
levels(undercomp$site) <- c(3, 1, 4, 2)
undercomp$site <- factor(as.numeric(as.character(undercomp$site)), labels = c("HF", "WM", "GR", "SH"))
# Clear differences with site, changing space along MDS1
colz = alpha(c("#E7298A", "#1B9E77", "#D95F02", "#7570B3"), 0.5)
plot(mds2, type = "n",
xlim = c(-1.5, 1.5),
ylim = c(-1.2, 2)
)
count = 1
for(i in unique(undercomp$site)){
ordihull(mds2, group = undercomp$site, label =F,
draw = "polygon", col = colz[count],
show.groups = i)
count = count + 1
}
legend("topleft",
fill = colz,
legend = c("Harvard Forest", "White Mountains", "Grant", "St. Hippolyte"),
bty = "n",
cex = 1.2)
title("Overstory", cex.main = 1.5) | /analyses/input/Fig1-MDS.R | no_license | alicelinder/senior-moment | R | false | false | 4,037 | r | # MDS Plot of Species Composition
## written by Alice Linder and Dan Flynn
### updated by Alice on 22 Dec. 2016
#
# Builds NMDS (metaMDS) ordinations of overstory and understory species
# composition and draws per-site convex hulls for the four sites,
# ordered south -> north: HF, WM, GR, SH.
library(vegan)
library(dplyr)
library(tidyr)
library(reshape)
library(plyr)
library(reshape2)
library(ggplot2)
rm(list = ls()) # NOTE: clears the workspace (kept from the original script)
setwd("~/GitHub/senior-moment/data")
# setwd("~/Documents/git/senior-moment/data") # For Dan

# ---- MDS overstory ----
d <- read.csv("all.species.dbh.csv", row.names = NULL)
d <- d[, 1:3]
#d <- d2[,-2]

# Put data into correct format: one row per individual/species pair.
overstory <- distinct(d)
overstory <- rename(overstory, c("Comp.Species" = "Species")) # plyr::rename syntax
# check
names(overstory)

# SOMETHING WRONG HERE (original author's note) -- verify the melt/acast below.
# Build an individual x species incidence table.
d <- melt(overstory, id = "Individual", measure.vars = "Species")
over.all <- as.data.frame(acast(d, Individual ~ value, length))
head(over.all)
over.all <- t(over.all)
head(over.all)

# Species richness per individual (column sums of the incidence table).
richness <- apply(over.all, 2, sum)
# ?metaMDS  # removed: leftover interactive help lookup
mds1 <- metaMDS(t(over.all), try = 100) # t() so communities are rows, species are columns (vegan's format)
plot(mds1) # ok, lots of scatter, good ordination
overcomp <- data.frame(mds1$points)
overcomp$s <- richness # add our species richness calculations to this data frame
overcomp$sp <- substr(rownames(overcomp), 1, 6)

# Get the site code from the text after "_" in the rownames.
overcomp$site <- unlist(
  lapply(strsplit(rownames(overcomp), "_"),
         function(x) x[[2]]))

# For each species, plot the species richness by site. Order sites south -> north.
overcomp$site <- as.factor(overcomp$site)
levels(overcomp$site) <- c(3, 1, 4, 2)
overcomp$site <- factor(as.numeric(as.character(overcomp$site)), labels = c("HF", "WM", "GR", "SH"))

# Clear differences with site, changing space along MDS1
colz <- alpha(c("#E7298A", "#1B9E77", "#D95F02", "#7570B3"), 0.5)

# ---- Plot MDS overstory ----
plot(mds1, type = "n",
     xlim = c(-2, 2),
     ylim = c(-1.2, 2),
     cex.lab = 2)
count <- 1
for (i in unique(overcomp$site)) {
  ordihull(mds1, group = overcomp$site, label = FALSE,
           draw = "polygon", col = colz[count],
           show.groups = i)
  count <- count + 1
}
legend("topleft",
       fill = colz,
       legend = c("Harvard Forest", "White Mountains", "Grant", "St. Hippolyte"),
       bty = "n",
       cex = 2)
title("Overstory", cex.main = 3)
# ?'x.lab'  # removed: leftover interactive help lookup
rm(list = ls()) # reset before the understory analysis (kept from original)

# ---- Plot MDS understory ----
d2 <- read.csv("understory.csv")
head(d2)

# Data cleaning
rownames(d2) <- d2[, 1] # move species names into rows
d2 <- d2[, -1]
head(d2)

# Species richness of the understory (column sums).
summary(d2)
richness <- apply(d2, 2, sum)
mds2 <- metaMDS(t(d2), try = 100) # t() so communities are rows, species are columns
plot(mds2) # ok, lots of scatter, good ordination
undercomp <- data.frame(mds2$points)
undercomp$s <- richness # add our species richness calculations to this data frame
undercomp$sp <- substr(rownames(undercomp), 1, 6)

# Get the site code from the text after "_" in the rownames.
undercomp$site <- unlist(
  lapply(strsplit(rownames(undercomp), "_"),
         function(x) x[[2]]))

# Order sites south -> north.
undercomp$site <- as.factor(undercomp$site)
levels(undercomp$site) <- c(3, 1, 4, 2)
undercomp$site <- factor(as.numeric(as.character(undercomp$site)), labels = c("HF", "WM", "GR", "SH"))

colz <- alpha(c("#E7298A", "#1B9E77", "#D95F02", "#7570B3"), 0.5)
plot(mds2, type = "n",
     xlim = c(-1.5, 1.5),
     ylim = c(-1.2, 2)
)
count <- 1
for (i in unique(undercomp$site)) {
  ordihull(mds2, group = undercomp$site, label = FALSE,
           draw = "polygon", col = colz[count],
           show.groups = i)
  count <- count + 1
}
legend("topleft",
       fill = colz,
       legend = c("Harvard Forest", "White Mountains", "Grant", "St. Hippolyte"),
       bty = "n",
       cex = 1.2)
title("Understory", cex.main = 1.5) # fixed: original titled the understory plot "Overstory"
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairing-methods.R
\name{cross_tab_tbl}
\alias{cross_tab_tbl}
\title{Generate a 2d cross tab using arbitrary numbers of columns as factors}
\usage{
cross_tab_tbl(tbl, x_fields, y_fields)
}
\arguments{
\item{tbl}{\code{data.frame}}
\item{x_fields}{\code{character} fields in \code{tbl}}
\item{y_fields}{\code{character} fields in \code{tbl}}
}
\value{
\code{tibble}
}
\description{
Builds a two-dimensional cross tabulation: one row for each unique
combination of \code{x_fields} and one column for each unique combination
of \code{y_fields}. Cells hold counts, so the result contains no
\code{NA} values.
}
\examples{
cross_tab_tbl(mtcars, c('cyl', 'gear'), 'carb')
}
| /man/cross_tab_tbl.Rd | no_license | amcdavid/CellaRepertorium | R | false | true | 615 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairing-methods.R
\name{cross_tab_tbl}
\alias{cross_tab_tbl}
\title{Generate a 2d cross tab using arbitrary numbers of columns as factors}
\usage{
cross_tab_tbl(tbl, x_fields, y_fields)
}
\arguments{
\item{tbl}{\code{data.frame}}
\item{x_fields}{\code{character} fields in \code{tbl}}
\item{y_fields}{\code{character} fields in \code{tbl}}
}
\value{
\code{tibble}
}
\description{
As many rows as unique combs of x_fields
As many columns as unique combs of y_fields
No NA.
}
\examples{
cross_tab_tbl(mtcars, c('cyl', 'gear'), 'carb')
}
|
#' Extracts data associated with a Spark ML model
#'
#' @param object a Spark ML model
#' @return A tbl_spark
#' @export
ml_model_data <- function(object) {
  # Register the training data stored on the model object as a Spark table.
  sdf_register(object$data)
}

# Wrap a function so that failures return NULL instead of signalling an error.
possibly_null <- function(.f) purrr::possibly(.f, otherwise = NULL)

#' @export
predict.ml_model_classification <- function(object,
                                            newdata = ml_model_data(object),
                                            ...) {
  # Classification predictions are read from the decoded label column.
  ml_predict(object, newdata) %>%
    sdf_read_column("predicted_label")
}

#' @export
predict.ml_model_regression <- function(object, newdata = ml_model_data(object), ...) {
  # Regression predictions live in the model's configured prediction column.
  prediction_col <- ml_param(object$model, "prediction_col")
  ml_predict(object, newdata) %>%
    sdf_read_column(prediction_col)
}

#' @export
fitted.ml_model_prediction <- function(object, ...) {
  # Fitted values: predictions on the training data, collected into an R vector.
  prediction_col <- object$model %>%
    ml_param("prediction_col")
  object %>%
    ml_predict() %>%
    dplyr::pull(!!rlang::sym(prediction_col))
}

#' @export
residuals.ml_model <- function(object, ...) {
  # Default method: residuals are only defined for model classes that
  # provide their own residuals() method.
  stop("'residuals()' not supported for ", class(object)[[1L]])
}
#' Model Residuals
#'
#' This generic method returns a Spark DataFrame with model
#' residuals added as a column to the model training data.
#'
#' @param object Spark ML model object.
#' @param ... additional arguments
#'
#' @rdname sdf_residuals
#'
#' @export
sdf_residuals <- function(object, ...) {
  UseMethod("sdf_residuals")
}

# Read a Spark ML vector field of `jobj` and return it as an R numeric vector.
read_spark_vector <- function(jobj, field) {
  object <- invoke(jobj, field)
  invoke(object, "toArray")
}

# Read a Spark ML matrix -- either `jobj` itself or one of its named fields --
# into an R matrix. NOTE(review): assumes toArray() yields the data in
# column-major order (matrix()'s default fill); confirm against Spark's
# DenseMatrix semantics.
read_spark_matrix <- function(jobj, field = NULL) {
  object <- if (rlang::is_null(field)) jobj else invoke(jobj, field)
  nrow <- invoke(object, "numRows")
  ncol <- invoke(object, "numCols")
  data <- invoke(object, "toArray")
  matrix(data, nrow = nrow, ncol = ncol)
}

# Short type of a Spark object: the first entry of its Java class hierarchy.
ml_short_type <- function(x) {
  jobj_class(spark_jobj(x))[1]
}
# Construct a Spark ml.linalg.DenseMatrix from an R matrix.
# NULL passes through unchanged (callers use NULL as "not provided").
spark_dense_matrix <- function(sc, mat) {
  if (is.null(mat)) {
    return(mat)
  }
  # as.list(mat) flattens the R matrix in column-major order.
  invoke_new(
    sc, "org.apache.spark.ml.linalg.DenseMatrix", dim(mat)[1L], dim(mat)[2L],
    as.list(mat)
  )
}

# Construct a Spark dense Vector from an R vector (NULL passes through).
spark_dense_vector <- function(sc, vec) {
  if (is.null(vec)) {
    return(vec)
  }
  invoke_static(
    sc, "org.apache.spark.ml.linalg.Vectors", "dense",
    as.list(vec)
  )
}

# Build a Spark SQL Column object for column name `col`, optionally
# applying an alias.
spark_sql_column <- function(sc, col, alias = NULL) {
  jobj <- invoke_new(sc, "org.apache.spark.sql.Column", col)
  if (!is.null(alias)) {
    jobj <- invoke(jobj, "alias", alias)
  }
  jobj
}
# Build a function that rearranges a statistics vector so the last element
# (the intercept, when one was fitted) comes first. Without an intercept
# the identity function is returned and the vector is left untouched.
make_stats_arranger <- function(fit_intercept) {
  if (!fit_intercept) {
    return(identity)
  }
  function(x) {
    force(x)
    n <- length(x)
    c(x[n], x[-n])
  }
}
# ----------------------------- ML helpers -------------------------------------

# Construct a Spark ML estimator of Java class `spark_class`, apply the
# non-NULL setter calls listed in `invoke_steps`, wrap the result as an R
# object of class `r_class`, and hand it to post_ml_obj(), which dispatches
# on the type of `x` (connection / pipeline / tbl_spark) to decide whether
# to return, append, or fit the estimator.
ml_process_model <- function(x, uid, spark_class, r_class, invoke_steps, ml_function,
                             formula = NULL, response = NULL, features = NULL) {
  sc <- spark_connection(x)
  args <- list(sc, spark_class)
  if (!is.null(uid)) {
    uid <- cast_string(uid)
    args <- append(args, list(uid))
  }
  jobj <- do.call(invoke_new, args)
  # Pair each setter name (.y) with its value (.x); invoke only the setters
  # whose value is non-NULL (NULL means "keep the Spark default").
  l_steps <- purrr::imap(invoke_steps, ~ list(.y, .x))
  for(i in seq_along(l_steps)) {
    if(!is.null(l_steps[[i]][[2]])) {
      jobj <- do.call(invoke, c(jobj, l_steps[[i]]))
    }
  }
  new_estimator <- new_ml_estimator(jobj, class = r_class)
  post_ml_obj(
    x = x,
    nm = new_estimator,
    ml_function = ml_function,
    formula = formula,
    response = response,
    features = features,
    features_col = invoke_steps$setFeaturesCol,
    label_col = invoke_steps$setLabelCol
  )
}
# Return `value` unless it requires a newer Spark version than the active
# connection provides; in that case warn and return NULL so the setter is
# skipped. `deparse(substitute(value))` recovers the caller's argument
# expression for the warning message.
param_min_version <- function(x, value, min_version = NULL) {
  ret <- value
  if (!is.null(value)) {
    if (!is.null(min_version)) {
      sc <- spark_connection(x)
      ver <- spark_version(sc)
      if (ver < min_version) {
        warning(paste0(
          "Parameter `", deparse(substitute(value)),
          "` is only available for Spark ", min_version, " and later.",
          "The value will not be passed to the model."
        ))
        ret <- NULL
      }
    }
  }
  ret
}
# --------------------- Post conversion functions ------------------------------

# Decide what to do with a freshly built estimator `nm`, dispatching on the
# class of `x` (the object the user originally supplied).
post_ml_obj <- function(x, nm, ml_function, formula, response,
                        features, features_col, label_col) {
  UseMethod("post_ml_obj")
}

# Bare connection: simply return the (unfitted) estimator.
post_ml_obj.spark_connection <- function(x, nm, ml_function, formula, response,
                                         features, features_col, label_col) {
  nm
}

# Pipeline: append the estimator as a new pipeline stage.
post_ml_obj.ml_pipeline <- function(x, nm, ml_function, formula, response,
                                    features, features_col, label_col) {
  ml_add_stage(x, nm)
}

# Spark DataFrame: fit immediately -- directly when no formula is given,
# otherwise through the formula-based supervised-model constructor.
post_ml_obj.tbl_spark <- function(x, nm, ml_function, formula, response,
                                  features, features_col, label_col) {
  formula <- ml_standardize_formula(formula, response, features)
  if (is.null(formula)) {
    ml_fit(nm, x)
  } else {
    ml_construct_model_supervised(
      ml_function,
      predictor = nm,
      formula = formula,
      dataset = x,
      features_col = features_col,
      label_col = label_col
    )
  }
}
| /R/ml_utils.R | permissive | yitao-li/sparklyr | R | false | false | 5,107 | r | #' Extracts data associated with a Spark ML model
#'
#' @param object a Spark ML model
#' @return A tbl_spark
#' @export
ml_model_data <- function(object) {
sdf_register(object$data)
}
possibly_null <- function(.f) purrr::possibly(.f, otherwise = NULL)
#' @export
predict.ml_model_classification <- function(object,
newdata = ml_model_data(object),
...) {
ml_predict(object, newdata) %>%
sdf_read_column("predicted_label")
}
#' @export
predict.ml_model_regression <- function(object, newdata = ml_model_data(object), ...) {
prediction_col <- ml_param(object$model, "prediction_col")
ml_predict(object, newdata) %>%
sdf_read_column(prediction_col)
}
#' @export
fitted.ml_model_prediction <- function(object, ...) {
prediction_col <- object$model %>%
ml_param("prediction_col")
object %>%
ml_predict() %>%
dplyr::pull(!!rlang::sym(prediction_col))
}
#' @export
residuals.ml_model <- function(object, ...) {
stop("'residuals()' not supported for ", class(object)[[1L]])
}
#' Model Residuals
#'
#' This generic method returns a Spark DataFrame with model
#' residuals added as a column to the model training data.
#'
#' @param object Spark ML model object.
#' @param ... additional arguments
#'
#' @rdname sdf_residuals
#'
#' @export
sdf_residuals <- function(object, ...) {
UseMethod("sdf_residuals")
}
read_spark_vector <- function(jobj, field) {
object <- invoke(jobj, field)
invoke(object, "toArray")
}
read_spark_matrix <- function(jobj, field = NULL) {
object <- if (rlang::is_null(field)) jobj else invoke(jobj, field)
nrow <- invoke(object, "numRows")
ncol <- invoke(object, "numCols")
data <- invoke(object, "toArray")
matrix(data, nrow = nrow, ncol = ncol)
}
ml_short_type <- function(x) {
jobj_class(spark_jobj(x))[1]
}
spark_dense_matrix <- function(sc, mat) {
if (is.null(mat)) {
return(mat)
}
invoke_new(
sc, "org.apache.spark.ml.linalg.DenseMatrix", dim(mat)[1L], dim(mat)[2L],
as.list(mat)
)
}
spark_dense_vector <- function(sc, vec) {
if (is.null(vec)) {
return(vec)
}
invoke_static(
sc, "org.apache.spark.ml.linalg.Vectors", "dense",
as.list(vec)
)
}
spark_sql_column <- function(sc, col, alias = NULL) {
jobj <- invoke_new(sc, "org.apache.spark.sql.Column", col)
if (!is.null(alias)) {
jobj <- invoke(jobj, "alias", alias)
}
jobj
}
make_stats_arranger <- function(fit_intercept) {
if (fit_intercept) {
function(x) {
force(x)
c(tail(x, 1), head(x, length(x) - 1))
}
} else {
identity
}
}
# ----------------------------- ML helpers -------------------------------------
ml_process_model <- function(x, uid, spark_class, r_class, invoke_steps, ml_function,
formula = NULL, response = NULL, features = NULL) {
sc <- spark_connection(x)
args <- list(sc, spark_class)
if (!is.null(uid)) {
uid <- cast_string(uid)
args <- append(args, list(uid))
}
jobj <- do.call(invoke_new, args)
l_steps <- purrr::imap(invoke_steps, ~ list(.y, .x))
for(i in seq_along(l_steps)) {
if(!is.null(l_steps[[i]][[2]])) {
jobj <- do.call(invoke, c(jobj, l_steps[[i]]))
}
}
new_estimator <- new_ml_estimator(jobj, class = r_class)
post_ml_obj(
x = x,
nm = new_estimator,
ml_function = ml_function,
formula = formula,
response = response,
features = features,
features_col = invoke_steps$setFeaturesCol,
label_col = invoke_steps$setLabelCol
)
}
param_min_version <- function(x, value, min_version = NULL) {
ret <- value
if (!is.null(value)) {
if (!is.null(min_version)) {
sc <- spark_connection(x)
ver <- spark_version(sc)
if (ver < min_version) {
warning(paste0(
"Parameter `", deparse(substitute(value)),
"` is only available for Spark ", min_version, " and later.",
"The value will not be passed to the model."
))
ret <- NULL
}
}
}
ret
}
# --------------------- Post conversion functions ------------------------------
post_ml_obj <- function(x, nm, ml_function, formula, response,
features, features_col, label_col) {
UseMethod("post_ml_obj")
}
post_ml_obj.spark_connection <- function(x, nm, ml_function, formula, response,
features, features_col, label_col) {
nm
}
post_ml_obj.ml_pipeline <- function(x, nm, ml_function, formula, response,
features, features_col, label_col) {
ml_add_stage(x, nm)
}
post_ml_obj.tbl_spark <- function(x, nm, ml_function, formula, response,
features, features_col, label_col) {
formula <- ml_standardize_formula(formula, response, features)
if (is.null(formula)) {
ml_fit(nm, x)
} else {
ml_construct_model_supervised(
ml_function,
predictor = nm,
formula = formula,
dataset = x,
features_col = features_col,
label_col = label_col
)
}
}
|
## Load a beat file and a blood vessel diameter file and calculate a blood flow column (Q_BA)
## from velocity within the beat file. Diameter is applied on a beat-by-beat basis.
# ID = subject ID, e.g. "NVT24"
# condition = "PRE" or "POST"
# time = a vector of the end-times associated with stage 0, 15, and 30, e.g. time = c(7690.034461+60, 8010.123878+60, 8230.899836+60)
# diameter = a vector of the diameters for all four stages (0, 15, 30, and 45), e.g. diameter = c(0.33645, 0.33013, 0.33654, 0.33876)
# f_out = the output file name, e.g. f_out = "beat_NVT24_pre_control.csv"
# Example calls:
# PRE  - beat_file_Q_calculation(ID = "NVT36", condition = "PRE", time = c(4459.21036 + 60, 4652.000393 + 60, 4842.302376 + 60), diameter = c(0.430242527, 0.430868095, 0.423268884, 0.422153326), f_out = "beat_NVT36_pre_control.csv")
# POST - beat_file_Q_calculation(ID = "NVT36", condition = "POST", time = c(7968.690134 + 60, 8675.798401 + 60, 8862.106388 + 60), diameter = c(0.414332778, 0.411212222, 0.407021099, 0.419442222), f_out = "beat_NVT36_post_control.csv")
#
# NOTE(review): this function is interactive (file.choose()) and attaches
# dplyr as a side effect. It assumes the chosen beat file has columns
# Time, V_ba, MAP, CO, and HR -- confirm against the beat-file format.
beat_file_Q_calculation <- function(ID, condition, time, diameter, f_out){
  library(dplyr)
  # Load beat file (after applying any blood pressure correction; see Beat_File_BP_Correct.R)
  beat_in <- file.choose()
  beat <- read.csv(beat_in, header = TRUE)
  # Build the diameter lookup. (Diameter analysis done differently by Troy --
  # average diameters are inserted directly rather than read from a summary file.)
  stage <- c("0_LBNP", "15_LBNP", "30_LBNP", "45_LBNP")
  diameter <- as.data.frame(cbind(ID, condition, stage, diameter))
  diameter$diameter <- as.numeric(as.character(diameter$diameter))
  # Stage end-times lookup (stage 45 runs to the end of the file).
  time_stage <- c("t_0", "t_15", "t_30")
  time <- as.data.frame(cbind(ID, condition, time_stage, time))
  time$time <- as.numeric(as.character(time$time))
  # Calculate blood flow stage by stage: Q = velocity * cross-sectional area
  # (pi * (d/2)^2), scaled by 60 to convert per-second velocity to per-minute flow.
  beat_t0 <- filter(beat, Time <= subset(time, time_stage == "t_0", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "0_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
  beat_t15 <- filter(beat, Time > subset(time, time_stage == "t_0", select = "time", drop = TRUE) & Time <= subset(time, time_stage == "t_15", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "15_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
  beat_t30 <- filter(beat, Time > subset(time, time_stage == "t_15", select = "time", drop = TRUE) & Time <= subset(time, time_stage == "t_30", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "30_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
  beat_t45 <- filter(beat, Time > subset(time, time_stage == "t_30", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "45_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
  # Combine stages into a single data frame.
  beat <- rbind(beat_t0, beat_t15, beat_t30, beat_t45)
  # Additional derived variables: forearm vascular resistance/conductance,
  # stroke volume, and total peripheral resistance.
  beat <- mutate(beat, FVR = beat$MAP/beat$Q_BA, FVC = beat$Q_BA/beat$MAP)
  beat <- mutate(beat, SV = (beat$CO/beat$HR)*1000, TPR = beat$MAP/beat$CO)
  # Output files are written next to the chosen input file.
  write.csv(beat, file = paste(dirname(beat_in),"/",f_out, sep = ""), row.names = FALSE, quote = FALSE)
  write.csv(diameter, file = paste(dirname(beat_in), "/", ID, "_diameter_summary_", condition, ".csv", sep = ""), row.names = FALSE)
}
| /2_Beat_File_Q_Calculation.R | no_license | gefoster11/CPLEAP | R | false | false | 3,557 | r | ## Load a beat file and a blood vessel diameter file and calculates a blood flow column from velocity within the beat file. Diameter is applied on a beat-by-beat basis.
#ID = subject ID. eg. "NVT24"
#condition = "PRE" or "POST"
#time = a vector of the end-times associated with stage 0, 15, and 30. eg. time = c(7690.034461+60,8010.123878+60,8230.899836+60)
#diameter = a vector of the diameters for all four stages (0, 15, 30, and 45). eg. diameter = c(0.33645, 0.33013, 0.33654, 0.33876)
#f_out the output file name. eg. f_out = "beat_NVT24_pre_control.csv")
# PRE- beat_file_Q_calculation(ID = "NVT36", condition = "PRE", time = c(4459.21036 + 60, 4652.000393 + 60, 4842.302376 + 60), diameter = c(0.430242527, 0.430868095, 0.423268884, 0.422153326), f_out = "beat_NVT36_pre_control.csv")
#POST - beat_file_Q_calculation(ID = "NVT36", condition = "POST", time = c(7968.690134 + 60, 8675.798401 + 60, 8862.106388 + 60), diameter = c(0.414332778, 0.411212222, 0.407021099, 0.419442222), f_out = "beat_NVT36_post_control.csv")
beat_file_Q_calculation <- function(ID, condition, time, diameter, f_out){
library(dplyr)
#Load beat file (after applying any blood pressure correction (see Beat_File_BP_Correct.R))
beat_in <- file.choose()
beat <- read.csv(beat_in, header = TRUE)
#Load Diameter summary file (after using the Diameter Summarize NVT Study.RMD script)
#Diameter analysis done differently by Troy...adjusting code to insert average diameter directly
stage <- c("0_LBNP", "15_LBNP", "30_LBNP", "45_LBNP")
diameter <- as.data.frame(cbind(ID, condition, stage, diameter))
diameter$diameter <- as.numeric(as.character(diameter$diameter))
time_stage <- c("t_0", "t_15", "t_30")
time <- as.data.frame(cbind(ID, condition, time_stage, time))
time$time <- as.numeric(as.character(time$time))
#calculate blood flow on a stage by stage basis
beat_t0 <- filter(beat, Time <= subset(time, time_stage == "t_0", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "0_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
beat_t15 <- filter(beat, Time > subset(time, time_stage == "t_0", select = "time", drop = TRUE) & Time <= subset(time, time_stage == "t_15", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "15_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
beat_t30 <- filter(beat, Time > subset(time, time_stage == "t_15", select = "time", drop = TRUE) & Time <= subset(time, time_stage == "t_30", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "30_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
beat_t45 <- filter(beat, Time > subset(time, time_stage == "t_30", select = "time", drop = TRUE)) %>% mutate(., Q_BA = V_ba*(pi*(subset(diameter, stage == "45_LBNP", select = "diameter", drop = TRUE)/2)^2)*60)
#combine stages into a single data frame.
beat <- rbind(beat_t0, beat_t15, beat_t30, beat_t45)
#add additional calculations if needed. For example FVR and FVC are calculated below.
beat <- mutate(beat, FVR = beat$MAP/beat$Q_BA, FVC = beat$Q_BA/beat$MAP)
beat <- mutate(beat, SV = (beat$CO/beat$HR)*1000, TPR = beat$MAP/beat$CO)
#Output files
write.csv(beat, file = paste(dirname(beat_in),"/",f_out, sep = ""), row.names = FALSE, quote = FALSE)
write.csv(diameter, file = paste(dirname(beat_in), "/", ID, "_diameter_summary_", condition, ".csv", sep = ""), row.names = FALSE)
}
|
#### Question 1
# Download the housing survey and flag rows matching ACR == 3 and AGS == 6,
# then list the matching row indices.
fileurl = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileurl, destfile="./comm.csv",method="curl")
data = read.csv("comm.csv", header=TRUE)
data$agricultureLogical = ifelse(data$ACR==3 & data$AGS==6, TRUE, FALSE)
which(data$agricultureLogical)

#### Question 2
# Read a JPEG in native (integer) representation and compute deciles of
# its pixel values.
library(jpeg)
fileurl <-"https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
download.file(fileurl, destfile="./jeff.jpeg",method="curl")
jpeg <- readJPEG("jeff.jpeg", native = TRUE)
jpegquant <- quantile(jpeg, probs = seq(0, 1, 0.10))

#### Question 3
# Merge GDP rankings (rows 5-194 hold the actual country records) with the
# education data by country code, then sort by ranking, descending.
fileurl = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileurl, destfile="./GDP.csv",method="curl")
fileurl2 = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileurl2, destfile="./EDU.csv",method="curl")
gdp = read.csv("GDP.csv", header=TRUE)
gdp = gdp[5:194,]
edu = read.csv("EDU.csv", header=TRUE)
merged = merge(gdp,edu, by.x="X", by.y="CountryCode")
# Factor -> numeric via levels (the recommended conversion idiom).
merged$Gross.domestic.product.2012 = as.numeric(levels(merged$Gross.domestic.product.2012))[merged$Gross.domestic.product.2012]
sorted = merged[order(merged$Gross.domestic.product.2012, decreasing=TRUE),]

#### Question 4
# Average GDP ranking for OECD vs non-OECD high-income countries.
oecd = merged[merged$Income.Group=="High income: OECD",]
nonoecd = merged[merged$Income.Group=="High income: nonOECD",]
mean(oecd$Gross.domestic.product.2012)
mean(nonoecd$Gross.domestic.product.2012)

#### Question 5
# Cut GDP ranking into five quantile groups and cross-tabulate against
# income group.
merged$quantile = cut(merged$Gross.domestic.product.2012, breaks = quantile(merged$Gross.domestic.product.2012, probs = seq(0.01, 1, 0.20)))
table(merged$quantile, merged$Income.Group)
| /Quiz 3.R | no_license | fedecarles/datasciencecoursera | R | false | false | 1,637 | r | #### Question 1
fileurl = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileurl, destfile="./comm.csv",method="curl")
data = read.csv("comm.csv", header=TRUE)
data$agricultureLogical = ifelse(data$ACR==3 & data$AGS==6, TRUE, FALSE)
which(data$agricultureLogical)
#### Question 2
library(jpeg)
fileurl <-"https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
download.file(fileurl, destfile="./jeff.jpeg",method="curl")
jpeg <- readJPEG("jeff.jpeg", native = TRUE)
jpegquant <- quantile(jpeg, probs = seq(0, 1, 0.10))
#### Question 3
fileurl = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileurl, destfile="./GDP.csv",method="curl")
fileurl2 = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileurl2, destfile="./EDU.csv",method="curl")
gdp = read.csv("GDP.csv", header=TRUE)
gdp = gdp[5:194,]
edu = read.csv("EDU.csv", header=TRUE)
merged = merge(gdp,edu, by.x="X", by.y="CountryCode")
merged$Gross.domestic.product.2012 = as.numeric(levels(merged$Gross.domestic.product.2012))[merged$Gross.domestic.product.2012]
sorted = merged[order(merged$Gross.domestic.product.2012, decreasing=TRUE),]
#### Question 4
oecd = merged[merged$Income.Group=="High income: OECD",]
nonoecd = merged[merged$Income.Group=="High income: nonOECD",]
mean(oecd$Gross.domestic.product.2012)
mean(nonoecd$Gross.domestic.product.2012)
#### Question 5
merged$quantile = cut(merged$Gross.domestic.product.2012, breaks = quantile(merged$Gross.domestic.product.2012, probs = seq(0.01, 1, 0.20)))
table(merged$quantile, merged$Income.Group)
|
###########################
###
### Basic util functions
###
###########################
# Trigger a garbage-collection pass, returning the gc summary invisibly so
# nothing is printed at the console.
clean_mem <- function() {
  gc_summary <- gc()
  invisible(gc_summary)
}
# Euclidean norm of `vec`, computed as the type-"2" (spectral) norm of the
# vector viewed as a column matrix.
norm2 <- function(vec) {
  norm(as.matrix(vec), type = "2")
}
# Type-"1" norm of `vec` viewed as a column matrix, i.e. the sum of
# absolute values of its entries.
norm1 <- function(vec) {
  norm(as.matrix(vec), type = "1")
}
# Stack `n` copies of vector `x` as the rows of an n x length(x) matrix.
rep_row <- function(x, n) {
  stacked <- rep(x, each = n)
  matrix(stacked, nrow = n)
}
# Overflow-safe exp(): exponentiate `x` elementwise and clamp any result
# that exceeds the largest representable double (including Inf) down to
# .Machine$double.xmax.
#
# Replaces the original's redundant scalar/vector branching and its
# `1:length(val)` loop with a single vectorized pmin(); results are
# identical for all finite and overflowing inputs, and NA/NaN now
# propagate instead of erroring.
exp_fun <- function(x) {
  pmin(exp(x), .Machine$double.xmax)
}
# Overflow/underflow-safe log(): compute log(x) and clamp the magnitude of
# the result into [.Machine$double.xmin, .Machine$double.xmax], preserving
# sign. log(0) = -Inf therefore maps to -.Machine$double.xmax; an exact 0
# result (log(1)) stays 0 because sign(0) == 0.
#
# Generalized from the original scalar-only `if` logic to work elementwise
# on vectors; scalar results are unchanged.
log_fun <- function(x) {
  val <- log(x)
  big <- .Machine$double.xmax
  val <- pmin(pmax(val, -big), big) # clamp +/-Inf overflow to +/-xmax
  tiny <- !is.na(val) & abs(val) < .Machine$double.xmin
  val[tiny] <- sign(val[tiny]) * .Machine$double.xmin
  val
}
# Outer product of a vector with itself: a length(v) x length(v) matrix
# with entry [i, j] = v[i] * v[j].
outer_fun <- function(v) {
  v %o% v
}
# Sort and order the blocks by size
#
# Reorders a list of block networks (and their per-block model-term names)
# from smallest to largest network. Returns the reordered lists together
# with the permutation applied (`sort_order`) so results can be mapped
# back to the original ordering.
sort_blocks <- function(net_list, mod_names) {
  # Get the sizes of each block (gal$n is the node count stored on
  # `network` objects).
  n_ <- numeric(length(net_list))
  for (i in 1:length(net_list)) {
    n_[i] <- net_list[[i]]$gal$n
  }
  sort_order <- order(n_)
  net_list_order <- rep(list(NULL), length(net_list))
  mod_names_order <- rep(list(NULL), length(mod_names))
  # Apply the same permutation to both lists so they stay aligned.
  for (i in 1:length(net_list)) {
    net_list_order[[i]] <- net_list[[sort_order[i]]]
    mod_names_order[[i]] <- mod_names[[sort_order[i]]]
  }
  return(list(net_list = net_list_order, mod_names = mod_names_order, sort_order = sort_order))
}
# For `n_obj` nodes split into `n_groups` near-equal blocks, build an
# n_obj x eta_len matrix whose row i holds the parameter indices (positions
# in the stacked eta vector) used by the group node i belongs to.
# Returns the index matrix along with the block sizes.
dim_fun <- function(n_obj, n_groups, eta_len) {
  sizes <- split_blocks(n_obj, n_groups)
  dims <- matrix(0, nrow = n_obj, ncol = eta_len)
  for (i in 1:n_groups) {
    num_ <- sizes[i]  # (unused below; rep_row() reads sizes[i] directly)
    # Rows covered by group i: the nodes after the first i-1 blocks.
    dim_1 <- 1 + sum(sizes[seq_spec(i, adjust = -1)])
    dim_2 <- sum(sizes[1:i])
    # Group i uses parameter indices ((i-1)*eta_len + 1) : (i*eta_len),
    # repeated on every one of its rows.
    dims[dim_1:dim_2, ] <-
      rep_row(seq(1 + (i - 1) * eta_len, eta_len * i), sizes[i])
  }
  return(list(dims = dims, sizes = sizes))
}
# Partition `n_obj` items into `n_groups` groups of near-equal size; the
# first (n_obj mod n_groups) groups each receive one extra item.
split_blocks <- function(n_obj, n_groups) {
  quotient <- n_obj %/% n_groups
  remainder <- n_obj %% n_groups
  extras <- rep(c(1, 0), c(remainder, n_groups - remainder))
  rep(quotient, n_groups) + extras
}
# Sequence 1:(i + adjust), except that i == 1 yields an empty vector
# (used to index "all previous blocks", of which block 1 has none).
seq_spec <- function(i, adjust = 0) {
  if (i == 1) {
    return(numeric(0))
  }
  1:(i + adjust)
}
# Build the mapping eta -> stacked natural parameters for the chosen
# parameterization.
#
# "multi_group": eta is a base parameter block followed by per-group offset
# blocks; group g's parameters are base + offset_g. The generated closure's
# first body statement (`num_ <- 1`) is patched via
# `body(eta_fun)[[2]] <- substitute(...)` so the group count is baked into
# the returned function.
# NOTE(review): the closure loops `for (i in 2:num_)`, which misbehaves if
# num_group == 1 (2:1 counts down) -- presumably num_group >= 2 by
# construction; confirm at call sites.
#
# "size": eta is used as-is (identity mapping).
make_eta_fun <- function(num_group, parameterization) {
  if (parameterization == "multi_group") {
    eta_fun <- function(eta) {
      num_ <- 1
      len_ <- length(eta) / num_
      eta_base <- eta[1:len_]
      eta_val <- eta_base
      for (i in 2:num_) {
        dim_1 <- 1 + len_ * (i - 1)
        dim_2 <- len_ * i
        cur_val <- eta_base + eta[dim_1:dim_2]
        eta_val <- c(eta_val, cur_val)
      }
      return(eta_val)
    }
    # Replace `num_ <- 1` (the closure's first statement) with the actual
    # group count.
    body(eta_fun)[[2]] <- substitute(num_ <- num_group,
                                     list(num_group = num_group))
  } else if (parameterization == "size") {
    eta_fun <- function(eta) {
      return(eta)
    }
  }
  return(eta_fun)
}
# Build the Jacobian of the eta mapping produced by make_eta_fun().
#
# "multi_group": starts from a block-diagonal identity and overwrites the
# first column block with stacked identities, since each group's parameters
# depend on the shared base block with derivative I. Uses the same
# `body()[[2]]` patching trick as make_eta_fun() to bake in num_group.
# NOTE(review): `bdiag` comes from the Matrix package, which must be
# attached/imported elsewhere. Same `2:num_` caveat as make_eta_fun()
# when num_group == 1.
#
# "size": identity matrix of dimension length(eta).
make_eta_grad <- function(num_group, parameterization) {
  if (parameterization == "multi_group") {
    eta_grad <- function(eta) {
      num_ <- 1
      len_ <- length(eta) / num_
      eta_grad_val <- diag(len_)
      for (i in 2:num_) {
        eta_grad_val <- as.matrix(bdiag(eta_grad_val, diag(len_)))
      }
      # First column block: identity repeated down every row block.
      eta_grad_val[ , 1:len_] <- rbind(t(matrix(rep(diag(len_), num_group),
                                                nrow = len_,
                                                ncol = num_group * len_)))
      return(eta_grad_val)
    }
    body(eta_grad)[[2]] <- substitute(num_ <- num_group,
                                      list(num_group = num_group))
  } else if (parameterization == "size") {
    eta_grad <- function(eta) {
      return(diag(length(eta)))
    }
  }
  return(eta_grad)
}
# Map node indices 1..K to group labels: the groups are consecutive index
# blocks with the given `sizes`, so node i's label is the number of
# cumulative block boundaries strictly below i.
assign_labels <- function(K, sizes) {
  bounds <- cumsum(c(0, sizes))
  labels <- vapply(seq_len(K), function(i) sum(i > bounds), integer(1))
  as.numeric(labels)
}
# Package estimation results into one entry per group, each holding the
# group's node labels (mapped back to original ordering via sort_order),
# its parameter estimates, and their standard errors.
make_return_obj <- function(obj, labels, sort_order) {
  n_ <- length(unique(labels))
  return_list <- rep(list(NULL), n_)
  # Each group's parameter block has the same length.
  len_ <- length(obj$est$eta) / n_
  names(return_list) <- sprintf("group%i", 1:n_)
  # Transform the information matrix into the eta_fun() parameterization:
  # I_new = (G^-1)' I_eta (G^-1), where G is the Jacobian of the mapping.
  grad <- obj$est$eta_grad(obj$est$eta)
  info_mat <- t(solve(grad)) %*% obj$est$info_mat %*% solve(grad)
  # Standard errors from the inverse information matrix diagonal.
  se_vec <- sqrt(diag(solve(info_mat)))
  for (i in 1:n_) {
    return_list[[i]] <- list(labels = NULL, estimates = NULL, se = NULL)
    return_list[[i]]$labels <- sort(sort_order[labels == i])
    dim_1 <- 1 + len_ * (i - 1)
    dim_2 <- len_ * i
    return_list[[i]]$estimates <- obj$est$eta_fun(obj$est$eta)[dim_1:dim_2]
    return_list[[i]]$se <- se_vec[dim_1:dim_2]
  }
  return(return_list)
}
check_extensions <- function(mod_names) {
  # Strip the multilevel suffixes ("_ijk", then "_ij") from each model term
  # name.  seq_along() makes the empty-list case a no-op, where the
  # original 1:length(mod_names) indexed out of bounds.
  for (i in seq_along(mod_names)) {
    without_ijk <- strsplit(as.character(mod_names[[i]]), "_ijk")
    mod_names[[i]] <- strsplit(as.character(without_ijk), "_ij")
  }
  return(mod_names)
}
##################################################################
###
### tryCatch functions and others for error handling / checking
###
##################################################################
get_network_from_formula <- function(form) {
  # Pull the network object off the formula's left-hand side, converting
  # any extraction failure into a user-friendly error message.
  on_error <- function(err) {
    cat("\n")
    msg <- paste("The formula object provided to mlergm does not",
                 "contain a 'network' class object.\n",
                 "Formulas are specified: net ~ term1 + term2 + ...")
    stop(msg, call. = FALSE)
  }
  tryCatch(ergm.getnetwork(form),
           error = on_error,
           warning = function(warn) warning(warn))
}
# Extract the model terms (the formula right-hand side) as a character
# string, translating cryptic ergm errors into user-facing messages.
#
# Args:
#   form: formula of the form net ~ term1 + term2 + ...
#   net:  network object named on the formula's left-hand side.
#
# Returns: the right-hand side of 'form' as a single character string.
get_terms_from_formula <- function(form, net) {
  # NOTE(review): this result is discarded -- presumably it was meant to be
  # re-assigned (form <- update.formula(form, net ~ .)); confirm intent.
  update.formula(form, net ~ .)
  result <- tryCatch(
    expr = {
      terms <- as.character(form)[3]
      # summary() evaluates the model terms, surfacing invalid terms or
      # covariates as errors for the handler below.
      sum_test <- summary(form)
      # Early return on success -- this exits the enclosing function,
      # not just the tryCatch expression.
      return(terms)
    },
    error = function(err) {
      # Try to pull the offending term name out of ergm's error text.
      bad_term <- str_match(as.character(err), "ERGM term (.*?) ")[2]
      if (is.na(bad_term)) {
        # No term matched: assume the failure names a bad covariate instead.
        bad_covariate <- str_match(as.character(err), "ergm(.*?): (.*?) is")[3]
        err$message <- paste0("Covariate ", bad_covariate, " not a valid covariate.",
                              " Please make sure that ", bad_covariate, " is a covariate of your network.")
      } else {
        err$message <- paste0("Model term ", bad_term, " not a valid model term.",
                              " Please reference 'help(ergm.terms)' for a list of",
                              " valid model terms.")
      }
      cat("\n")
      stop(err, call. = FALSE)
    },
    warning = function(warn) {
      warning(warn)
    })
  # Reached only via the warning path; 'terms' was assigned inside the
  # tryCatch expression before the warning fired.
  return(terms)
}
check_and_convert_memb <- function(memb) {
  # Coerce the membership input to a vector when needed, erroring with a
  # friendly message if coercion is impossible.
  if (is.vector(memb)) {
    vec_memb <- memb
  } else {
    vec_memb <- tryCatch(
      as.vector(memb),
      error = function(err) {
        err$message <- paste0("Provided block memberships 'memb' not of class",
                              " 'vector' and not convertable to class 'vector'.")
        cat("\n")
        stop(err, call. = FALSE)
      },
      warning = function(warn) warning(warn))
  }
  # Relabel the blocks with consecutive integers in order of first
  # appearance (the result keeps the type of 'vec_memb', e.g. character
  # labels become "1", "2", ...).
  unique_labels <- unique(vec_memb)
  converted_memb <- vec_memb
  for (iter in seq_along(unique_labels)) {
    converted_memb[vec_memb == unique_labels[iter]] <- iter
  }
  list(memb_labels = unique_labels,
       memb_internal = converted_memb)
}
check_net <- function(net) {
  # Fail fast when the formula's LHS did not yield a 'network' object.
  if (is.network(net)) {
    return(invisible(NULL))
  }
  cat("\n")
  stop("Left-hand side of provided formula does not contain a valid object of class 'network'.",
       call. = FALSE)
}
make_net_list <- function(net, memb_internal) {
  # The network and the membership vector must describe the same node set.
  if (network.size(net) != length(memb_internal)) {
    cat("\n")
    stop("Number of nodes in network and length of block membership vector are not equal.",
         call. = FALSE)
  }
  # Induce one subnetwork per block, stored at the block's numeric index.
  block_ids <- as.numeric(unique(memb_internal))
  net_list <- rep(list(NULL), length(block_ids))
  for (id in block_ids) {
    members <- which(memb_internal == id)
    net_list[[id]] <- get.inducedSubgraph(net, v = members)
  }
  net_list
}
# Determine the model dimension and per-block parameter index layout for
# the requested parameterization.
#
# Args:
#   net_list:         list of within-block networks.
#   terms:            model right-hand side as a character string.
#   parameterization: one of "standard", "offset", or "size".
#   model:            NOTE(review): overwritten below; the incoming value
#                     is never used -- confirm this argument is needed.
#
# Returns a list: model dimension, ergm model object (built on the block
# with the most statistics), per-block dimension matrix, eta map,
# statistic names, 'edges'/'mutual' term positions (offset
# parameterization only), and the index of the largest block.
check_parameterization_type <- function(net_list, terms, parameterization, model) {
  # Check sufficient statistic sizes for each block
  block_statistic_dimensions <- numeric(length(net_list))
  for (i in 1:length(net_list)) {
    cur_net <- net_list[[i]]
    form_ <- as.formula(paste("cur_net ~", terms))
    block_statistic_dimensions[i] <- length(summary(form_))
  }
  # Use the block with the most statistics as the reference model.
  # NOTE(review): 'form_' is the formula left over from the final loop
  # iteration; update() only swaps its LHS -- verify this is intended.
  which_largest <- which.max(block_statistic_dimensions)
  largest_block <- net_list[[which_largest]]
  form_ <- update(form_, largest_block ~ .)
  statistic_names <- names(summary(form_))
  model <- ergm_model(form_, largest_block)
  eta_map <- model$etamap
  model_dimension <- max(block_statistic_dimensions)
  if (parameterization %in% c("standard", "offset", "size")) {
    # Every block shares the same 1:model_dimension parameter indices.
    block_dims <- rep_row(rbind(seq(1, model_dimension)), length(net_list))
  } else {
    stop("Argument 'parameterization' must be either 'standard', 'offset', or 'size'.", call. = FALSE)
  }
  if (parameterization %in% c("offset")) {
    # Locate the 'edges' and 'mutual' terms; NULL when absent.  Presumably
    # these positions drive the size-offset adjustment downstream --
    # confirm against the callers.
    param_names <- get_coef_names(model, !is.curved(model))
    edge_ind <- which(param_names == "edges")
    mutual_ind <- which(param_names == "mutual")
    edge_loc <- ifelse(length(edge_ind) > 0, edge_ind, 0)
    mutual_loc <- ifelse(length(mutual_ind) > 0, mutual_ind, 0)
    if (edge_loc == 0) {
      edge_loc <- NULL
    }
    if (mutual_loc == 0) {
      mutual_loc <- NULL
    }
  } else {
    edge_loc <- NULL
    mutual_loc <- NULL
  }
  return_list <- list(model_dim = model_dimension,
                      model = model,
                      block_dims = block_dims,
                      eta_map = eta_map,
                      statistic_names = statistic_names,
                      edge_loc = edge_loc,
                      mutual_loc = mutual_loc,
                      which_largest = which_largest)
  return(return_list)
}
get_coef_names <- function(model_obj, is_canonical) {
  # Canonical models store coefficient names directly; curved models keep
  # them per-term (parameter names when present, else coefficient names).
  if (!is_canonical) {
    extract_names <- function(term) {
      find_first_non_null(names(term$params), term$coef.names)
    }
    return(unlist(lapply(model_obj$terms, extract_names)))
  }
  model_obj$coef.names
}
find_first_non_null <- function(...) {
  # Scan the arguments left to right and return the first non-NULL one;
  # returns NULL when every argument is NULL.
  for (x in list(...)) {
    if (!is.null(x)) {
      return(x)
    }
  }
  x
}
check_integer <- function(val, name) {
  # Validate that 'val' is a single integer-valued number within the
  # representable integer range; stops with a descriptive error otherwise.
  fail <- function(msg) {
    cat("\n")
    stop(msg, call. = FALSE)
  }
  if (!is.numeric(val)) {
    fail(paste(name, "must be numeric."))
  }
  if (length(val) != 1) {
    fail(paste(name, "must be a single integer. Cannot supply multiple integers."))
  }
  # Quirky but correct: (!(x)) == 0 is TRUE exactly when x != 0, i.e. when
  # the fractional part val %% 1 is nonzero.
  if (!(val %% 1) == 0) {
    fail(paste(name, "must be an integer."))
  }
  if (abs(val) > .Machine$integer.max) {
    fail(paste(name, "provided is not a valid integer."))
  }
}
msplit <- function(x, y) {
  # split() that stays quiet when the group lengths do not divide evenly.
  suppressWarnings(split(x, y))
}
remove_between_block_edges <- function(net, memb) {
  # Mark every dyad for deletion, then unmark the within-block dyads so
  # that only between-block edges are zeroed out.
  n_nodes <- network.size(net)
  index_mat <- matrix(TRUE, nrow = n_nodes, ncol = n_nodes)
  for (label in unique(memb)) {
    members <- which(memb == label)
    index_mat[members, members] <- FALSE
  }
  net[index_mat] <- 0
  net
}
reorder_block_matrix <- function(net_list) {
  # Stack the within-block adjacency matrices into one block-diagonal
  # network, carrying vertex attributes along and recording each node's
  # block index in "node_memb_group".
  attr_names <- list.vertex.attributes(net_list[[1]])
  attr_values <- rep(list(numeric(0)), length(attr_names))
  block_memb <- numeric(0)
  big_mat <- matrix(0, nrow = 0, ncol = 0)
  for (block in 1:length(net_list)) {
    cur_net <- net_list[[block]]
    for (a in 1:length(attr_names)) {
      attr_values[[a]] <- c(attr_values[[a]],
                            get.vertex.attribute(cur_net, attr_names[a]))
    }
    block_memb <- c(block_memb, rep(block, network.size(cur_net)))
    big_mat <- bdiag(big_mat, cur_net[ , ])
  }
  net <- network(as.matrix(big_mat), directed = is.directed(net_list[[1]]))
  for (a in 1:length(attr_names)) {
    set.vertex.attribute(net, attr_names[a], attr_values[[a]])
  }
  set.vertex.attribute(net, "node_memb_group", block_memb)
  return(net)
}
adjust_formula <- function(form) {
  # Ensure bare geometrically-weighted terms carry an explicit
  # 'fixed = FALSE' modifier.  The four copy-pasted if-blocks of the
  # original are collapsed into one loop, and base trimws()/strsplit()
  # replace the equivalent stringr calls.
  all_vars <- trimws(strsplit(as.character(form)[3], "\\+")[[1]])
  gw_terms <- c("gwesp", "gwodegree", "gwidegree", "gwdegree")
  for (term in gw_terms) {
    location <- which(all_vars == term)
    if (length(location) > 0) {
      all_vars[location] <- paste0(term, "(fixed = FALSE)")
    }
  }
  # Reassemble the right-hand side and update the original formula.
  right_side_change <- paste("~", paste0(all_vars, collapse = " + "))
  form <- update.formula(form, right_side_change)
  return(form)
}
compute_pvalue <- function(obj) {
  # Wald test: z = theta / se with two-sided p-values from the normal tail.
  std_err <- sqrt(diag(solve(obj$est$info_mat)))
  z_stat <- obj$est$theta / std_err
  obj$se <- std_err
  obj$pvalue <- as.numeric(2 * pnorm(-abs(z_stat)))
  return(obj)
}
# Format a formula as a printable string for cat(), wrapping long
# right-hand sides onto continuation lines aligned under the term list.
#
# Args:
#   form: the formula to format.
#   len:  extra indentation (spaces) applied to continuation lines.
#
# Returns: a single string ending in a newline.
format_form_for_cat <- function(form, len = 10) {
  all_vars <- str_trim(str_split(as.character(form)[3], "\\+")[[1]])
  char_lens <- nchar(all_vars)
  # Start with "lhs ~ " and track the running line width from there.
  print_form <- paste0(as.character(form)[2] , " ~ ")
  base_len <- nchar(print_form)
  cur_len <- base_len
  for (i in 1:length(all_vars)) {
    print_form <- paste0(print_form, all_vars[i])
    cur_len <- cur_len + char_lens[i]
    # Wrap once the running width passes 50 characters (but never after
    # the final term).
    if ((cur_len > 50) & (i < length(all_vars))) {
      print_form <- paste0(print_form, "\n")
      if (i < length(all_vars)) {
        print_form <- paste0(print_form, paste0(rep(" ", base_len + len), collapse = ""), "+ ")
        cur_len <- base_len
      } else {
        # NOTE(review): unreachable -- the enclosing condition already
        # requires i < length(all_vars).
        print_form <- paste0(print_form, paste0(rep(" ", base_len + len), collapse = ""))
      }
    } else {
      if (i < length(all_vars)) {
        print_form <- paste0(print_form, " + ")
        cur_len <- cur_len + 3
      }
    }
  }
  print_form <- paste0(print_form, "\n")
  return(print_form)
}
compute_bic <- function(obj) {
  # Effective sample size = number of observed dyads: ordered pairs for
  # directed networks, unordered pairs otherwise, summed over all blocks.
  dyads_per_block <- vapply(obj$net$clust_sizes,
                            function(size, dir_flag) {
                              n_pairs <- choose(size, 2)
                              if (dir_flag) 2 * n_pairs else n_pairs
                            },
                            numeric(1),
                            dir_flag = obj$net$directed_flag)
  n_dyads <- sum(dyads_per_block)
  # BIC = log(sample size) * (number of parameters) - 2 * log-likelihood.
  log(n_dyads) * length(obj$est$theta) - 2 * obj$likval
}
compute_between_se <- function(eta1, eta2, num_dyads) {
  # Analytic per-dyad covariance for the between-block model: a 2x2 matrix
  # when a reciprocity parameter (eta2) is present, a scalar otherwise.
  if (is.null(eta2)) {
    covar_val <- matrix(exp(eta1) / (1 + exp(eta1))^2, nrow = 1, ncol = 1)
  } else {
    denom <- (1 + 2 * exp(eta1) + exp(2 * eta1 + eta2))^2
    v11 <- (2 * exp(eta1) + 2 * exp(2 * eta1 + eta2) + exp(3 * eta1 + eta2)) / denom
    v22 <- (exp(2 * eta1 + eta2) + 2 * exp(3 * eta1 + eta2)) / denom
    # Off-diagonal entries equal the reciprocity variance term.
    covar_val <- matrix(c(v11, v22, v22, v22), nrow = 2, ncol = 2)
  }
  # Scale per-dyad information by the dyad count, then invert for SEs.
  as.numeric(sqrt(diag(solve(covar_val * num_dyads))))
}
logit <- function(p) {
  # Log-odds transform of a probability, routed through log_fun (which
  # clamps extreme magnitudes into the representable double range).
  log_fun(p / (1 - p))
}
# Boxplot of the columns of 'dat_mat' (e.g. simulated statistics) with an
# optional observed-value reference line ('line_dat', one value per column).
# Returns a ggplot object; nothing is rendered here.
boxplot_fun <- function(dat_mat, line_dat = NULL, cutoff = NULL,
                        x_labels = NULL, x_angle = 0,
                        x_axis_label = NULL, y_axis_label = "Count",
                        plot_title = "", title_size = 18,
                        x_axis_size = NULL, y_axis_size = NULL,
                        axis_size = 12, axis_label_size = 14,
                        x_axis_label_size = NULL, y_axis_label_size = NULL,
                        line_size = 1, stat_name = NULL, pretty_x = FALSE) {
  if (!is.null(line_dat)) {
    if (length(line_dat) != ncol(dat_mat)) {
      msg <- "Dimensions of 'line_dat' and 'dat_mat' must match"
      msg <- paste(msg, "'line_dat' must be a vector of length equal")
      msg <- paste(msg, "to the number of columns of 'dat_mat'.\n")
      stop(msg, call. = FALSE)
    }
  }
  if (!is.numeric(x_angle)) {
    stop("Argument 'x_angle' must be numeric.\n", call. = FALSE)
  } else if (length(x_angle) != 1) {
    stop("Argument 'x_angle' must be of length 1.\n", call. = FALSE)
  }
  if (!is.numeric(line_size)) {
    stop("Argument 'line_size' must be numeric.\n", call. = FALSE)
  } else if (length(line_size) != 1) {
    stop("Argument 'line_size' must be of length 1.\n", call. = FALSE)
  } else if (line_size < 0) {
    stop("Argument 'line_size' must be non-negative.\n", call. = FALSE)
  }
  if (is.null(x_axis_label)) {
    x_axis_label <- stat_name
  }
  if (!(length(x_axis_label) == 1)) {
    stop("Argument 'x_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(x_axis_label)) {
    stop("Argument 'x_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(y_axis_label) == 1)) {
    stop("Argument 'y_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(y_axis_label)) {
    stop("Argument 'y_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(plot_title) == 1)) {
    stop("Argument 'plot_title' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(plot_title)) {
    stop("Argument 'plot_title' is not a character string.\n", call. = FALSE)
  }
  if (!is.numeric(title_size)) {
    stop("Argument 'title_size' must be numeric.\n", call. = FALSE)
  } else if (length(title_size) != 1) {
    stop("Argument 'title_size' must be of length 1.\n", call. = FALSE)
  } else if (title_size <= 0) {
    stop("Argument 'title_size' must be a positive number.\n", call. = FALSE)
  }
  # (These two checks appeared twice in the original; the exact duplicates
  # have been removed.)
  if (!is.numeric(axis_label_size)) {
    msg <- "Argument 'axis_label_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_label_size' and 'y_axis_label_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_label_size <= 0) {
    stop("Argument 'axis_label_size' must be a positive number.\n", call. = FALSE)
  }
  if (!is.numeric(axis_size)) {
    msg <- "Argument 'axis_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_size' and 'y_axis_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_size <= 0) {
    stop("Argument 'axis_size' must be a positive number.\n", call. = FALSE)
  }
  # Resolve per-axis font sizes, falling back to the shared defaults with
  # a warning on invalid input.
  if (is.null(x_axis_label_size)) {
    x_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(x_axis_label_size)) {
      warning("Argument 'x_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (!(length(x_axis_label_size) == 1)) {
      warning("Argument 'x_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (x_axis_label_size <= 0) {
      warning("Argument 'x_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    }
  }
  if (is.null(y_axis_label_size)) {
    y_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(y_axis_label_size)) {
      warning("Argument 'y_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (!(length(y_axis_label_size) == 1)) {
      warning("Argument 'y_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (y_axis_label_size <= 0) {
      warning("Argument 'y_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    }
  }
  if (is.null(x_axis_size)) {
    x_axis_size <- axis_size
  } else {
    if (!is.numeric(x_axis_size)) {
      warning("Argument 'x_axis_size' not numeric. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (!(length(x_axis_size) == 1)) {
      warning("Argument 'x_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (x_axis_size <= 0) {
      warning("Argument 'x_axis_size' not a positive number. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    }
  }
  if (is.null(y_axis_size)) {
    y_axis_size <- axis_size
  } else {
    if (!is.numeric(y_axis_size)) {
      warning("Argument 'y_axis_size' not numeric. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (!(length(y_axis_size) == 1)) {
      warning("Argument 'y_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (y_axis_size <= 0) {
      warning("Argument 'y_axis_size' is not a positive number. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    }
  }
  # A first column name containing "0" signals zero-based statistic labels
  # (e.g. degree distributions), which shifts the cutoff by one.
  first_colname <- colnames(dat_mat)[1]
  if (!is.null(x_labels) & !is.null(cutoff)) {
    if (cutoff != length(x_labels)) {
      stop("Value of argument 'cutoff' must be equal to length of 'x_labels'.\n", call. = FALSE)
    }
    # NOTE(review): this branch never defines 'x_breaks', which the line
    # layer below references -- confirm how this path is exercised.
    if (grepl("0", first_colname)) {
      dat_mat <- dat_mat[ , 1:(cutoff + 1)]
    } else {
      dat_mat <- dat_mat[ , 1:cutoff]
    }
  } else if (!is.null(x_labels)) {
    if (length(x_labels) != ncol(dat_mat)) {
      msg <- "Dimensions of 'x_labels' and 'dat_mat' must match"
      msg <- paste(msg, "'x_labels' must be a vector of character labels equal")
      msg <- paste(msg, "to the number of columns of 'dat_mat'.\n")
      stop(msg, call. = FALSE)
    }
    x_breaks <- 1:ncol(dat_mat)
  } else {
    if (!is.null(cutoff)) {
      if (grepl("0", first_colname)) {
        dat_mat <- dat_mat[ , 1:(cutoff + 1)]
      } else {
        dat_mat <- dat_mat[ , 1:cutoff]
      }
    }
    x_breaks <- 1:ncol(dat_mat)
    if (grepl("0", first_colname)) {
      # Bug fix: was 0:(length(x_breaks - 1)), where the "- 1" sat inside
      # length() and produced one label too many.
      x_labels <- as.character(0:(length(x_breaks) - 1))
    } else {
      x_labels <- as.character(x_breaks)
    }
    if (pretty_x) {
      # Thin the tick labels down to a pretty subset.
      pretty_labels <- as.character(pretty(as.numeric(x_labels), n = 5))
      x_labels[!(x_labels %in% pretty_labels)] <- ""
    }
  }
  dat_mat_colnames <- colnames(dat_mat)
  if (is.null(cutoff)) {
    cutoff <- ncol(dat_mat)
  }
  # Reshape to long form: one (group, value) row per cell.
  dat_mat <- melt(dat_mat)[ , 2:3]
  colnames(dat_mat) <- c("group", "values")
  dat_mat$group <- factor(dat_mat$group, levels = dat_mat_colnames)
  if (!is.null(line_dat)) {
    if (length(line_dat) > cutoff) {
      if (grepl("0", first_colname)) {
        line_dat <- line_dat[1:(cutoff + 1)]
      } else {
        line_dat <- line_dat[1:cutoff]
      }
    }
  } else {
    # Bug fix: the original built a 0-row matrix here and then crashed on
    # the names<- assignment below.  An empty vector melts to zero rows so
    # the line layer simply draws nothing.
    line_dat <- numeric(0)
  }
  if (length(line_dat) > 0) {
    names(line_dat) <- dat_mat_colnames
  }
  line_dat <- melt(t(as.matrix(line_dat)))[ , 2:3]
  colnames(line_dat) <- c("group", "values")
  y_breaks <- pretty(dat_mat$values)
  y_labels <- as.character(y_breaks)
  geom_id <- c(rep("box", nrow(dat_mat)), rep("line", nrow(line_dat)))
  box_dat <- as.data.frame(cbind(rbind(dat_mat, line_dat), geom_id))
  # NULL out aes() inputs to appease CRAN check
  group <- values <- NULL
  plot_ <- ggplot() +
    geom_boxplot(data = subset(box_dat, geom_id == "box"),
                 aes(x = group, y = values), outlier.color = "NA") +
    geom_line(data = subset(box_dat, geom_id == "line"),
              aes(x = 1:length(x_breaks), y = values),
              color = "red", size = line_size) +
    theme_classic() +
    labs(title = plot_title) +
    xlab(x_axis_label) +
    ylab(y_axis_label) +
    theme(axis.title.x = element_text(family = "Times",
                                      size = x_axis_label_size,
                                      colour = "Black",
                                      vjust = 0.5)) +
    theme(axis.title.y = element_text(family = "Times",
                                      size = y_axis_label_size,
                                      colour = "Black",
                                      margin = margin(r = 10))) +
    theme(plot.title = element_text(family = "Times",
                                    size = title_size,
                                    colour = "Black",
                                    vjust = 1)) +
    theme(axis.text.x = element_text(color = "black",
                                     family = "Times",
                                     size = x_axis_size,
                                     angle = x_angle,
                                     vjust = 0.2,
                                     hjust = 0.8)) +
    theme(axis.text.y = element_text(color = "black",
                                     size = y_axis_size,
                                     family = "Times")) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank()) +
    theme(legend.position = "none") +
    scale_x_discrete(labels = x_labels) +
    scale_y_continuous(expand = c(0, 1),
                       breaks = y_breaks)
  return(plot_)
}
# Histogram of the numeric vector 'dat_mat' with an optional vertical
# reference line at 'line_dat'.  Returns a ggplot object.
histplot_fun <- function(dat_mat, line_dat = NULL,
                         x_axis_label = NULL, y_axis_label = "Count",
                         plot_title = "", title_size = 18,
                         axis_label_size = 16, axis_size = 14, line_size = 1,
                         x_axis_label_size = NULL, y_axis_label_size = NULL,
                         x_axis_size = NULL, y_axis_size = NULL,
                         stat_name = NULL) {
  if (!is.numeric(dat_mat)) {
    stop("Argument 'dat_mat' must be numeric.\n", call. = FALSE)
  } else if (!is.vector(dat_mat)) {
    stop("Argument 'dat_mat' must be a vector.", call. = FALSE)
  }
  if (!is.null(line_dat)) {
    if (!is.numeric(line_dat)) {
      stop("Argument 'line_dat' must be numeric.\n", call. = FALSE)
    } else if (!is.vector(line_dat)) {
      stop("Argument 'line_dat' must be a single number.\n", call. = FALSE)
    } else if (length(line_dat) != 1) {
      stop("Argument 'line_dat' must be a single number.\n", call. = FALSE)
    }
  }
  if (!is.numeric(line_size)) {
    stop("Argument 'line_size' must be numeric.\n", call. = FALSE)
  } else if (length(line_size) != 1) {
    stop("Argument 'line_size' must be of length 1.\n", call. = FALSE)
  } else if (line_size < 0) {
    stop("Argument 'line_size' must be non-negative.\n", call. = FALSE)
  }
  if (is.null(x_axis_label)) {
    x_axis_label <- stat_name
  }
  if (!(length(x_axis_label) == 1)) {
    stop("Argument 'x_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(x_axis_label)) {
    stop("Argument 'x_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(y_axis_label) == 1)) {
    stop("Argument 'y_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(y_axis_label)) {
    stop("Argument 'y_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(plot_title) == 1)) {
    stop("Argument 'plot_title' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(plot_title)) {
    stop("Argument 'plot_title' is not a character string.\n", call. = FALSE)
  }
  if (!is.numeric(title_size)) {
    stop("Argument 'title_size' must be numeric.\n", call. = FALSE)
  } else if (length(title_size) != 1) {
    stop("Argument 'title_size' must be of length 1.\n", call. = FALSE)
  } else if (title_size <= 0) {
    stop("Argument 'title_size' must be a positive number.\n", call. = FALSE)
  }
  # (These two checks appeared twice in the original; the exact duplicates
  # have been removed.)
  if (!is.numeric(axis_label_size)) {
    msg <- "Argument 'axis_label_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_label_size' and 'y_axis_label_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_label_size <= 0) {
    stop("Argument 'axis_label_size' must be a positive number.\n", call. = FALSE)
  }
  if (!is.numeric(axis_size)) {
    msg <- "Argument 'axis_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_size' and 'y_axis_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_size <= 0) {
    stop("Argument 'axis_size' must be a positive number.\n", call. = FALSE)
  }
  # Resolve per-axis font sizes, falling back to the shared defaults with
  # a warning on invalid input.
  if (is.null(x_axis_label_size)) {
    x_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(x_axis_label_size)) {
      warning("Argument 'x_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (!(length(x_axis_label_size) == 1)) {
      warning("Argument 'x_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (x_axis_label_size <= 0) {
      warning("Argument 'x_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    }
  }
  if (is.null(y_axis_label_size)) {
    y_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(y_axis_label_size)) {
      warning("Argument 'y_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (!(length(y_axis_label_size) == 1)) {
      warning("Argument 'y_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (y_axis_label_size <= 0) {
      warning("Argument 'y_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    }
  }
  if (is.null(x_axis_size)) {
    x_axis_size <- axis_size
  } else {
    if (!is.numeric(x_axis_size)) {
      warning("Argument 'x_axis_size' not numeric. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (!(length(x_axis_size) == 1)) {
      warning("Argument 'x_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (x_axis_size <= 0) {
      warning("Argument 'x_axis_size' not a positive number. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    }
  }
  if (is.null(y_axis_size)) {
    y_axis_size <- axis_size
  } else {
    if (!is.numeric(y_axis_size)) {
      warning("Argument 'y_axis_size' not numeric. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (!(length(y_axis_size) == 1)) {
      warning("Argument 'y_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (y_axis_size <= 0) {
      warning("Argument 'y_axis_size' is not a positive number. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    }
  }
  # Obtain histogram breaks using David Scott's binwidth rule
  hist_values <- hist(dat_mat, plot = FALSE, breaks = "Scott")
  hist_breaks <- diff(hist_values$breaks)[1]
  if (is.null(line_dat)) {
    # Bug fix: ncol() of a vector is NULL, so the original
    # matrix(0, nrow = 0, ncol = ncol(dat_mat)) call errored here.  An
    # empty vector makes the vline layer draw nothing.
    line_dat <- numeric(0)
  }
  y_breaks <- pretty(hist_values$counts)
  y_labels <- as.character(y_breaks)
  x_breaks <- pretty(dat_mat)
  x_labels <- as.character(x_breaks)
  # Bug fix: was rep("line", 1), which mismatched the data length when no
  # reference line was supplied.
  geom_id <- c(rep("hist", length(dat_mat)), rep("line", length(line_dat)))
  hist_values <- c(dat_mat, line_dat)
  hist_dat <- as.data.frame(cbind(hist_values, geom_id))
  rownames(hist_dat) <- NULL
  colnames(hist_dat) <- c("values", "geom_id")
  hist_dat$values <- as.numeric(hist_dat$values)
  #hist_dat$values <- as.numeric(levels(hist_dat$values))[hist_dat$values]
  # NULL out the aes() inputs to appease CRAN check
  values <- NULL
  plot_ <- ggplot() +
    geom_histogram(data = subset(hist_dat, geom_id == "hist"),
                   aes(values), binwidth = hist_breaks,
                   fill = "grey75", color = "grey25") +
    geom_vline(data = subset(hist_dat, geom_id == "line"),
               aes(xintercept = values),
               color = "red", size = line_size) +
    theme_classic() +
    labs(title = plot_title) +
    xlab(x_axis_label) +
    ylab(y_axis_label) +
    theme(axis.title.x = element_text(family = "Times",
                                      size = x_axis_label_size,
                                      colour = "Black",
                                      vjust = 0.5)) +
    theme(axis.title.y = element_text(family = "Times",
                                      size = y_axis_label_size,
                                      colour = "Black",
                                      margin = margin(r = 10))) +
    theme(plot.title = element_text(family = "Times",
                                    size = title_size,
                                    colour = "Black",
                                    vjust = 1)) +
    theme(axis.text.x = element_text(color = "black",
                                     family = "Times",
                                     size = x_axis_size,
                                     vjust = 0.2,
                                     hjust = 0.8)) +
    theme(axis.text.y = element_text(color = "black",
                                     size = y_axis_size,
                                     family = "Times")) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank()) +
    theme(legend.position = "none") +
    scale_x_continuous(breaks = x_breaks,
                       labels = x_labels) +
    scale_y_continuous(expand = c(0, 0),
                       breaks = y_breaks)
  return(plot_)
}
check_terms <- function(form, K) {
  # Validate the formula, then confirm every model term is supported.
  check_formula(form)
  term_names <- all.vars(form, functions = TRUE)
  term_names <- term_names[!(term_names %in% c("-", "+", "~", ":"))]
  # Drop the network name on the left-hand side.
  term_names <- term_names[-1]
  allowable_terms <- c("edges", "mutual", "gwesp", "dgwesp", "gwdegree",
                       "gwodegree", "gwidegree", "triangle", "nodematch",
                       "transitiveties", "cycle", "ttriple", "ctriple",
                       "ddsp", "degree", "desp", "gwdsp", "dsp", "esp",
                       "isolates", "kstar", "istar", "nodefactor",
                       "nodeifactor", "nodeofactor", "nodemix", "nodecov",
                       "nodeicov", "nodeocov", "edgecov", "idegree",
                       "odegree", "ostar", "twopath", "absdiff")
  if (K == 1) {
    # Single-block models may additionally use actor-level effects.
    allowable_terms <- c(allowable_terms, "sender", "receiver", "sociality")
  }
  unsupported <- term_names[!(term_names %in% allowable_terms)]
  if (length(unsupported) > 0) {
    msg <- paste0("The following terms are not supported at this time: ",
                  paste(unsupported, collapse = ", "), ".\n")
    stop(msg, call. = FALSE)
  }
}
check_formula <- function(form) {
  # 'form' must be a formula from which a network object can be extracted.
  if (!is.formula(form)) {
    stop("Argument 'form' must be a 'formula' class object.\n", call. = FALSE)
  }
  extracted <- tryCatch(ergm.getnetwork(form),
                        error = function(err) err)
  if (!is.network(extracted)) {
    stop("Cannot extract network from formula provided. Check that a valid formula was specified.", call. = FALSE)
  }
}
is.formula <- function(form) {
  # TRUE when 'form' carries the "formula" class (checked via is() so S4
  # class hierarchies are honored as in the original).
  "formula" %in% is(form)
}
| /R/helper_functions.R | no_license | cran/mlergm | R | false | false | 39,858 | r |
###########################
###
### Basic util functions
###
###########################
clean_mem <- function() {
  # Trigger a garbage collection without printing the memory-usage table;
  # the gc() summary is still returned invisibly.
  invisible(gc())
}
norm2 <- function(vec) {
  # Euclidean (L2) norm of a vector, via the matrix norm routine.
  norm(as.matrix(vec), type = "2")
}
norm1 <- function(vec) {
  # L1 norm of a vector (sum of absolute values, as the one-column
  # matrix's maximum absolute column sum).
  norm(as.matrix(vec), type = "1")
}
rep_row <- function(x, n) {
  # Stack 'n' copies of the vector 'x' as the rows of a matrix.
  t(matrix(rep(x, times = n), ncol = n))
}
exp_fun <- function(x) {
  # Exponential clamped at the largest representable double, so overflow
  # (Inf) cannot propagate into downstream matrix computations.  The
  # original looped over elements with a scalar 'if'; this vectorized form
  # handles any length (and, via which(), tolerates NA entries).
  val <- exp(x)
  val[which(val > .Machine$double.xmax)] <- .Machine$double.xmax
  return(val)
}
log_fun <- function(x) {
  # Logarithm with magnitudes clamped into the representable double range,
  # mirroring exp_fun.  The original used scalar 'if (abs(val) > ...)'
  # tests, which error on vector input (length > 1 conditions are an error
  # in R >= 4.2); this version is elementwise.
  val <- log(x)
  too_big <- which(abs(val) > .Machine$double.xmax)
  val[too_big] <- sign(val[too_big]) * .Machine$double.xmax
  too_small <- which(abs(val) < .Machine$double.xmin)
  val[too_small] <- sign(val[too_small]) * .Machine$double.xmin
  return(val)
}
outer_fun <- function(v) {
  # Outer product of a vector with itself (v v^T).
  v %o% v
}
# Sort and order the blocks by size
sort_blocks <- function(net_list, mod_names) {
  # Order the blocks (and their model-name entries) by increasing block
  # size, returning the permutation used so callers can invert it.
  block_sizes <- vapply(net_list, function(net) net$gal$n, numeric(1))
  sort_order <- order(block_sizes)
  list(net_list = net_list[sort_order],
       mod_names = mod_names[sort_order],
       sort_order = sort_order)
}
dim_fun <- function(n_obj, n_groups, eta_len) {
  # Assign each of the n_obj blocks a row of parameter indices; all blocks
  # in group g share the index range ((g - 1) * eta_len, g * eta_len].
  sizes <- split_blocks(n_obj, n_groups)
  dims <- matrix(0, nrow = n_obj, ncol = eta_len)
  for (g in seq_len(n_groups)) {
    row_start <- 1 + sum(sizes[seq_spec(g, adjust = -1)])
    row_end <- sum(sizes[1:g])
    group_cols <- seq(1 + (g - 1) * eta_len, eta_len * g)
    dims[row_start:row_end, ] <- rep_row(group_cols, sizes[g])
  }
  return(list(dims = dims, sizes = sizes))
}
split_blocks <- function(n_obj, n_groups) {
  # Divide n_obj items into n_groups parts as evenly as possible, handing
  # the remainder out one-by-one to the leading groups.
  per_group <- n_obj %/% n_groups
  remainder <- n_obj %% n_groups
  extra <- c(rep(1, remainder), rep(0, n_groups - remainder))
  rep(per_group, n_groups) + extra
}
seq_spec <- function(i, adjust = 0) {
  # Sequence 1:(i + adjust) that degenerates to an empty vector at i == 1,
  # sidestepping the backwards-counting 1:0 footgun.
  if (i == 1) {
    numeric(0)
  } else {
    1:(i + adjust)
  }
}
make_eta_fun <- function(num_group, parameterization) {
  # Build the map from the working parameter vector to the natural
  # parameters.  For "multi_group", groups 2..K are parameterized as
  # offsets added to group 1's base parameters; "size" is the identity map.
  if (parameterization == "multi_group") {
    eta_fun <- function(eta) {
      num_ <- 1
      len_ <- length(eta) / num_
      eta_base <- eta[1:len_]
      eta_val <- eta_base
      # Guard: with a single group, 2:num_ would count backwards (2, 1)
      # and index past the end of 'eta'.
      if (num_ > 1) {
        for (i in 2:num_) {
          dim_1 <- 1 + len_ * (i - 1)
          dim_2 <- len_ * i
          cur_val <- eta_base + eta[dim_1:dim_2]
          eta_val <- c(eta_val, cur_val)
        }
      }
      return(eta_val)
    }
    # Bake the group count into the closure body; 'num_ <- 1' must remain
    # the first statement so body(...)[[2]] targets it.
    body(eta_fun)[[2]] <- substitute(num_ <- num_group,
                                     list(num_group = num_group))
  } else if (parameterization == "size") {
    eta_fun <- function(eta) {
      return(eta)
    }
  }
  return(eta_fun)
}
make_eta_grad <- function(num_group, parameterization) {
  # Jacobian of the parameter map produced by make_eta_fun.
  if (parameterization == "multi_group") {
    eta_grad <- function(eta) {
      num_ <- 1
      len_ <- length(eta) / num_
      eta_grad_val <- diag(len_)
      # Guard: for a single group the Jacobian is the identity; the
      # unguarded 2:num_ loop would run backwards when num_ == 1 and
      # produce an over-sized block-diagonal matrix.
      if (num_ > 1) {
        for (i in 2:num_) {
          eta_grad_val <- as.matrix(bdiag(eta_grad_val, diag(len_)))
        }
      }
      eta_grad_val[ , 1:len_] <- rbind(t(matrix(rep(diag(len_), num_group),
                                                nrow = len_,
                                                ncol = num_group * len_)))
      return(eta_grad_val)
    }
    # 'num_ <- 1' must remain the first statement for this substitution.
    body(eta_grad)[[2]] <- substitute(num_ <- num_group,
                                      list(num_group = num_group))
  } else if (parameterization == "size") {
    eta_grad <- function(eta) {
      return(diag(length(eta)))
    }
  }
  return(eta_grad)
}
assign_labels <- function(K, sizes) {
  # Map each of the K nodes to its group index given consecutive group
  # sizes: boundaries are the cumulative sizes (with a leading zero), and a
  # node's label is the number of boundaries strictly below it.
  boundaries <- cumsum(c(0, sizes))
  vapply(seq_len(K),
         function(node) sum(node > boundaries),
         numeric(1))
}
make_return_obj <- function(obj, labels, sort_order) {
  # Package per-group estimates, standard errors, and member labels.
  n_groups <- length(unique(labels))
  block_len <- length(obj$est$eta) / n_groups
  # Transform the information matrix through the inverse Jacobian of the
  # eta map, then take SEs from the inverse information matrix.
  grad_inv <- solve(obj$est$eta_grad(obj$est$eta))
  info_mat <- t(grad_inv) %*% obj$est$info_mat %*% grad_inv
  se_vec <- sqrt(diag(solve(info_mat)))
  eta_full <- obj$est$eta_fun(obj$est$eta)
  return_list <- lapply(seq_len(n_groups), function(g) {
    idx <- (1 + block_len * (g - 1)):(block_len * g)
    list(labels = sort(sort_order[labels == g]),
         estimates = eta_full[idx],
         se = se_vec[idx])
  })
  names(return_list) <- sprintf("group%i", seq_len(n_groups))
  return(return_list)
}
# Strips multilevel suffixes ("_ijk", then "_ij") from each model term name.
check_extensions <- function(mod_names) {
L <- length(mod_names)
for (i in 1:L) {
# NOTE(review): strsplit() returns a list; if a name ever split into more
# than one piece, as.character() would collapse it to a 'c(...)' string.
# Presumably each name contains the suffix at most once, at the end --
# confirm against callers.
mod_names[[i]] <- strsplit(as.character(mod_names[[i]]), "_ijk")
mod_names[[i]] <- strsplit(as.character(mod_names[[i]]), "_ij")
}
return(mod_names)
}
##################################################################
###
### tryCatch functions and others for error handling / checking
###
##################################################################
# Safely extract the network object from the left-hand side of 'form',
# converting ergm's raw error into a user-friendly message.
get_network_from_formula <- function(form) {
  friendly_failure <- function(err) {
    cat("\n")
    msg <- paste("The formula object provided to mlergm does not",
                 "contain a 'network' class object.\n",
                 "Formulas are specified: net ~ term1 + term2 + ...")
    stop(msg, call. = FALSE)
  }
  result <- tryCatch(ergm.getnetwork(form),
                     error = friendly_failure,
                     warning = function(warn) warning(warn))
  return(result)
}
# Extracts the model terms (right-hand side) of 'form' as a character
# string, turning ergm's errors into messages that name the bad term or
# covariate.
get_terms_from_formula <- function(form, net) {
# NOTE(review): the result of update.formula() is discarded -- this line
# has no effect; presumably it was meant to be assigned back to 'form'.
update.formula(form, net ~ .)
result <- tryCatch(
expr = {
terms <- as.character(form)[3]
# summary() forces evaluation of the terms so invalid ones error here.
sum_test <- summary(form)
return(terms)
},
error = function(err) {
# Parse ergm's error text to identify the offending term or covariate.
bad_term <- str_match(as.character(err), "ERGM term (.*?) ")[2]
if (is.na(bad_term)) {
bad_covariate <- str_match(as.character(err), "ergm(.*?): (.*?) is")[3]
err$message <- paste0("Covariate ", bad_covariate, " not a valid covariate.",
" Please make sure that ", bad_covariate, " is a covariate of your network.")
} else {
err$message <- paste0("Model term ", bad_term, " not a valid model term.",
" Please reference 'help(ergm.terms)' for a list of",
" valid model terms.")
}
cat("\n")
stop(err, call. = FALSE)
},
warning = function(warn) {
warning(warn)
})
# NOTE(review): on the success path the return(terms) inside tryCatch's
# expr already exits the function, so this statement only runs after a
# caught warning, when 'terms' may be undefined -- verify intent.
return(terms)
}
# Validates the block membership input and converts its labels to the
# integer codes 1..n (in order of first appearance). Note: if 'memb' is a
# character vector the codes are stored as characters ("1", "2", ...),
# matching the input's type.
#
# Returns a list with the original unique labels (memb_labels) and the
# recoded membership vector (memb_internal).
check_and_convert_memb <- function(memb) {
  # Coerce to a vector when needed, with a friendly error on failure.
  if (!is.vector(memb)) {
    vec_memb <- tryCatch(
      expr = as.vector(memb),
      error = function(err) {
        err$message <- paste0("Provided block memberships 'memb' not of class",
                              " 'vector' and not convertable to class 'vector'.")
        cat("\n")
        stop(err, call. = FALSE)
      },
      warning = function(warn) warning(warn))
  } else {
    vec_memb <- memb
  }
  # Recode each distinct label to its order of first appearance.
  converted_memb <- vec_memb
  unique_labels <- unique(vec_memb)
  for (code in seq_along(unique_labels)) {
    matches <- which(vec_memb == unique_labels[code])
    converted_memb[matches] <- code
  }
  list(memb_labels = unique_labels,
       memb_internal = converted_memb)
}
# Stops with an informative error unless 'net' is a 'network' object.
check_net <- function(net) {
  if (is.network(net)) {
    return(invisible(NULL))
  }
  cat("\n")
  stop("Left-hand side of provided formula does not contain a valid object of class 'network'.",
       call. = FALSE)
}
# Splits 'net' into a list of induced subnetworks, one per block in
# 'memb_internal' (block codes index the result list directly).
make_net_list <- function(net, memb_internal) {
  # The network and membership vector must describe the same node set.
  if (network.size(net) != length(memb_internal)) {
    cat("\n")
    stop("Number of nodes in network and length of block membership vector are not equal.",
         call. = FALSE)
  }
  block_ids <- as.numeric(unique(memb_internal))
  net_list <- rep(list(NULL), length(block_ids))
  for (b in block_ids) {
    members <- which(memb_internal == b)
    net_list[[b]] <- get.inducedSubgraph(net, v = members)
  }
  net_list
}
# Determines the model dimension and per-block statistic layout for the
# requested parameterization, using the block with the largest statistic
# vector as the reference model.
check_parameterization_type <- function(net_list, terms, parameterization, model) {
# Check sufficient statistic sizes for each block
block_statistic_dimensions <- numeric(length(net_list))
for (i in 1:length(net_list)) {
cur_net <- net_list[[i]]
form_ <- as.formula(paste("cur_net ~", terms))
block_statistic_dimensions[i] <- length(summary(form_))
}
# The block with the most statistics defines the reference model/names.
which_largest <- which.max(block_statistic_dimensions)
largest_block <- net_list[[which_largest]]
form_ <- update(form_, largest_block ~ .)
statistic_names <- names(summary(form_))
# NOTE(review): the 'model' argument is overwritten here without being
# read first -- presumably intentional, but the parameter could be dropped.
model <- ergm_model(form_, largest_block)
eta_map <- model$etamap
model_dimension <- max(block_statistic_dimensions)
if (parameterization %in% c("standard", "offset", "size")) {
# Every block shares the same statistic indices 1..model_dimension.
block_dims <- rep_row(rbind(seq(1, model_dimension)), length(net_list))
} else {
stop("Argument 'parameterization' must be either 'standard', 'offset', or 'size'.", call. = FALSE)
}
if (parameterization %in% c("offset")) {
# Locate the 'edges'/'mutual' coefficients so they can receive size offsets.
param_names <- get_coef_names(model, !is.curved(model))
edge_ind <- which(param_names == "edges")
mutual_ind <- which(param_names == "mutual")
# ifelse() cannot return NULL, so 0 stands in for "absent" and is
# converted to NULL just below.
edge_loc <- ifelse(length(edge_ind) > 0, edge_ind, 0)
mutual_loc <- ifelse(length(mutual_ind) > 0, mutual_ind, 0)
if (edge_loc == 0) {
edge_loc <- NULL
}
if (mutual_loc == 0) {
mutual_loc <- NULL
}
} else {
edge_loc <- NULL
mutual_loc <- NULL
}
return_list <- list(model_dim = model_dimension,
model = model,
block_dims = block_dims,
eta_map = eta_map,
statistic_names = statistic_names,
edge_loc = edge_loc,
mutual_loc = mutual_loc,
which_largest = which_largest)
return(return_list)
}
# Returns the coefficient names of an ergm model object. For canonical
# models these are stored directly; for curved models the name of each
# term's curved parameters is taken, falling back to its coef.names.
get_coef_names <- function(model_obj, is_canonical) {
  if (!is_canonical) {
    term_name <- function(term) {
      find_first_non_null(names(term$params), term$coef.names)
    }
    unlist(lapply(model_obj$terms, term_name))
  } else {
    model_obj$coef.names
  }
}
# Returns the first non-NULL argument; if every argument is NULL, the last
# one (i.e. NULL) is returned.
find_first_non_null <- function(...) {
  for (x in list(...)) {
    if (!is.null(x)) {
      break
    }
  }
  x
}
# Validates that 'val' is a single integer-valued number within the
# representable integer range; stops with '<name> <message>' otherwise.
check_integer <- function(val, name) {
  fail <- function(detail) {
    cat("\n")
    stop(paste(name, detail), call. = FALSE)
  }
  if (!is.numeric(val)) {
    fail("must be numeric.")
  }
  if (length(val) != 1) {
    fail("must be a single integer. Cannot supply multiple integers.")
  }
  if (val %% 1 != 0) {
    fail("must be an integer.")
  }
  if (abs(val) > .Machine$integer.max) {
    fail("provided is not a valid integer.")
  }
}
# split() with warnings muted (e.g. when group lengths do not divide evenly).
msplit <- function(x, y) {
  suppressWarnings(split(x, y))
}
# Zeroes out every edge whose endpoints lie in different blocks of 'memb',
# returning the modified network.
remove_between_block_edges <- function(net, memb) {
  n_nodes <- network.size(net)
  # Start with everything marked "between"; clear within-block cells.
  between_mask <- matrix(TRUE, nrow = n_nodes, ncol = n_nodes)
  for (label in unique(memb)) {
    within_block <- which(memb == label)
    between_mask[within_block, within_block] <- FALSE
  }
  net[between_mask] <- 0
  net
}
# Rebuilds a single network from a list of per-block subnetworks as a
# block-diagonal adjacency matrix, carrying vertex attributes along and
# recording each node's block in the "node_memb_group" attribute.
reorder_block_matrix <- function(net_list) {
memb_vec <- numeric(0)
# Attribute names are taken from the first block; presumably all blocks
# share the same vertex attributes -- confirm upstream.
attr_names <- list.vertex.attributes(net_list[[1]])
v_attr <- rep(list(numeric(0)), length(attr_names))
net_mat <- matrix(0, nrow = 0, ncol = 0)
for (k in 1:length(net_list)) {
sub_net <- net_list[[k]]
# Accumulate each attribute across blocks in node order.
for (i in 1:length(attr_names)) {
v_attr[[i]] <- c(v_attr[[i]], get.vertex.attribute(sub_net, attr_names[i]))
}
memb_vec <- c(memb_vec, rep(k, network.size(sub_net)))
# bdiag() (Matrix package) stacks the adjacency matrices block-diagonally.
net_mat <- bdiag(net_mat, sub_net[ , ])
}
net_mat <- as.matrix(net_mat)
net <- network(net_mat, directed = is.directed(net_list[[1]]))
for (i in 1:length(attr_names)) {
set.vertex.attribute(net, attr_names[i], v_attr[[i]])
}
set.vertex.attribute(net, "node_memb_group", memb_vec)
return(net)
}
# Normalizes geometrically-weighted terms in a model formula: any bare
# gw* term (no arguments) is rewritten with an explicit fixed = FALSE so
# that its decay parameter is estimated (curved ERGM).
#
# Args:
#   form: a model formula (net ~ term1 + term2 + ...).
# Returns the formula with bare gw* terms expanded.
adjust_formula <- function(form) {
  # Split the right-hand side into individual term strings.
  all_vars <- trimws(strsplit(as.character(form)[3], "\\+")[[1]])
  # De-duplicated: the original repeated this block once per gw* term.
  gw_terms <- c("gwesp", "gwodegree", "gwidegree", "gwdegree")
  for (term in gw_terms) {
    hits <- which(all_vars == term)
    all_vars[hits] <- paste0(term, "(fixed = FALSE)")
  }
  # Reassemble the right-hand side and update the original formula.
  right_side_change <- paste("~", paste0(all_vars, collapse = " + "))
  update.formula(form, right_side_change)
}
# Adds Wald-test standard errors and two-sided p-values to a fitted object.
# se is derived from the inverse information matrix; pvalue from the normal
# approximation of theta / se.
compute_pvalue <- function(obj) {
  std_err <- sqrt(diag(solve(obj$est$info_mat)))
  obj$se <- std_err
  z_stat <- obj$est$theta / std_err
  obj$pvalue <- as.numeric(2 * pnorm(-abs(z_stat)))
  obj
}
# Pretty-prints a formula for console output, wrapping long right-hand
# sides at ~50 characters and indenting continuation lines by base + 'len'
# spaces. Returns the formatted string (terminated with a newline).
format_form_for_cat <- function(form, len = 10) {
all_vars <- str_trim(str_split(as.character(form)[3], "\\+")[[1]])
char_lens <- nchar(all_vars)
print_form <- paste0(as.character(form)[2] , " ~ ")
base_len <- nchar(print_form)
cur_len <- base_len
for (i in 1:length(all_vars)) {
print_form <- paste0(print_form, all_vars[i])
cur_len <- cur_len + char_lens[i]
if ((cur_len > 50) & (i < length(all_vars))) {
print_form <- paste0(print_form, "\n")
# NOTE(review): the outer condition already guarantees i < length(all_vars),
# so this inner 'if' is always TRUE and the 'else' below is dead code.
if (i < length(all_vars)) {
print_form <- paste0(print_form, paste0(rep(" ", base_len + len), collapse = ""), "+ ")
cur_len <- base_len
} else {
print_form <- paste0(print_form, paste0(rep(" ", base_len + len), collapse = ""))
}
} else {
# Still on the current line: append the separator unless this was the
# final term.
if (i < length(all_vars)) {
print_form <- paste0(print_form, " + ")
cur_len <- cur_len + 3
}
}
}
print_form <- paste0(print_form, "\n")
return(print_form)
}
# BIC of a fitted block model: log(total dyads) * #parameters - 2 * loglik.
# Directed networks have twice as many dyads per block as undirected ones.
compute_bic <- function(obj) {
  dyads_in_block <- function(x, dir_flag) {
    if (dir_flag) {
      2 * choose(x, 2)
    } else {
      choose(x, 2)
    }
  }
  total_edges <- sum(sapply(obj$net$clust_sizes, dyads_in_block,
                            dir_flag = obj$net$directed_flag))
  log(total_edges) * length(obj$est$theta) - 2 * obj$likval
}
# Analytic standard errors for the between-block model.
#
# Args:
#   eta1: edge parameter.
#   eta2: mutuality parameter, or NULL for the undirected/Bernoulli case.
#   num_dyads: number of between-block dyads the information scales with.
compute_between_se <- function(eta1, eta2, num_dyads) {
if (!is.null(eta2)) {
# Directed case with mutuality: 2x2 per-dyad Fisher information of the
# (edge, mutual) dyad-independent model.
# NOTE(review): entries transcribed from a closed-form derivation --
# verify against the model's information matrix.
covar_val <- matrix(0, nrow = 2, ncol = 2)
covar_val[1, 1] <- (2 * exp(eta1) + 2 * exp(2 * eta1 + eta2) + exp(3 * eta1 + eta2)) /
(1 + 2 * exp(eta1) + exp(2 * eta1 + eta2))^2
covar_val[2, 2] <- (exp(2 * eta1 + eta2) + 2 * exp(3 * eta1 + eta2)) /
(1 + 2 * exp(eta1) + exp(2 * eta1 + eta2))^2
covar_val[1, 2] <- covar_val[2, 1] <- covar_val[2, 2]
} else {
# Bernoulli case: per-dyad variance p(1 - p) with p = logistic(eta1).
covar_val <- matrix(0, nrow = 1, ncol = 1)
covar_val[1, 1] <- exp(eta1) / (1 + exp(eta1))^2
}
# Total information is per-dyad information times the dyad count; the
# standard errors come from the inverse's diagonal.
covar_tot <- covar_val * num_dyads
se_val <- as.numeric(sqrt(diag(solve(covar_tot))))
return(se_val)
}
# Logit transform: the log-odds of probability p, via the package's
# log_fun() helper.
logit <- function(p) {
  odds <- p / (1 - p)
  return(log_fun(odds))
}
# Boxplots of simulated statistic distributions (one box per column of
# 'dat_mat'), optionally overlaid with a red line of observed values.
#
# Args:
#   dat_mat: matrix; one column per statistic / category.
#   line_dat: optional numeric vector, one value per column of dat_mat.
#   cutoff: optional number of columns to keep (one extra column is kept
#     when the first column name contains "0", i.e. zero-based statistics).
#   x_labels: optional x-axis labels (length must equal 'cutoff' when both
#     are given, otherwise ncol(dat_mat)).
#   x_angle: rotation of the x tick labels. Remaining *_size arguments
#     control fonts; per-axis sizes default to the shared values.
#   pretty_x: if TRUE, thin the x labels to a pretty subset.
# Returns a ggplot object.
boxplot_fun <- function(dat_mat, line_dat = NULL, cutoff = NULL,
                        x_labels = NULL, x_angle = 0,
                        x_axis_label = NULL, y_axis_label = "Count",
                        plot_title = "", title_size = 18,
                        x_axis_size = NULL, y_axis_size = NULL,
                        axis_size = 12, axis_label_size = 14,
                        x_axis_label_size = NULL, y_axis_label_size = NULL,
                        line_size = 1, stat_name = NULL, pretty_x = FALSE) {
  # ---- Argument validation ----
  if (!is.null(line_dat)) {
    if (length(line_dat) != ncol(dat_mat)) {
      msg <- "Dimensions of 'line_dat' and 'dat_mat' must match"
      msg <- paste(msg, "'line_dat' must be a vector of length equal")
      msg <- paste(msg, "to the number of columns of 'dat_mat'.\n")
      stop(msg, call. = FALSE)
    }
  }
  if (!is.numeric(x_angle)) {
    stop("Argument 'x_angle' must be numeric.\n", call. = FALSE)
  } else if (length(x_angle) != 1) {
    stop("Argument 'x_angle' must be of length 1.\n", call. = FALSE)
  }
  if (!is.numeric(line_size)) {
    stop("Argument 'line_size' must be numeric.\n", call. = FALSE)
  } else if (length(line_size) != 1) {
    stop("Argument 'line_size' must be of length 1.\n", call. = FALSE)
  } else if (line_size < 0) {
    stop("Argument 'line_size' must be non-negative.\n", call. = FALSE)
  }
  if (is.null(x_axis_label)) {
    x_axis_label <- stat_name
  }
  if (!(length(x_axis_label) == 1)) {
    stop("Argument 'x_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(x_axis_label)) {
    stop("Argument 'x_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(y_axis_label) == 1)) {
    stop("Argument 'y_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(y_axis_label)) {
    stop("Argument 'y_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(plot_title) == 1)) {
    stop("Argument 'plot_title' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(plot_title)) {
    stop("Argument 'plot_title' is not a character string.\n", call. = FALSE)
  }
  if (!is.numeric(title_size)) {
    stop("Argument 'title_size' must be numeric.\n", call. = FALSE)
  } else if (length(title_size) != 1) {
    stop("Argument 'title_size' must be of length 1.\n", call. = FALSE)
  } else if (title_size <= 0) {
    stop("Argument 'title_size' must be a positive number.\n", call. = FALSE)
  }
  # These two checks appeared twice verbatim in the original; kept once.
  if (!is.numeric(axis_label_size)) {
    msg <- "Argument 'axis_label_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_label_size' and 'y_axis_label_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_label_size <= 0) {
    stop("Argument 'axis_label_size' must be a positive number.\n", call. = FALSE)
  }
  if (!is.numeric(axis_size)) {
    msg <- "Argument 'axis_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_size' and 'y_axis_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_size <= 0) {
    stop("Argument 'axis_size' must be a positive number.\n", call. = FALSE)
  }
  # ---- Resolve per-axis font sizes, falling back to shared defaults ----
  if (is.null(x_axis_label_size)) {
    x_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(x_axis_label_size)) {
      warning("Argument 'x_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (!(length(x_axis_label_size) == 1)) {
      warning("Argument 'x_axis_label_size' is not of length 1. Using 'axis_label_size instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (x_axis_label_size <= 0) {
      warning("Argument 'x_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    }
  }
  if (is.null(y_axis_label_size)) {
    y_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(y_axis_label_size)) {
      warning("Argument 'y_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (!(length(y_axis_label_size) == 1)) {
      warning("Argument 'y_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (y_axis_label_size <= 0) {
      warning("Argument 'y_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    }
  }
  if (is.null(x_axis_size)) {
    x_axis_size <- axis_size
  } else {
    if (!is.numeric(x_axis_size)) {
      warning("Argument 'x_axis_size' not numeric. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (!(length(x_axis_size) == 1)) {
      warning("Argument 'x_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (x_axis_size <= 0) {
      warning("Argument 'x_axis_size' not a positive number. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    }
  }
  if (is.null(y_axis_size)) {
    y_axis_size <- axis_size
  } else {
    if (!is.numeric(y_axis_size)) {
      warning("Argument 'y_axis_size' not numeric. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (!(length(y_axis_size) == 1)) {
      warning("Argument 'y_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (y_axis_size <= 0) {
      warning("Argument 'y_axis_size' is not a positive number. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    }
  }
  # ---- Trim columns and work out x breaks / labels ----
  first_colname <- colnames(dat_mat)[1]
  if (!is.null(x_labels) & !is.null(cutoff)) {
    if (cutoff != length(x_labels)) {
      stop("Value of argument 'cutoff' must be equal to length of 'x_labels'.\n", call. = FALSE)
    }
    if (grepl("0", first_colname)) {
      dat_mat <- dat_mat[ , 1:(cutoff + 1)]
    } else {
      dat_mat <- dat_mat[ , 1:cutoff]
    }
    # BUG FIX: x_breaks was never set on this branch, which made the
    # geom_line aes() below error whenever line_dat was supplied.
    x_breaks <- 1:ncol(dat_mat)
  } else if (!is.null(x_labels)) {
    if (length(x_labels) != ncol(dat_mat)) {
      msg <- "Dimensions of 'x_labels' and 'dat_mat' must match"
      msg <- paste(msg, "'x_labels' must be a vector of character labels equal")
      msg <- paste(msg, "to the number of columns of 'dat_mat'.\n")
      stop(msg, call. = FALSE)
    }
    x_breaks <- 1:ncol(dat_mat)
  } else {
    if (!is.null(cutoff)) {
      if (grepl("0", first_colname)) {
        dat_mat <- dat_mat[ , 1:(cutoff + 1)]
      } else {
        dat_mat <- dat_mat[ , 1:cutoff]
      }
    }
    x_breaks <- 1:ncol(dat_mat)
    if (grepl("0", first_colname)) {
      # BUG FIX: was 0:(length(x_breaks - 1)), i.e. 0:length(x_breaks),
      # which produced one label too many for the breaks.
      x_labels <- as.character(0:(length(x_breaks) - 1))
    } else {
      x_labels <- as.character(x_breaks)
    }
    if (pretty_x) {
      pretty_labels <- as.character(pretty(as.numeric(x_labels), n = 5))
      x_labels[!(x_labels %in% pretty_labels)] <- ""
    }
  }
  # ---- Reshape to long format for ggplot ----
  dat_mat_colnames <- colnames(dat_mat)
  if (is.null(cutoff)) {
    cutoff <- ncol(dat_mat)
  }
  dat_mat <- melt(dat_mat)[ , 2:3]
  colnames(dat_mat) <- c("group", "values")
  dat_mat$group <- factor(dat_mat$group, levels = dat_mat_colnames)
  if (!is.null(line_dat)) {
    if (length(line_dat) > cutoff) {
      if (grepl("0", first_colname)) {
        line_dat <- line_dat[1:(cutoff + 1)]
      } else {
        line_dat <- line_dat[1:cutoff]
      }
    }
  } else {
    line_dat <- matrix(0, nrow = 0, ncol = ncol(dat_mat))
  }
  names(line_dat) <- dat_mat_colnames
  line_dat <- melt(t(as.matrix(line_dat)))[ , 2:3]
  colnames(line_dat) <- c("group", "values")
  y_breaks <- pretty(dat_mat$values)
  y_labels <- as.character(y_breaks)
  # Tag rows so the box and line layers can subset a single data frame.
  geom_id <- c(rep("box", nrow(dat_mat)), rep("line", nrow(line_dat)))
  box_dat <- as.data.frame(cbind(rbind(dat_mat, line_dat), geom_id))
  # NULL out aes() inputs to appease CRAN check
  group <- values <- NULL
  plot_ <- ggplot() +
    geom_boxplot(data = subset(box_dat, geom_id == "box"),
                 aes(x = group, y = values), outlier.color = "NA") +
    geom_line(data = subset(box_dat, geom_id == "line"),
              aes(x = 1:length(x_breaks), y = values),
              color = "red", size = line_size) +
    theme_classic() +
    labs(title = plot_title) +
    xlab(x_axis_label) +
    ylab(y_axis_label) +
    theme(axis.title.x = element_text(family = "Times",
                                      size = x_axis_label_size,
                                      colour = "Black",
                                      vjust = 0.5)) +
    theme(axis.title.y = element_text(family = "Times",
                                      size = y_axis_label_size,
                                      colour = "Black",
                                      margin = margin(r = 10))) +
    theme(plot.title = element_text(family = "Times",
                                    size = title_size,
                                    colour = "Black",
                                    vjust = 1)) +
    theme(axis.text.x = element_text(color = "black",
                                     family = "Times",
                                     size = x_axis_size,
                                     angle = x_angle,
                                     vjust = 0.2,
                                     hjust = 0.8)) +
    theme(axis.text.y = element_text(color = "black",
                                     size = y_axis_size,
                                     family = "Times")) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank()) +
    theme(legend.position = "none") +
    scale_x_discrete(labels = x_labels) +
    scale_y_continuous(expand = c(0, 1),
                       breaks = y_breaks)
  return(plot_)
}
# Histogram of a simulated statistic ('dat_mat', a numeric vector), with an
# optional red vertical line at the observed value ('line_dat', a single
# number). Font/size arguments mirror boxplot_fun(). Returns a ggplot object.
histplot_fun <- function(dat_mat, line_dat = NULL,
                         x_axis_label = NULL, y_axis_label = "Count",
                         plot_title = "", title_size = 18,
                         axis_label_size = 16, axis_size = 14, line_size = 1,
                         x_axis_label_size = NULL, y_axis_label_size = NULL,
                         x_axis_size = NULL, y_axis_size = NULL,
                         stat_name = NULL) {
  # ---- Argument validation ----
  if (!is.numeric(dat_mat)) {
    stop("Argument 'dat_mat' must be numeric.\n", call. = FALSE)
  } else if (!is.vector(dat_mat)) {
    stop("Argument 'dat_mat' must be a vector.", call. = FALSE)
  }
  if (!is.null(line_dat)) {
    if (!is.numeric(line_dat)) {
      stop("Argument 'line_dat' must be numeric.\n", call. = FALSE)
    } else if (!is.vector(line_dat)) {
      stop("Argument 'line_dat' must be a single number.\n", call. = FALSE)
    } else if (length(line_dat) != 1) {
      stop("Argument 'line_dat' must be a single number.\n", call. = FALSE)
    }
  }
  if (!is.numeric(line_size)) {
    stop("Argument 'line_size' must be numeric.\n", call. = FALSE)
  } else if (length(line_size) != 1) {
    stop("Argument 'line_size' must be of length 1.\n", call. = FALSE)
  } else if (line_size < 0) {
    stop("Argument 'line_size' must be non-negative.\n", call. = FALSE)
  }
  if (is.null(x_axis_label)) {
    x_axis_label <- stat_name
  }
  if (!(length(x_axis_label) == 1)) {
    stop("Argument 'x_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(x_axis_label)) {
    stop("Argument 'x_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(y_axis_label) == 1)) {
    stop("Argument 'y_axis_label' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(y_axis_label)) {
    stop("Argument 'y_axis_label' is not a character string.\n", call. = FALSE)
  }
  if (!(length(plot_title) == 1)) {
    stop("Argument 'plot_title' is not a single character string.\n", call. = FALSE)
  } else if (!is.character(plot_title)) {
    stop("Argument 'plot_title' is not a character string.\n", call. = FALSE)
  }
  if (!is.numeric(title_size)) {
    stop("Argument 'title_size' must be numeric.\n", call. = FALSE)
  } else if (length(title_size) != 1) {
    stop("Argument 'title_size' must be of length 1.\n", call. = FALSE)
  } else if (title_size <= 0) {
    stop("Argument 'title_size' must be a positive number.\n", call. = FALSE)
  }
  # These two checks appeared twice verbatim in the original; kept once.
  if (!is.numeric(axis_label_size)) {
    msg <- "Argument 'axis_label_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_label_size' and 'y_axis_label_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_label_size <= 0) {
    stop("Argument 'axis_label_size' must be a positive number.\n", call. = FALSE)
  }
  if (!is.numeric(axis_size)) {
    msg <- "Argument 'axis_size' must be a positive number."
    msg <- paste(msg, "If you want to change the individual axis font sizes")
    msg <- paste(msg, "then you should use specify 'x_axis_size' and 'y_axis_size.\n")
    stop(msg, call. = FALSE)
  }
  if (axis_size <= 0) {
    stop("Argument 'axis_size' must be a positive number.\n", call. = FALSE)
  }
  # ---- Resolve per-axis font sizes, falling back to shared defaults ----
  if (is.null(x_axis_label_size)) {
    x_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(x_axis_label_size)) {
      warning("Argument 'x_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (!(length(x_axis_label_size) == 1)) {
      warning("Argument 'x_axis_label_size' is not of length 1. Using 'axis_label_size instead.\n")
      x_axis_label_size <- axis_label_size
    } else if (x_axis_label_size <= 0) {
      warning("Argument 'x_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      x_axis_label_size <- axis_label_size
    }
  }
  if (is.null(y_axis_label_size)) {
    y_axis_label_size <- axis_label_size
  } else {
    if (!is.numeric(y_axis_label_size)) {
      warning("Argument 'y_axis_label_size' not numeric. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (!(length(y_axis_label_size) == 1)) {
      warning("Argument 'y_axis_label_size' is not of length 1. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    } else if (y_axis_label_size <= 0) {
      warning("Argument 'y_axis_label_size' not a positive number. Using 'axis_label_size' instead.\n")
      y_axis_label_size <- axis_label_size
    }
  }
  if (is.null(x_axis_size)) {
    x_axis_size <- axis_size
  } else {
    if (!is.numeric(x_axis_size)) {
      warning("Argument 'x_axis_size' not numeric. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (!(length(x_axis_size) == 1)) {
      warning("Argument 'x_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    } else if (x_axis_size <= 0) {
      warning("Argument 'x_axis_size' not a positive number. Using 'axis_size' instead.\n")
      x_axis_size <- axis_size
    }
  }
  if (is.null(y_axis_size)) {
    y_axis_size <- axis_size
  } else {
    if (!is.numeric(y_axis_size)) {
      warning("Argument 'y_axis_size' not numeric. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (!(length(y_axis_size) == 1)) {
      warning("Argument 'y_axis_size' is not of length 1. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    } else if (y_axis_size <= 0) {
      warning("Argument 'y_axis_size' is not a positive number. Using 'axis_size' instead.\n")
      y_axis_size <- axis_size
    }
  }
  # ---- Build histogram data ----
  # Obtain histogram breaks using David Scott's binwidth rule
  hist_values <- hist(dat_mat, plot = FALSE, breaks = "Scott")
  hist_breaks <- diff(hist_values$breaks)[1]
  if (is.null(line_dat)) {
    # BUG FIX: was matrix(0, nrow = 0, ncol = ncol(dat_mat)); 'dat_mat' is
    # a vector here so ncol() is NULL and matrix() errors. An empty numeric
    # vector gives the vline layer zero rows instead.
    line_dat <- numeric(0)
  }
  y_breaks <- pretty(hist_values$counts)
  y_labels <- as.character(y_breaks)
  x_breaks <- pretty(dat_mat)
  x_labels <- as.character(x_breaks)
  # BUG FIX: tag as many "line" rows as line_dat has entries (the original
  # hard-coded one, mismatching lengths when line_dat was NULL).
  geom_id <- c(rep("hist", length(dat_mat)), rep("line", length(line_dat)))
  combined_values <- c(dat_mat, line_dat)
  hist_dat <- as.data.frame(cbind(combined_values, geom_id))
  rownames(hist_dat) <- NULL
  colnames(hist_dat) <- c("values", "geom_id")
  hist_dat$values <- as.numeric(hist_dat$values)
  # NULL out the aes() inputs to appease CRAN check
  values <- NULL
  plot_ <- ggplot() +
    geom_histogram(data = subset(hist_dat, geom_id == "hist"),
                   aes(values), binwidth = hist_breaks,
                   fill = "grey75", color = "grey25") +
    geom_vline(data = subset(hist_dat, geom_id == "line"),
               aes(xintercept = values),
               color = "red", size = line_size) +
    theme_classic() +
    labs(title = plot_title) +
    xlab(x_axis_label) +
    ylab(y_axis_label) +
    theme(axis.title.x = element_text(family = "Times",
                                      size = x_axis_label_size,
                                      colour = "Black",
                                      vjust = 0.5)) +
    theme(axis.title.y = element_text(family = "Times",
                                      size = y_axis_label_size,
                                      colour = "Black",
                                      margin = margin(r = 10))) +
    theme(plot.title = element_text(family = "Times",
                                    size = title_size,
                                    colour = "Black",
                                    vjust = 1)) +
    theme(axis.text.x = element_text(color = "black",
                                     family = "Times",
                                     size = x_axis_size,
                                     vjust = 0.2,
                                     hjust = 0.8)) +
    theme(axis.text.y = element_text(color = "black",
                                     size = y_axis_size,
                                     family = "Times")) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank()) +
    theme(legend.position = "none") +
    scale_x_continuous(breaks = x_breaks,
                       labels = x_labels) +
    scale_y_continuous(expand = c(0, 0),
                       breaks = y_breaks)
  return(plot_)
}
# Validates that every model term in 'form' is in the supported set,
# stopping with a list of unsupported terms otherwise. Node-level terms
# (sender/receiver/sociality) are only allowed for single-block (K == 1)
# models.
check_terms <- function(form, K) {
  check_formula(form)
  model_terms <- all.vars(form, functions = TRUE)
  model_terms <- model_terms[!(model_terms %in% c("-", "+", "~", ":"))]
  # Drop the network symbol on the left-hand side.
  model_terms <- model_terms[-1]
  allowable_terms <- c("edges", "mutual", "gwesp", "dgwesp", "gwdegree",
                       "gwodegree", "gwidegree", "triangle", "nodematch",
                       "transitiveties", "cycle", "ttriple", "ctriple",
                       "ddsp", "degree", "desp", "gwdsp", "dsp", "esp",
                       "isolates", "kstar", "istar", "nodefactor",
                       "nodeifactor", "nodeofactor", "nodemix", "nodecov",
                       "nodeicov", "nodeocov", "edgecov", "idegree",
                       "odegree", "ostar", "twopath", "absdiff")
  if (K == 1) {
    allowable_terms <- c(allowable_terms, "sender", "receiver", "sociality")
  }
  unsupported <- which(!(model_terms %in% allowable_terms))
  if (length(unsupported) > 0) {
    msg <- paste0("The following terms are not supported at this time: ",
                  paste(model_terms[unsupported], collapse = ", "), ".\n")
    stop(msg, call. = FALSE)
  }
}
# Validates that 'form' is a formula from which a network can be extracted.
check_formula <- function(form) {
  if (!is.formula(form)) {
    stop("Argument 'form' must be a 'formula' class object.\n", call. = FALSE)
  }
  # Capture any extraction error and fail with a uniform message below.
  extracted <- tryCatch(ergm.getnetwork(form),
                        error = function(err) err)
  if (!is.network(extracted)) {
    stop("Cannot extract network from formula provided. Check that a valid formula was specified.", call. = FALSE)
  }
}
# TRUE when 'form' is (or inherits from) a formula.
is.formula <- function(form) {
  is(form, "formula")
}
|
# R Script to summarise the results of the samples for each CG panel primer pair
projdir <- "/home/dyap/Projects/Takeda_T3/CG"

# The input file is an SDS2.4 RQ-format export; manually edit out blank
# lines and headers first, or pre-process with
# /home/dyap/Projects/Takeda_T3/CG/pre-process_SDS.sh
exptname <- "qPCR_QC_T3_treatment_CG_Panel"
infile <- "T3_CGPanel.csv"
sam <- "T3_conc_samples"

input <- paste(projdir, infile, sep = "/")
samfile <- paste(projdir, sam, sep = "/")
# BUG FIX: was paste(paste(prodir, ...)) -- 'prodir' is undefined (typo for
# 'projdir'), which made this assignment error at run time.
pdffile <- paste(paste(projdir, exptname, sep = "/"), "pdf", sep = ".")

# This is the whole set of primers ordered
pri <- paste(projdir, "/CG_primers_ordered.txt", sep = "")
pridf <- read.table(file = pri, header = FALSE, stringsAsFactors = FALSE)
names(pridf)[1] <- "Primers"
head(pridf)

ppdf <- read.table(file = input, sep = "\t", stringsAsFactors = FALSE, header = TRUE)
head(ppdf)
#####################
# samples <- read.table(file=samfile, sep="\n", stringsAsFactors = FALSE, header = FALSE)
# importing via read.table does NOT work for this file, so the sample ids
# are hard-coded here.
samples <- c("1_Old_0", "2_Old_Med", "3_Old_Hi", "4_New_0", "5_New_Med", "6_New_Hi")
sdf <- data.frame(Samples = samples, stringsAsFactors = FALSE)
#colnames(sdf)[1]<-"Samples"
### Ordering of primer gene names in output is arbitrary - need to make Primers variable same for either order of gene names.
# Build "gene@breakpoint" keys for both ends and locate the primer whose
# name contains both keys (order-independent match).
ppdf$g1nm <- paste(ppdf$gene1, "@", ppdf$breakpt1, sep = "")
ppdf$g2nm <- paste(ppdf$gene2, "@", ppdf$breakpt2, sep = "")
g1priml <- lapply(ppdf$g1nm, function(x) which(grepl(x, pridf$Primers)))
g2priml <- lapply(ppdf$g2nm, function(x) which(grepl(x, pridf$Primers)))
# First primer index matching both gene keys; NA when there is no match.
intprim <- sapply(seq(nrow(ppdf)),
                  function(x) c(intersect(g1priml[[x]], g2priml[[x]]), NA_integer_)[1])
if (length(intprim) == nrow(ppdf)) {
  ppdf$PrimNo <- intprim
} else {
  stop("Primer match error")
}
# De-duplicated: PrimersN was recomputed with an identical sapply() call;
# reuse the result computed above instead.
ppdf$PrimersN <- intprim
ppdf$Primers <- pridf$Primers[ppdf$PrimersN]
ppdf$g1prim <- sapply(ppdf$g1nm, function(x) which(grepl(x, pridf$Primers)))
ppdf$g2prim <- sapply(ppdf$g2nm, function(x) which(grepl(x, pridf$Primers)))
### ?? Need to make dataframe for all samples and all primer pairs and merge with pipeline data
# Cartesian product of all samples x all ordered primers, so combinations
# absent from the qPCR export still appear (with NA values) after merging.
prsadf <- expand.grid(as.character(samples), as.character(pridf$Primers), stringsAsFactors = FALSE)
names(prsadf) <- c("sample_id", "Primers")
# Drop the helper columns created for primer matching before the merge.
ppdf$g1nm <- NULL
ppdf$g2nm <- NULL
ppdf$g1prim <- NULL
ppdf$g2prim <- NULL
ppdf$PrimersN <- NULL
# Left join on sample_id/Primers: keep every combination from prsadf.
presdf <- merge(prsadf, ppdf, all.x = TRUE, all.y = FALSE, stringsAsFactors = FALSE)
# Console sanity checks: row counts per sample and per primer, before and
# after the merge (the nested table() shows whether counts are uniform).
table(prsadf$sample_id, useNA = "always")
table(prsadf$Primers, useNA = "always")
table(table(prsadf$sample_id, useNA = "always"))
table(table(prsadf$Primers, useNA = "always"))
table(presdf$sample_id, useNA = "always")
table(presdf$Primers, useNA = "always")
table(table(presdf$sample_id, useNA = "always"))
table(table(presdf$Primers, useNA = "always"))
# Order by primer then sample for visual inspection of the merged data.
presdfpso <- order(presdf$Primers, presdf$sample_id)
presdf[presdfpso, ]
head(presdf[presdfpso, ])
presdf[presdfpso, c("sample_id", "Primers", "RQ")]
head(presdf[presdfpso, c("sample_id", "RQ")])
presdf[presdfpso, c("sample_id", "RQ")]
## Why two records here?
## SA467 20 - TP53RK SLC13A3 45317771 45242364 0.3893030794165316 1.0049608511146972 0.522612128182016
## SA467 20 - TP53RK SLC13A3 45317771 45242364 0.38981245658717295 1.0062757755065448 0.5232959313710254
# NOTE(review): presdf[, c("RQ")] selects a single column, which drops to a
# vector; apply() then errors on a dimensionless object. Presumably this
# originally selected several RQ-related columns -- confirm intended columns.
table(apply(presdf[, c("RQ")], 1, function(x) sum(is.na(x))))
### 0 3
### 417 367
### All are missing, or none are missing.
# Open an interactive device for inspection; requires an X display.
X11()
# NOTE(review): require() returns FALSE instead of erroring when nlme is
# missing; library(nlme) would fail fast here.
require("nlme")
presdf$sidf <- factor(presdf$sample_id)
presdf$sidn <- as.numeric(presdf$sidf)
# Insert newlines into primer names so lattice strip labels wrap.
presdf$Primerssp <- gsub(":", "\n", presdf$Primers)
# One RQ-vs-sample panel per primer pair (groupedData from nlme).
trgd <- groupedData(RQ ~ sidn | Primerssp, data = presdf, order.groups = FALSE)
pdf(file = pdffile, width = 8, height = 10)
plot(trgd, aspect = "fill", par.strip.text=list(cex=0.7, lines = 3), layout = c(4, 5), as.table = TRUE)
dev.off()
| /R-scripts/CG-Panel_summary.R | no_license | oncoapop/data_reporting | R | false | false | 3,988 | r | # R Script to summaries the results of the samples for each CG panel primer pair
# Setup for the CG panel summary (duplicate copy of the script header).
projdir <- "/home/dyap/Projects/Takeda_T3/CG"

# The input file is an SDS2.4 RQ-format export; manually edit out blank
# lines and headers first, or pre-process with
# /home/dyap/Projects/Takeda_T3/CG/pre-process_SDS.sh
exptname <- "qPCR_QC_T3_treatment_CG_Panel"
infile <- "T3_CGPanel.csv"
sam <- "T3_conc_samples"

input <- paste(projdir, infile, sep = "/")
samfile <- paste(projdir, sam, sep = "/")
# BUG FIX: was paste(paste(prodir, ...)) -- 'prodir' is undefined (typo for
# 'projdir'), which made this assignment error at run time.
pdffile <- paste(paste(projdir, exptname, sep = "/"), "pdf", sep = ".")

# This is the whole set of primers ordered
pri <- paste(projdir, "/CG_primers_ordered.txt", sep = "")
pridf <- read.table(file = pri, header = FALSE, stringsAsFactors = FALSE)
names(pridf)[1] <- "Primers"
head(pridf)

ppdf <- read.table(file = input, sep = "\t", stringsAsFactors = FALSE, header = TRUE)
head(ppdf)
#####################
# samples <- read.table(file=samfile, sep="\n", stringsAsFactors = FALSE, header = FALSE)
# importing via read.table does NOT work for this file, so the sample ids
# are hard-coded here.
samples <- c("1_Old_0", "2_Old_Med", "3_Old_Hi", "4_New_0", "5_New_Med", "6_New_Hi")
sdf <- data.frame(Samples = samples, stringsAsFactors = FALSE)
#colnames(sdf)[1]<-"Samples"
### Ordering of primer gene names in output is arbitrary - need to make Primers variable same for either order of gene names.
# Build "gene@breakpoint" keys for both ends and locate the primer whose
# name contains both keys (order-independent match).
ppdf$g1nm <- paste(ppdf$gene1, "@", ppdf$breakpt1, sep = "")
ppdf$g2nm <- paste(ppdf$gene2, "@", ppdf$breakpt2, sep = "")
g1priml <- lapply(ppdf$g1nm, function(x) which(grepl(x, pridf$Primers)))
g2priml <- lapply(ppdf$g2nm, function(x) which(grepl(x, pridf$Primers)))
# First primer index matching both gene keys; NA when there is no match.
intprim <- sapply(seq(nrow(ppdf)),
                  function(x) c(intersect(g1priml[[x]], g2priml[[x]]), NA_integer_)[1])
if (length(intprim) == nrow(ppdf)) {
  ppdf$PrimNo <- intprim
} else {
  stop("Primer match error")
}
# De-duplicated: PrimersN was recomputed with an identical sapply() call;
# reuse the result computed above instead.
ppdf$PrimersN <- intprim
ppdf$Primers <- pridf$Primers[ppdf$PrimersN]
ppdf$g1prim <- sapply(ppdf$g1nm, function(x) which(grepl(x, pridf$Primers)))
ppdf$g2prim <- sapply(ppdf$g2nm, function(x) which(grepl(x, pridf$Primers)))
### ?? Need to make dataframe for all samples and all primer pairs and merge with pipeline data
prsadf <- expand.grid(as.character(samples), as.character(pridf$Primers), stringsAsFactors = FALSE)
names(prsadf) <- c("sample_id", "Primers")
ppdf$g1nm <- NULL
ppdf$g2nm <- NULL
ppdf$g1prim <- NULL
ppdf$g2prim <- NULL
ppdf$PrimersN <- NULL
presdf <- merge(prsadf, ppdf, all.x = TRUE, all.y = FALSE, stringsAsFactors = FALSE)
table(prsadf$sample_id, useNA = "always")
table(prsadf$Primers, useNA = "always")
table(table(prsadf$sample_id, useNA = "always"))
table(table(prsadf$Primers, useNA = "always"))
table(presdf$sample_id, useNA = "always")
table(presdf$Primers, useNA = "always")
table(table(presdf$sample_id, useNA = "always"))
table(table(presdf$Primers, useNA = "always"))
presdfpso <- order(presdf$Primers, presdf$sample_id)
presdf[presdfpso, ]
head(presdf[presdfpso, ])
presdf[presdfpso, c("sample_id", "Primers", "RQ")]
head(presdf[presdfpso, c("sample_id", "RQ")])
presdf[presdfpso, c("sample_id", "RQ")]
## Why two records here?
## SA467 20 - TP53RK SLC13A3 45317771 45242364 0.3893030794165316 1.0049608511146972 0.522612128182016
## SA467 20 - TP53RK SLC13A3 45317771 45242364 0.38981245658717295 1.0062757755065448 0.5232959313710254
table(apply(presdf[, c("RQ")], 1, function(x) sum(is.na(x))))
### 0 3
### 417 367
### All are missing, or none are missing.
X11()
require("nlme")
presdf$sidf <- factor(presdf$sample_id)
presdf$sidn <- as.numeric(presdf$sidf)
presdf$Primerssp <- gsub(":", "\n", presdf$Primers)
trgd <- groupedData(RQ ~ sidn | Primerssp, data = presdf, order.groups = FALSE)
pdf(file = pdffile, width = 8, height = 10)
plot(trgd, aspect = "fill", par.strip.text=list(cex=0.7, lines = 3), layout = c(4, 5), as.table = TRUE)
dev.off()
|
# Auto-generated example script extracted from the Rd help page for
# ecd.fit_ts_conf() in the 'ecd' package.  The example call sits inside a
# "Not run" guard, so only the library() call executes when sourced.
library(ecd)
### Name: ecd.fit_ts_conf
### Title: Timeseries fitting utility
### Aliases: ecd.fit_ts_conf
### Keywords: fit timeseries
### ** Examples
## Not run:
##D d <- ecd.fit_ts_conf(ts, conf)
## End(Not run)
| /data/genthat_extracted_code/ecd/examples/ecd.fit_ts_conf.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 222 | r | library(ecd)
# Rd-extracted example metadata for ecd.fit_ts_conf() (package 'ecd').
# The example itself is guarded by "Not run" and does not execute.
### Name: ecd.fit_ts_conf
### Title: Timeseries fitting utility
### Aliases: ecd.fit_ts_conf
### Keywords: fit timeseries
### ** Examples
## Not run:
##D d <- ecd.fit_ts_conf(ts, conf)
## End(Not run)
|
## Code extracted (knitr::purl style) from a phylopath package vignette; the
## "## ----" lines are the original knitr chunk delimiters and options and
## should be kept aligned with the vignette source.
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(dev = "png", fig.height = 5, fig.width = 5, dpi = 300, out.width = "450px")
## ------------------------------------------------------------------------
library(phylopath)
# Nine candidate path models over the variables BM, NL, DD, LS and RS;
# the formulas in .common are added to every model.
models <- define_model_set(
one = c(RS ~ DD),
two = c(DD ~ NL, RS ~ LS + DD),
three = c(RS ~ NL),
four = c(RS ~ BM + NL),
five = c(RS ~ BM + NL + DD),
six = c(NL ~ RS, RS ~ BM),
seven = c(NL ~ RS, RS ~ LS + BM),
eight = c(NL ~ RS),
nine = c(NL ~ RS, RS ~ LS),
.common = c(LS ~ BM, NL ~ BM, DD ~ NL)
)
## ------------------------------------------------------------------------
models$one
## ---- fig.height = 5, fig.width = 5, dpi = 300---------------------------
plot(models$one)
## ---- fig.height=8, fig.width=8, out.width = "600px"---------------------
plot_model_set(models)
## ------------------------------------------------------------------------
# Fit all candidate models with phylogenetic path analysis, using the
# package's rhino data and tree; 'order' fixes the variable ordering.
result <- phylo_path(models, data = rhino, tree = rhino_tree,
order = c('BM', 'NL', 'DD', 'LS', 'RS'))
## ------------------------------------------------------------------------
result
## ------------------------------------------------------------------------
# Model-comparison summary, then its plot.
(s <- summary(result))
## ------------------------------------------------------------------------
plot(s)
## ------------------------------------------------------------------------
# Top-ranked model and averaged models (see average() calls below).
(best_model <- best(result))
## ---- warning = FALSE, fig.width = 6-------------------------------------
plot(best_model)
## ---- fig.width = 7------------------------------------------------------
average_model <- average(result)
plot(average_model, algorithm = 'mds', curvature = 0.1) # increase the curvature to avoid overlapping edges
## ---- fig.width = 7------------------------------------------------------
average_model_full <- average(result, method = "full")
plot(average_model_full, algorithm = 'mds', curvature = 0.1)
## ------------------------------------------------------------------------
# Path-coefficient plots for the best and fully-averaged models.
coef_plot(best_model)
## ---- fig.height=3.5-----------------------------------------------------
coef_plot(average_model_full, reverse_order = TRUE) +
ggplot2::coord_flip() +
ggplot2::theme_bw()
## ------------------------------------------------------------------------
# d-separation statements evaluated for model 'one'.
result$d_sep$one
| /vignettes/intro_to_phylopath.R | no_license | lzhangss/phylopath | R | false | false | 2,340 | r | ## ---- include = FALSE----------------------------------------------------
## Code extracted (knitr::purl style) from a phylopath package vignette; the
## "## ----" lines are the original knitr chunk delimiters and options.
knitr::opts_chunk$set(dev = "png", fig.height = 5, fig.width = 5, dpi = 300, out.width = "450px")
## ------------------------------------------------------------------------
library(phylopath)
# Nine candidate path models over the variables BM, NL, DD, LS and RS;
# the formulas in .common are added to every model.
models <- define_model_set(
one = c(RS ~ DD),
two = c(DD ~ NL, RS ~ LS + DD),
three = c(RS ~ NL),
four = c(RS ~ BM + NL),
five = c(RS ~ BM + NL + DD),
six = c(NL ~ RS, RS ~ BM),
seven = c(NL ~ RS, RS ~ LS + BM),
eight = c(NL ~ RS),
nine = c(NL ~ RS, RS ~ LS),
.common = c(LS ~ BM, NL ~ BM, DD ~ NL)
)
## ------------------------------------------------------------------------
models$one
## ---- fig.height = 5, fig.width = 5, dpi = 300---------------------------
plot(models$one)
## ---- fig.height=8, fig.width=8, out.width = "600px"---------------------
plot_model_set(models)
## ------------------------------------------------------------------------
# Fit all candidate models with phylogenetic path analysis, using the
# package's rhino data and tree; 'order' fixes the variable ordering.
result <- phylo_path(models, data = rhino, tree = rhino_tree,
order = c('BM', 'NL', 'DD', 'LS', 'RS'))
## ------------------------------------------------------------------------
result
## ------------------------------------------------------------------------
# Model-comparison summary, then its plot.
(s <- summary(result))
## ------------------------------------------------------------------------
plot(s)
## ------------------------------------------------------------------------
# Top-ranked model and averaged models (see average() calls below).
(best_model <- best(result))
## ---- warning = FALSE, fig.width = 6-------------------------------------
plot(best_model)
## ---- fig.width = 7------------------------------------------------------
average_model <- average(result)
plot(average_model, algorithm = 'mds', curvature = 0.1) # increase the curvature to avoid overlapping edges
## ---- fig.width = 7------------------------------------------------------
average_model_full <- average(result, method = "full")
plot(average_model_full, algorithm = 'mds', curvature = 0.1)
## ------------------------------------------------------------------------
# Path-coefficient plots for the best and fully-averaged models.
coef_plot(best_model)
## ---- fig.height=3.5-----------------------------------------------------
coef_plot(average_model_full, reverse_order = TRUE) +
ggplot2::coord_flip() +
ggplot2::theme_bw()
## ------------------------------------------------------------------------
# d-separation statements evaluated for model 'one'.
result$d_sep$one
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.