content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## ----setup, include=FALSE------------------------------------------------
# Purled vignette chunk: sets the default so chunks are not evaluated
knitr::opts_chunk$set(eval = FALSE)
## ---- eval = TRUE, echo = FALSE------------------------------------------
# Package under discussion; swap the comment below to build for devtools
pkg_name <- "usethis"
# pkg_name = "devtools"
## ------------------------------------------------------------------------
devtools::session_info()
|
/r_package_workflow/index.R
|
no_license
|
yunguan-wang/neuroc
|
R
| false
| false
| 337
|
r
|
## ----setup, include=FALSE------------------------------------------------
# Purled vignette chunk: sets the default so chunks are not evaluated
knitr::opts_chunk$set(eval = FALSE)
## ---- eval = TRUE, echo = FALSE------------------------------------------
# Package under discussion; swap the comment below to build for devtools
pkg_name <- "usethis"
# pkg_name = "devtools"
## ------------------------------------------------------------------------
devtools::session_info()
|
## Getting & Cleaning Data course project: download the UCI HAR dataset,
## merge the train/test splits, keep only the mean()/std() features,
## attach activity labels, and write a tidy per-subject / per-activity
## average of every kept variable.
setwd("~/R Studio personal projects/Curso Johns Hopkins/Getting and Cleaning Data/Final Project")
if (!file.exists("./data")) { dir.create("./data") }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/ProjectDataset.zip", method = "curl")
unzip(zipfile = "./data/ProjectDataset.zip", exdir = "./data")
datapath <- file.path("./data", "UCI HAR Dataset")
## Reading activity, subject and feature files for both splits
ActivityTest <- read.table(file.path(datapath, "test", "y_test.txt"), header = FALSE)
ActivityTrain <- read.table(file.path(datapath, "train", "y_train.txt"), header = FALSE)
SubjectTest <- read.table(file.path(datapath, "test", "subject_test.txt"), header = FALSE)
SubjectTrain <- read.table(file.path(datapath, "train", "subject_train.txt"), header = FALSE)
FeaturesTest <- read.table(file.path(datapath, "test", "X_test.txt"), header = FALSE)
FeaturesTrain <- read.table(file.path(datapath, "train", "X_train.txt"), header = FALSE)
## Stack train on top of test for each table (row order: train, then test)
Activity <- rbind(ActivityTrain, ActivityTest)
Subject <- rbind(SubjectTrain, SubjectTest)
Features <- rbind(FeaturesTrain, FeaturesTest)
## Name the columns; feature names come from features.txt (column V2)
names(Subject) <- c("subject")
names(Activity) <- c("activity")
## FIX: was `head = FALSE`, a partial argument match for `header` -- spell it out
FeaturesNames <- read.table(file.path(datapath, "features.txt"), header = FALSE)
names(Features) <- FeaturesNames$V2
## Bind everything into one data set: feature columns + subject + activity
data <- cbind(Subject, Activity)
data <- cbind(Features, data)
## Keep only the mean() and std() measurements (plus the id columns)
subFeaturesNames <- FeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", FeaturesNames$V2)]
selectNames <- c(as.character(subFeaturesNames), "subject", "activity")
data <- subset(data, select = selectNames)
str(data)
## Label the activity column from activity_labels.txt (V1 = code, V2 = name)
## instead of hard-coding the six names: `Labels` was previously read but unused
Labels <- read.table(file.path(datapath, "activity_labels.txt"), header = FALSE)
data$activity <- factor(data$activity, levels = Labels$V1, labels = as.character(Labels$V2))
head(data$activity)
## 4. Appropriately labels the data set with descriptive variable names
## (the feature names from features.txt are kept as-is).
## 5. From the data set above, create a second, independent tidy data set with
## the average of each variable for each activity and each subject.
library(plyr)  # NOTE(review): plyr is attached but unused below; aggregate() is base R
data2 <- aggregate(. ~ subject + activity, data, mean)
## Order by subject then activity (30 subjects x 6 activities = 180 rows)
data2 <- data2[order(data2$subject, data2$activity), ]
## FIX: was `row.name = FALSE`, a partial argument match for `row.names`
write.table(data2, file = "tidydata.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
juandacu/GettingAndCleaningData
|
R
| false
| false
| 2,773
|
r
|
## Getting & Cleaning Data course project: download the UCI HAR dataset,
## merge the train/test splits, keep only the mean()/std() features,
## attach activity labels, and write a tidy per-subject / per-activity
## average of every kept variable.
setwd("~/R Studio personal projects/Curso Johns Hopkins/Getting and Cleaning Data/Final Project")
if (!file.exists("./data")) { dir.create("./data") }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/ProjectDataset.zip", method = "curl")
unzip(zipfile = "./data/ProjectDataset.zip", exdir = "./data")
datapath <- file.path("./data", "UCI HAR Dataset")
## Reading activity, subject and feature files for both splits
ActivityTest <- read.table(file.path(datapath, "test", "y_test.txt"), header = FALSE)
ActivityTrain <- read.table(file.path(datapath, "train", "y_train.txt"), header = FALSE)
SubjectTest <- read.table(file.path(datapath, "test", "subject_test.txt"), header = FALSE)
SubjectTrain <- read.table(file.path(datapath, "train", "subject_train.txt"), header = FALSE)
FeaturesTest <- read.table(file.path(datapath, "test", "X_test.txt"), header = FALSE)
FeaturesTrain <- read.table(file.path(datapath, "train", "X_train.txt"), header = FALSE)
## Stack train on top of test for each table (row order: train, then test)
Activity <- rbind(ActivityTrain, ActivityTest)
Subject <- rbind(SubjectTrain, SubjectTest)
Features <- rbind(FeaturesTrain, FeaturesTest)
## Name the columns; feature names come from features.txt (column V2)
names(Subject) <- c("subject")
names(Activity) <- c("activity")
## FIX: was `head = FALSE`, a partial argument match for `header` -- spell it out
FeaturesNames <- read.table(file.path(datapath, "features.txt"), header = FALSE)
names(Features) <- FeaturesNames$V2
## Bind everything into one data set: feature columns + subject + activity
data <- cbind(Subject, Activity)
data <- cbind(Features, data)
## Keep only the mean() and std() measurements (plus the id columns)
subFeaturesNames <- FeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", FeaturesNames$V2)]
selectNames <- c(as.character(subFeaturesNames), "subject", "activity")
data <- subset(data, select = selectNames)
str(data)
## Label the activity column from activity_labels.txt (V1 = code, V2 = name)
## instead of hard-coding the six names: `Labels` was previously read but unused
Labels <- read.table(file.path(datapath, "activity_labels.txt"), header = FALSE)
data$activity <- factor(data$activity, levels = Labels$V1, labels = as.character(Labels$V2))
head(data$activity)
## 4. Appropriately labels the data set with descriptive variable names
## (the feature names from features.txt are kept as-is).
## 5. From the data set above, create a second, independent tidy data set with
## the average of each variable for each activity and each subject.
library(plyr)  # NOTE(review): plyr is attached but unused below; aggregate() is base R
data2 <- aggregate(. ~ subject + activity, data, mean)
## Order by subject then activity (30 subjects x 6 activities = 180 rows)
data2 <- data2[order(data2$subject, data2$activity), ]
## FIX: was `row.name = FALSE`, a partial argument match for `row.names`
write.table(data2, file = "tidydata.txt", row.names = FALSE)
|
## clarkevans.R
## Clark-Evans statistic and test
## $Revision: 1.18 $ $Date: 2022/01/04 05:30:06 $
## Clark-Evans aggregation index R for a point pattern X, optionally with
## one or more edge corrections ("none", "Donnelly", "guard", "cdf").
## Returns a named numeric vector (unnamed if only one value).
clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"),
                       clipregion=NULL)
{
  verifyclass(X, "ppp")
  W <- X$window
  # validate correction argument; remember whether the user supplied one,
  # so we only warn about unusable corrections they actually asked for
  gavecorrection <- !missing(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
                             donnelly="Donnelly",
                             guard="guard",
                             cdf="cdf"),
                           multi=TRUE)
  if(("Donnelly" %in% correction) && (W$type != "rectangle")) {
    if(gavecorrection)
      warning("Donnelly correction only available for rectangular windows")
    correction <- correction[correction != "Donnelly"]
  }
  # guard correction applied iff `clipregion' is present
  askguard <- "guard" %in% correction
  gaveguard <- !is.null(clipregion)
  if(gaveguard)
    clipregion <- as.owin(clipregion)
  if(askguard && !gaveguard) {
    warning("guard correction not performed; clipregion not specified")
    # BUG FIX: previously subset with a *scalar* logical
    # (`correction[!isguard]`), which emptied the entire vector and
    # silently dropped every other requested correction as well.
    # Drop only the "guard" entry.
    correction <- correction[correction != "guard"]
  } else if(gaveguard && !askguard)
    correction <- c(correction, "guard")
  result <- clarkevansCalc(X, correction, clipregion)
  if(length(result) == 1L) result <- unname(result)
  return(result)
}
## Hypothesis test of complete spatial randomness based on the
## Clark-Evans ratio R. With correction="none" and nsim left at its
## default, a Normal-approximation Z-test is used; otherwise a Monte
## Carlo test conditional on the observed number of points.
## Returns an object of class "htest".
clarkevans.test <- function(X, ...,
                            correction="none",
                            clipregion=NULL,
                            alternative=c("two.sided", "less", "greater",
                                          "clustered", "regular"),
                            nsim=999
                            ) {
  Xname <- short.deparse(substitute(X))
  # must be captured before nsim is evaluated/used: a user-supplied nsim
  # forces the Monte Carlo branch even with correction="none"
  miss.nsim <- missing(nsim)
  verifyclass(X, "ppp")
  W <- Window(X)
  nX <- npoints(X)
  # validate SINGLE correction (unlike clarkevans(), no multi=TRUE here)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
                             donnelly="Donnelly",
                             guard="guard",
                             cdf="cdf"))
  # per-correction validation and human-readable label for the htest output
  switch(correction,
         none={
           corrblurb <- "No edge correction"
         },
         Donnelly={
           if(W$type != "rectangle")
             stop("Donnelly correction only available for rectangular windows")
           corrblurb <- "Donnelly correction"
         },
         guard={
           if(is.null(clipregion))
             stop("clipregion not specified")
           clipregion <- as.owin(clipregion)
           corrblurb <- "Guard correction"
         },
         cdf={
           corrblurb <- "CDF correction"
         })
  # alternative hypothesis: "clustered"/"regular" are aliases for
  # "less"/"greater" (R < 1 indicates clustering, R > 1 regularity)
  if(missing(alternative) || is.null(alternative))
    alternative <- "two.sided"
  alternative <- pickoption("alternative", alternative,
                            c(two.sided="two.sided",
                              less="less",
                              clustered="less",
                              greater="greater",
                              regular="greater"))
  altblurb <-
    switch(alternative,
           two.sided="two-sided",
           less="clustered (R < 1)",
           greater="regular (R > 1)")
  # compute observed value; working=TRUE attaches the intermediate
  # quantities (Dobs, Dpois, areaW, npts) needed for the Z-test below
  statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion,
                              working=TRUE)
  working <- attr(statistic, "working")
  # choose the p-value computation
  if(correction == "none" && miss.nsim) {
    # standard Normal p-value for the uncorrected statistic
    SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts)
    Z <- with(working, (Dobs - Dpois)/SE)
    p.value <- switch(alternative,
                      less=pnorm(Z),
                      greater=1 - pnorm(Z),
                      two.sided= 2*(1-pnorm(abs(Z))))
    pvblurb <- "Z-test"
  } else {
    # Monte Carlo p-value: simulate CSR with the same number of points
    # in the same window, recompute the statistic each time
    sims <- numeric(nsim)
    for(i in seq_len(nsim)) {
      Xsim <- runifpoint(nX, win=W)
      sims[i] <- clarkevansCalc(Xsim, correction=correction,
                                clipregion=clipregion)
    }
    # add-one estimators so p-values are never exactly 0
    p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim)
    p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim)
    p.value <- switch(alternative,
                      less=p.lower,
                      greater=p.upper,
                      two.sided=min(1, 2*min(p.lower, p.upper)))
    pvblurb <- paste("Monte Carlo test based on",
                     nsim, "simulations of CSR with fixed n")
  }
  # strip the "working" attribute and name the statistic for htest printing
  statistic <- as.numeric(statistic)
  names(statistic) <- "R"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Clark-Evans test", corrblurb, pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
## Shared engine for clarkevans() and clarkevans.test(): computes the
## Clark-Evans ratio R = Dobs/Dpois under each requested edge correction.
## Returns a named numeric vector with one entry per correction
## (NA for an empty pattern); if working=TRUE, intermediate quantities
## are attached as attr(, "working") for the test's Normal approximation.
clarkevansCalc <- function(X, correction="none", clipregion=NULL,
                           working=FALSE) {
  # calculations for Clark-Evans index or test
  W <- Window(X)
  areaW <- area(W)
  npts <- npoints(X)
  intensity <- npts/areaW
  # R undefined for empty point pattern
  if(npts == 0)
    return(NA)
  # Dobs = observed mean nearest neighbour distance
  nndistX <- nndist(X)
  Dobs <- mean(nndistX)
  # Dpois = Expected mean nearest neighbour distance for Poisson process
  Dpois <- 1/(2*sqrt(intensity))
  statistic <- NULL
  if(working)
    work <- list(areaW=areaW, npts=npts, intensity=intensity,
                 Dobs=Dobs, Dpois=Dpois)
  # Naive uncorrected value
  if("none" %in% correction) {
    Rnaive <- Dobs/Dpois
    statistic <- c(statistic, naive=Rnaive)
  }
  # Donnelly edge correction (rectangular windows only; NA otherwise)
  if("Donnelly" %in% correction) {
    # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978
    if(W$type == "rectangle") {
      perim <- perimeter(W)
      Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts
      Rkevin <- Dobs/Dkevin
      if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin))
    } else
      Rkevin <- NA
    statistic <- c(statistic, Donnelly=Rkevin)
  }
  # guard area method: only nearest-neighbour distances measured *from*
  # points inside `clipregion' contribute (their neighbours may lie outside)
  if("guard" %in% correction && !is.null(clipregion)) {
    # note the deliberately empty 2nd argument: X is passed as a point
    # pattern, so the y-coordinate argument is left missing
    ok <- inside.owin(X, , clipregion)
    Dguard <- mean(nndistX[ok])
    Rguard <- Dguard/Dpois
    if(working) work <- append(work, list(Dguard=Dguard))
    statistic <- c(statistic, guard=Rguard)
  }
  # CDF correction: mean nn distance taken from the Kaplan-Meier ($km)
  # estimate of the nearest-neighbour distance distribution G
  if("cdf" %in% correction) {
    # compute mean of estimated nearest-neighbour distance distribution G
    G <- Gest(X)
    numer <- stieltjes(function(x){x}, G)$km
    denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km
    Dcdf <- numer/denom
    Rcdf <- Dcdf/Dpois
    if(working) work <- append(work, list(Dcdf=Dcdf))
    statistic <- c(statistic, cdf=Rcdf)
  }
  if(working) attr(statistic, "working") <- work
  return(statistic)
}
|
/R/clarkevans.R
|
no_license
|
spatstat/spatstat.core
|
R
| false
| false
| 6,867
|
r
|
## clarkevans.R
## Clark-Evans statistic and test
## $Revision: 1.18 $ $Date: 2022/01/04 05:30:06 $
## Clark-Evans aggregation index R for a point pattern X, optionally with
## one or more edge corrections ("none", "Donnelly", "guard", "cdf").
## Returns a named numeric vector (unnamed if only one value).
clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"),
                       clipregion=NULL)
{
  verifyclass(X, "ppp")
  W <- X$window
  # validate correction argument; remember whether the user supplied one,
  # so we only warn about unusable corrections they actually asked for
  gavecorrection <- !missing(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
                             donnelly="Donnelly",
                             guard="guard",
                             cdf="cdf"),
                           multi=TRUE)
  if(("Donnelly" %in% correction) && (W$type != "rectangle")) {
    if(gavecorrection)
      warning("Donnelly correction only available for rectangular windows")
    correction <- correction[correction != "Donnelly"]
  }
  # guard correction applied iff `clipregion' is present
  askguard <- "guard" %in% correction
  gaveguard <- !is.null(clipregion)
  if(gaveguard)
    clipregion <- as.owin(clipregion)
  if(askguard && !gaveguard) {
    warning("guard correction not performed; clipregion not specified")
    # BUG FIX: previously subset with a *scalar* logical
    # (`correction[!isguard]`), which emptied the entire vector and
    # silently dropped every other requested correction as well.
    # Drop only the "guard" entry.
    correction <- correction[correction != "guard"]
  } else if(gaveguard && !askguard)
    correction <- c(correction, "guard")
  result <- clarkevansCalc(X, correction, clipregion)
  if(length(result) == 1L) result <- unname(result)
  return(result)
}
## Hypothesis test of complete spatial randomness based on the
## Clark-Evans ratio R. With correction="none" and nsim left at its
## default, a Normal-approximation Z-test is used; otherwise a Monte
## Carlo test conditional on the observed number of points.
## Returns an object of class "htest".
clarkevans.test <- function(X, ...,
                            correction="none",
                            clipregion=NULL,
                            alternative=c("two.sided", "less", "greater",
                                          "clustered", "regular"),
                            nsim=999
                            ) {
  Xname <- short.deparse(substitute(X))
  # must be captured before nsim is evaluated/used: a user-supplied nsim
  # forces the Monte Carlo branch even with correction="none"
  miss.nsim <- missing(nsim)
  verifyclass(X, "ppp")
  W <- Window(X)
  nX <- npoints(X)
  # validate SINGLE correction (unlike clarkevans(), no multi=TRUE here)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
                             donnelly="Donnelly",
                             guard="guard",
                             cdf="cdf"))
  # per-correction validation and human-readable label for the htest output
  switch(correction,
         none={
           corrblurb <- "No edge correction"
         },
         Donnelly={
           if(W$type != "rectangle")
             stop("Donnelly correction only available for rectangular windows")
           corrblurb <- "Donnelly correction"
         },
         guard={
           if(is.null(clipregion))
             stop("clipregion not specified")
           clipregion <- as.owin(clipregion)
           corrblurb <- "Guard correction"
         },
         cdf={
           corrblurb <- "CDF correction"
         })
  # alternative hypothesis: "clustered"/"regular" are aliases for
  # "less"/"greater" (R < 1 indicates clustering, R > 1 regularity)
  if(missing(alternative) || is.null(alternative))
    alternative <- "two.sided"
  alternative <- pickoption("alternative", alternative,
                            c(two.sided="two.sided",
                              less="less",
                              clustered="less",
                              greater="greater",
                              regular="greater"))
  altblurb <-
    switch(alternative,
           two.sided="two-sided",
           less="clustered (R < 1)",
           greater="regular (R > 1)")
  # compute observed value; working=TRUE attaches the intermediate
  # quantities (Dobs, Dpois, areaW, npts) needed for the Z-test below
  statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion,
                              working=TRUE)
  working <- attr(statistic, "working")
  # choose the p-value computation
  if(correction == "none" && miss.nsim) {
    # standard Normal p-value for the uncorrected statistic
    SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts)
    Z <- with(working, (Dobs - Dpois)/SE)
    p.value <- switch(alternative,
                      less=pnorm(Z),
                      greater=1 - pnorm(Z),
                      two.sided= 2*(1-pnorm(abs(Z))))
    pvblurb <- "Z-test"
  } else {
    # Monte Carlo p-value: simulate CSR with the same number of points
    # in the same window, recompute the statistic each time
    sims <- numeric(nsim)
    for(i in seq_len(nsim)) {
      Xsim <- runifpoint(nX, win=W)
      sims[i] <- clarkevansCalc(Xsim, correction=correction,
                                clipregion=clipregion)
    }
    # add-one estimators so p-values are never exactly 0
    p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim)
    p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim)
    p.value <- switch(alternative,
                      less=p.lower,
                      greater=p.upper,
                      two.sided=min(1, 2*min(p.lower, p.upper)))
    pvblurb <- paste("Monte Carlo test based on",
                     nsim, "simulations of CSR with fixed n")
  }
  # strip the "working" attribute and name the statistic for htest printing
  statistic <- as.numeric(statistic)
  names(statistic) <- "R"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Clark-Evans test", corrblurb, pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
## Shared engine for clarkevans() and clarkevans.test(): computes the
## Clark-Evans ratio R = Dobs/Dpois under each requested edge correction.
## Returns a named numeric vector with one entry per correction
## (NA for an empty pattern); if working=TRUE, intermediate quantities
## are attached as attr(, "working") for the test's Normal approximation.
clarkevansCalc <- function(X, correction="none", clipregion=NULL,
                           working=FALSE) {
  # calculations for Clark-Evans index or test
  W <- Window(X)
  areaW <- area(W)
  npts <- npoints(X)
  intensity <- npts/areaW
  # R undefined for empty point pattern
  if(npts == 0)
    return(NA)
  # Dobs = observed mean nearest neighbour distance
  nndistX <- nndist(X)
  Dobs <- mean(nndistX)
  # Dpois = Expected mean nearest neighbour distance for Poisson process
  Dpois <- 1/(2*sqrt(intensity))
  statistic <- NULL
  if(working)
    work <- list(areaW=areaW, npts=npts, intensity=intensity,
                 Dobs=Dobs, Dpois=Dpois)
  # Naive uncorrected value
  if("none" %in% correction) {
    Rnaive <- Dobs/Dpois
    statistic <- c(statistic, naive=Rnaive)
  }
  # Donnelly edge correction (rectangular windows only; NA otherwise)
  if("Donnelly" %in% correction) {
    # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978
    if(W$type == "rectangle") {
      perim <- perimeter(W)
      Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts
      Rkevin <- Dobs/Dkevin
      if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin))
    } else
      Rkevin <- NA
    statistic <- c(statistic, Donnelly=Rkevin)
  }
  # guard area method: only nearest-neighbour distances measured *from*
  # points inside `clipregion' contribute (their neighbours may lie outside)
  if("guard" %in% correction && !is.null(clipregion)) {
    # note the deliberately empty 2nd argument: X is passed as a point
    # pattern, so the y-coordinate argument is left missing
    ok <- inside.owin(X, , clipregion)
    Dguard <- mean(nndistX[ok])
    Rguard <- Dguard/Dpois
    if(working) work <- append(work, list(Dguard=Dguard))
    statistic <- c(statistic, guard=Rguard)
  }
  # CDF correction: mean nn distance taken from the Kaplan-Meier ($km)
  # estimate of the nearest-neighbour distance distribution G
  if("cdf" %in% correction) {
    # compute mean of estimated nearest-neighbour distance distribution G
    G <- Gest(X)
    numer <- stieltjes(function(x){x}, G)$km
    denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km
    Dcdf <- numer/denom
    Rcdf <- Dcdf/Dpois
    if(working) work <- append(work, list(Dcdf=Dcdf))
    statistic <- c(statistic, cdf=Rcdf)
  }
  if(working) attr(statistic, "working") <- work
  return(statistic)
}
|
#############################################################
# #
# This script analyzes Twitter data for variation of the #
# linguistic variable (lol). Script should be run from the #
# LaTeX write-up using knitr #
# #
# -Joshua McNeill (joshua dot mcneill at uga dot edu) #
# #
#############################################################
## ---- load_packages_functions_data ----
library(knitr)
library(tools)
library(ggplot2)
library(igraph)
library(vegan)
library(sentimentr)
library(dplyr)
library(plyr)
library(lsr)
# Data: data_cleaning.R is expected to define the data frames used below
# (`lol`, `usersAll`) -- inferred from later uses; confirm against that file
source("data_cleaning.R")
# Suppress scientific notation in printed output
options(scipen = 999)
## Functions
# Simpson's index of diversity for the tokens of (lol), as computed by
# vegan's diversity() with index = "simpson"
getDiversity <- function(variants){
  variantCounts <- table(variants)
  diversity(variantCounts, index = "simpson")
}
# Get Simpson's index of diversity and centrality for each user individually.
# `df` is the NAME of a data frame (a string), as are the column arguments.
# FIX: columns were previously fetched with eval(parse(text = paste0(...))),
# an anti-pattern that also allowed `$` partial matching; get() + [[ ]]
# performs the same lookup safely and with exact column names.
getDivByCent <- function(df, speaker, variable, centrality){
  dat <- get(df)
  # per-speaker diversity of the linguistic variable
  diversities <- aggregate(dat[[variable]], list(dat[[speaker]]), getDiversity)
  # per-speaker centrality score (assumed constant per speaker, hence unique)
  centralities <- aggregate(dat[[centrality]], list(dat[[speaker]]), unique)
  divByCent <- merge(centralities, diversities, by = 1)
  colnames(divByCent) <- c(toTitleCase(speaker), toTitleCase(centrality), "Diversity")
  return(divByCent)
}
# Draw a scatter plot comparing Simpson's index of diversity and a centrality
# measure. `df` names a data frame (string), `centrality` a column in it,
# `xlab` labels the x axis.
graphDivByCent <- function(df, centrality, xlab) {
  divByCent <- getDivByCent(df, "User", "lol", centrality)
  # FIX: map the string-named column with the documented .data[[ ]] pronoun
  # instead of eval(parse(text = ...)) inside aes()
  ggplot(divByCent,
         aes(x = .data[[toTitleCase(centrality)]],
             y = Diversity)) +
    theme_bw() +
    labs(x = xlab) +
    geom_point()
}
# Draw a bar graph of the relative frequency of (lol) variants for a set of
# users, faceted by community and user.
graphlolDistUsers <- function(df) {
  ggplot(df,
         aes(x = lol)) +
    # FIX: refer to facet columns by bare name; `df$Community`/`df$User`
    # inside the facet formula bypasses ggplot's data masking and pulls the
    # full external vectors rather than the plot data's columns
    facet_wrap(. ~ Community + User, ncol = 5) +
    labs(x = "(lol)", y = "Relative Frequency") +
    theme_bw() +
    # normalise counts within each facet (PANEL) to get relative frequencies
    geom_bar(aes(y = ..count.. / sapply(PANEL, FUN = function(x) sum(count[PANEL == x]))))
}
# Get the mode of a vector (most frequent value; first-seen value wins ties)
getMode <- function(vector) {
  seen <- unique(vector)
  freq <- tabulate(match(vector, seen))
  seen[which.max(freq)]
}
## Summary data frames and subsets
# Get sentiments for tweets with (lol) using default polarity dictionary (doesn't include <lol>)
lol$Sentiment <- sentiment_by(lol$Text)$ave_sentiment
# Keep only the three main variants; %in% never yields NA, unlike the
# previous chain of == comparisons which propagates NA rows into the subset
lolSentMajorVars <- lol[lol$lol %in% c("lol", "LOL", "Lol"), ]
lolSentMajorVars <- lolSentMajorVars[lolSentMajorVars$Sentiment != 0, ]
# Mean sentiment for each variant
lolSentMeans <- tapply(lolSentMajorVars$Sentiment, lolSentMajorVars$lol, mean)
# Identify active users (at least 10 tokens) directly from the token counts.
# FIX: the previous version depended on data.frame's auto-generated column
# name ("table.lol.User.....10"), which is fragile and unreadable.
tokenCounts <- table(lol$User)
usersActive <- names(tokenCounts)[tokenCounts >= 10]
rm(tokenCounts)
lolActive <- lol[lol$User %in% usersActive, ]
# Create a data frame summarizing each community:
# modal variant, Simpson diversity, member list, member count
communitiesModes <- aggregate(lol$lol, list(lol$Community), getMode)
communitiesDivs <- aggregate(lol$lol, list(lol$Community), getDiversity)
# community sizes come from usersAll (all users), not only users with (lol) tokens
communitiesSize <- aggregate(usersAll$User, list(usersAll$Community), unique)
communitiesSize[,3] <- sapply(communitiesSize$x, length)
communitiesModesDivs <- merge(communitiesModes, communitiesDivs, by = 1)
communitiesSummary <- merge(communitiesModesDivs, communitiesSize, by = 1, all.y = FALSE)
rm(list = c("communitiesModes", "communitiesDivs", "communitiesSize", "communitiesModesDivs"))
colnames(communitiesSummary) <- c("Community", "Mode", "Diversity", "Users", "Size")
# Create a data frame summarizing each province (same construction as above,
# but sizes are drawn from the (lol) observations themselves)
provincesModes <- aggregate(lol$lol, list(lol$Province), getMode)
provincesDivs <- aggregate(lol$lol, list(lol$Province), getDiversity)
provincesSize <- aggregate(lol$User, list(lol$Province), unique)
provincesSize[,3] <- sapply(provincesSize$x, length)
provincesModesDivs <- merge(provincesModes, provincesDivs, by = 1)
provincesSummary <- merge(provincesModesDivs, provincesSize, by = 1, all.y = FALSE)
rm(list = c("provincesModes", "provincesDivs", "provincesSize", "provincesModesDivs"))
colnames(provincesSummary) <- c("Province", "Mode", "Diversity", "Users", "Size")
# drop observations whose province could not be determined
provincesSummary <- provincesSummary[provincesSummary$Province != "Undefined",]
# Create a data frame summarizing each user:
# community, modal variant, PageRank, diversity, token count
usersCommunities <- aggregate(lol$Community, list(lol$User), function(x) head(x, n = 1))
usersModes <- aggregate(lol$lol, list(lol$User), getMode)
usersDivByCent <- getDivByCent("lol", "User", "lol", "PageRank")
usersCommsModes <- merge(usersCommunities, usersModes, by = 1)
usersSummary <- merge(usersCommsModes, usersDivByCent, by = 1)
usersSummary$Tokens <- table(lol$User)
rm(list = c("usersCommunities", "usersModes", "usersDivByCent", "usersCommsModes"))
colnames(usersSummary) <- c("User", "Community", "Mode", "PageRank", "Diversity", "Tokens")
usersSummary <- merge(usersSummary, communitiesSummary[, c("Community", "Diversity")], by = "Community")
colnames(usersSummary) <- c("Community", "User", "Mode", "PageRank", "Diversity", "Tokens", "Diversity_Comm")
# Add the within-community PageRank percentile for each user.
# FIX: previously grew a data frame with rbind() inside the loop (quadratic
# copying) guarded by a fragile exists() check on a global name; build a
# list of rows and bind once. ecdf() is also hoisted out of the inner loop.
prRows <- list()
for (community in unique(usersSummary$Community)) {
  sub <- subset(usersSummary, usersSummary$Community == community)
  ecdfPR <- ecdf(sub$PageRank)  # percentile function within this community
  for (user in unique(sub$User)) {
    prRows[[length(prRows) + 1]] <- data.frame(User = user,
                                               PR_Percentile = ecdfPR(sub[sub$User == user, ]$PageRank))
  }
}
usersPRPercentile <- do.call(rbind, prRows)
rm(list = c("community", "sub", "user", "ecdfPR", "prRows"))
usersSummary <- merge(usersSummary, usersPRPercentile, by = "User")
# Add the PageRank percentiles to the (lol) observations as well
lol <- merge(lol, usersPRPercentile, by = "User")
lolActive <- merge(lolActive, usersPRPercentile, by = "User")
rm(usersPRPercentile)
# Create a data frame summarizing community 2265
usersSummary2265 <- usersSummary[usersSummary$Community == 2265,]
# Create a data frame summarizing active users, producing at least 10 tokens
usersSummaryActive <- usersSummary[usersSummary$Tokens >= 10,]
# Active users (>= 20 tokens) whose modal variant is not <lol>
usersSummaryActiveOut <- usersSummaryActive[usersSummaryActive$Mode != "lol" & usersSummaryActive$Tokens >= 20,]
# Only the communities that users with the 5 highest and 5 lowest PageRanks
# belong to (%in% replaces the chain of == comparisons)
lolDistComms <- lol[lol$Community %in% c(2265, 1291, 1032), ]
# Variant frequency bookkeeping; NOTE(review): variantsGT/variantsLT are
# computed but not used further in this script
variants <- table(lolDistComms$lol)
variantsGT <- names(variants[variants > 5])
variantsLT <- names(variants[variants <= 5])
lolDistComms <- lolDistComms[lolDistComms$lol %in% c("lol", "Lol", "LOL"), ]
# Users with the 5 highest PageRanks among active users.
# FIX: rank once and take head() instead of re-sorting the whole summary
# frame five separate times inside an OR chain of == comparisons.
rankedDesc <- usersSummaryActive[order(usersSummaryActive$PageRank, decreasing = TRUE), "User"]
lolDistUsersHigh <- lol[lol$User %in% head(rankedDesc, 5), ]
lolDistUsersHigh <- lolDistUsersHigh[lolDistUsersHigh$lol %in% c("lol", "Lol", "LOL"), ]
# Users with the 5 lowest PageRanks (ascending sort, same construction)
rankedAsc <- usersSummaryActive[order(usersSummaryActive$PageRank), "User"]
lolDistUsersLow <- lol[lol$User %in% head(rankedAsc, 5), ]
lolDistUsersLow <- lolDistUsersLow[lolDistUsersLow$lol %in% c("lol", "Lol", "LOL"), ]
rm(list = c("rankedDesc", "rankedAsc"))
## Analysis
# Significance tests for each factor other than sentiment: Fisher's exact
# test (with simulated p-value) plus Cramer's V effect size, for the (lol)
# variant against community and against province
lolSigTests <- lapply(list(lol$Community, lol$Province), function(column)
  list(
    fisher.test(lol$lol, column, simulate.p.value = TRUE),
    cramersV(lol$lol, column))
)
names(lolSigTests) <- c("Community", "Province")
names(lolSigTests$Community) <- c("Test", "Effect")
names(lolSigTests$Province) <- c("Test", "Effect")
# Get sentiment of an example sentence to demonstrate the process.
# NOTE(review): the per-word lookup indexes lexicon::hash_sentiment_jockers_rinker
# with data.table syntax ([x == word, "y"]); this assumes the table has
# columns x (word) and y (polarity) -- confirm against the lexicon package
sentence <- lst(
  words = c("great", "sentences", "can", "be", "ugly"),
  sentiment = sentiment(paste(words, collapse = " ")),
  word_sentiments = sapply(words, function (word) if (!any(lexicon::hash_sentiment_jockers_rinker == word)) {
    0
  } else {
    lexicon::hash_sentiment_jockers_rinker[x == word, "y"]
  }
  )
)
# One-way ANOVA comparing mean sentiment across the lol/Lol/LOL variants
lolSentSigTest <- aov(Sentiment ~ lol, data = lolSentMajorVars)
lolSentSigTestSummary <- summary(lolSentSigTest)
## ---- example_community ----
# Toy 4-node undirected network used in the write-up to illustrate a community
par(mar = c(2, 4, 3, 2))
plot.igraph(
  graph(edges = c("Ted", "Bob", "Bob", "Joe", "Joe", "Kelly", "Kelly", "Bob"), directed = FALSE),
  layout = layout_with_kk,
  vertex.color = "yellow",
  vertex.label.dist = 4
)
## ---- divByPR_graph_active ----
# Diversity vs. log PageRank for active users
lolActive$PageRankLog <- log(lolActive$PageRank)
graphDivByCent("lolActive", "PageRankLog", "PageRank (log)")
## ---- divByPRPerc_graph_active ----
# Diversity vs. within-community PageRank percentile for active users
graphDivByCent("lolActive", "PR_Percentile", "PageRank Percentile")
## ---- lol_dist_comms ----
# Relative frequency of the three main variants per community,
# with facet panels ordered 1291, 2265, 1032
lolDistComms$community_ordered <- factor(lolDistComms$Community, levels = c("1291", "2265", "1032"))
ggplot(lolDistComms,
       aes(x = lol)) +
  facet_wrap(.~lolDistComms$community_ordered, ncol = 3) +
  labs(x = "(lol)", y = "Relative Frequency") +
  theme_bw() +
  # counts normalised within each facet (PANEL) to relative frequencies
  geom_bar(aes(y = ..count.. / sapply(PANEL, FUN = function(x) sum(count[PANEL == x]))))
## ---- lol_dist_users_high ----
graphlolDistUsers(lolDistUsersHigh)
## ---- lol_dist_users_low ----
graphlolDistUsers(lolDistUsersLow)
## ---- div_individual_community ----
# Individual vs. community diversity; points below the y = x reference line
# are users less diverse than their community as a whole
ggplot(usersSummaryActive,
       aes(x = Diversity,
           y = Diversity_Comm)) +
  scale_x_continuous(limits = c(0, 0.75)) +
  scale_y_continuous(limits = c(0, 0.75)) +
  labs(x = "Individual Diversity", y = "Community Diversity") +
  theme_bw() +
  geom_point() +
  geom_abline(slope = 1, intercept = 0)
## ---- sentiment_lol_hist ----
# Sentiment distributions for lol / Lol / LOL with each variant's mean in red
ggplot(lolSentMajorVars,
       aes(x = Sentiment, y = stat(density * width))) +
  facet_grid(lol ~ .) +
  labs(y = "Density") +
  theme_bw() +
  geom_histogram() +
  geom_vline(data = filter(lolSentMajorVars, lol == "lol"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1) +
  geom_vline(data = filter(lolSentMajorVars, lol == "Lol"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1) +
  geom_vline(data = filter(lolSentMajorVars, lol == "LOL"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1)
|
/analysis.R
|
no_license
|
joshisanonymous/lol_ortho_variation
|
R
| false
| false
| 12,106
|
r
|
#############################################################
# #
# This script analyzes Twitter data for variation of the #
# linguistic variable (lol). Script should be run from the #
# LaTeX write-up using knitr #
# #
# -Joshua McNeill (joshua dot mcneill at uga dot edu) #
# #
#############################################################
## ---- load_packages_functions_data ----
library(knitr)
library(tools)
library(ggplot2)
library(igraph)
library(vegan)
library(sentimentr)
library(dplyr)
library(plyr)
library(lsr)
# Data: data_cleaning.R is expected to define the data frames used below
# (`lol`, `usersAll`) -- inferred from later uses; confirm against that file
source("data_cleaning.R")
# Suppress scientific notation in printed output
options(scipen = 999)
## Functions
# Simpson's index of diversity for the tokens of (lol), as computed by
# vegan's diversity() with index = "simpson"
getDiversity <- function(variants){
  variantCounts <- table(variants)
  diversity(variantCounts, index = "simpson")
}
# Get Simpson's index of diversity and centrality for each user individually.
# `df` is the NAME of a data frame (a string), as are the column arguments.
# FIX: columns were previously fetched with eval(parse(text = paste0(...))),
# an anti-pattern that also allowed `$` partial matching; get() + [[ ]]
# performs the same lookup safely and with exact column names.
getDivByCent <- function(df, speaker, variable, centrality){
  dat <- get(df)
  # per-speaker diversity of the linguistic variable
  diversities <- aggregate(dat[[variable]], list(dat[[speaker]]), getDiversity)
  # per-speaker centrality score (assumed constant per speaker, hence unique)
  centralities <- aggregate(dat[[centrality]], list(dat[[speaker]]), unique)
  divByCent <- merge(centralities, diversities, by = 1)
  colnames(divByCent) <- c(toTitleCase(speaker), toTitleCase(centrality), "Diversity")
  return(divByCent)
}
# Draw a scatter plot comparing Simpson's index of diversity and a centrality
# measure. `df` names a data frame (string), `centrality` a column in it,
# `xlab` labels the x axis.
graphDivByCent <- function(df, centrality, xlab) {
  divByCent <- getDivByCent(df, "User", "lol", centrality)
  # FIX: map the string-named column with the documented .data[[ ]] pronoun
  # instead of eval(parse(text = ...)) inside aes()
  ggplot(divByCent,
         aes(x = .data[[toTitleCase(centrality)]],
             y = Diversity)) +
    theme_bw() +
    labs(x = xlab) +
    geom_point()
}
# Draw a bar graph of the relative frequency of (lol) variants for a set of
# users, faceted by community and user.
graphlolDistUsers <- function(df) {
  ggplot(df,
         aes(x = lol)) +
    # FIX: refer to facet columns by bare name; `df$Community`/`df$User`
    # inside the facet formula bypasses ggplot's data masking and pulls the
    # full external vectors rather than the plot data's columns
    facet_wrap(. ~ Community + User, ncol = 5) +
    labs(x = "(lol)", y = "Relative Frequency") +
    theme_bw() +
    # normalise counts within each facet (PANEL) to get relative frequencies
    geom_bar(aes(y = ..count.. / sapply(PANEL, FUN = function(x) sum(count[PANEL == x]))))
}
# Get the mode of a vector (most frequent value; first-seen value wins ties)
getMode <- function(vector) {
  seen <- unique(vector)
  freq <- tabulate(match(vector, seen))
  seen[which.max(freq)]
}
## Summary data frames and subsets
# Get sentiments for tweets with (lol) using default polarity dictionary (doesn't include <lol>)
lol$Sentiment <- sentiment_by(lol$Text)$ave_sentiment
# Keep only the three main variants; %in% never yields NA, unlike the
# previous chain of == comparisons which propagates NA rows into the subset
lolSentMajorVars <- lol[lol$lol %in% c("lol", "LOL", "Lol"), ]
lolSentMajorVars <- lolSentMajorVars[lolSentMajorVars$Sentiment != 0, ]
# Mean sentiment for each variant
lolSentMeans <- tapply(lolSentMajorVars$Sentiment, lolSentMajorVars$lol, mean)
# Identify active users (at least 10 tokens) directly from the token counts.
# FIX: the previous version depended on data.frame's auto-generated column
# name ("table.lol.User.....10"), which is fragile and unreadable.
tokenCounts <- table(lol$User)
usersActive <- names(tokenCounts)[tokenCounts >= 10]
rm(tokenCounts)
lolActive <- lol[lol$User %in% usersActive, ]
# Create a data frame summarizing each community:
# modal variant, Simpson diversity, member list, member count
communitiesModes <- aggregate(lol$lol, list(lol$Community), getMode)
communitiesDivs <- aggregate(lol$lol, list(lol$Community), getDiversity)
# community sizes come from usersAll (all users), not only users with (lol) tokens
communitiesSize <- aggregate(usersAll$User, list(usersAll$Community), unique)
communitiesSize[,3] <- sapply(communitiesSize$x, length)
communitiesModesDivs <- merge(communitiesModes, communitiesDivs, by = 1)
communitiesSummary <- merge(communitiesModesDivs, communitiesSize, by = 1, all.y = FALSE)
rm(list = c("communitiesModes", "communitiesDivs", "communitiesSize", "communitiesModesDivs"))
colnames(communitiesSummary) <- c("Community", "Mode", "Diversity", "Users", "Size")
# Create a data frame summarizing each province (same construction as above,
# but sizes are drawn from the (lol) observations themselves)
provincesModes <- aggregate(lol$lol, list(lol$Province), getMode)
provincesDivs <- aggregate(lol$lol, list(lol$Province), getDiversity)
provincesSize <- aggregate(lol$User, list(lol$Province), unique)
provincesSize[,3] <- sapply(provincesSize$x, length)
provincesModesDivs <- merge(provincesModes, provincesDivs, by = 1)
provincesSummary <- merge(provincesModesDivs, provincesSize, by = 1, all.y = FALSE)
rm(list = c("provincesModes", "provincesDivs", "provincesSize", "provincesModesDivs"))
colnames(provincesSummary) <- c("Province", "Mode", "Diversity", "Users", "Size")
# drop observations whose province could not be determined
provincesSummary <- provincesSummary[provincesSummary$Province != "Undefined",]
# Create a data frame summarizing each user:
# community, modal variant, PageRank, diversity, token count
usersCommunities <- aggregate(lol$Community, list(lol$User), function(x) head(x, n = 1))
usersModes <- aggregate(lol$lol, list(lol$User), getMode)
usersDivByCent <- getDivByCent("lol", "User", "lol", "PageRank")
usersCommsModes <- merge(usersCommunities, usersModes, by = 1)
usersSummary <- merge(usersCommsModes, usersDivByCent, by = 1)
usersSummary$Tokens <- table(lol$User)
rm(list = c("usersCommunities", "usersModes", "usersDivByCent", "usersCommsModes"))
colnames(usersSummary) <- c("User", "Community", "Mode", "PageRank", "Diversity", "Tokens")
usersSummary <- merge(usersSummary, communitiesSummary[, c("Community", "Diversity")], by = "Community")
colnames(usersSummary) <- c("Community", "User", "Mode", "PageRank", "Diversity", "Tokens", "Diversity_Comm")
# Add the PageRank percentile for each user.
# Within every community, a user's PR_Percentile is the value of the empirical
# CDF of that community's PageRanks evaluated at the user's own PageRank.
# This replaces the original grow-a-data.frame-with-rbind loop (quadratic and
# dependent on an exists() sentinel): ecdf() applied to the whole community at
# once yields exactly the same per-user values, and the subsequent
# merge(by = "User") makes row order irrelevant.
usersPRPercentile <- do.call(rbind, lapply(
  split(usersSummary, usersSummary$Community),
  function(communitySub) data.frame(
    User = communitySub$User,
    PR_Percentile = ecdf(communitySub$PageRank)(communitySub$PageRank)
  )
))
rownames(usersPRPercentile) <- NULL
usersSummary <- merge(usersSummary, usersPRPercentile, by = "User")
# Add the PageRank Percentiles to the (lol) observations
# (one row per token, so every token inherits its author's percentile)
lol <- merge(lol, usersPRPercentile, by = "User")
lolActive <- merge(lolActive, usersPRPercentile, by = "User")
rm(usersPRPercentile)
# Create a data frame summarizing community 2265
usersSummary2265 <- usersSummary[usersSummary$Community == 2265,]
# Create a data frame summarizing active users, producing at least 10 tokens
usersSummaryActive <- usersSummary[usersSummary$Tokens >= 10,]
# Create a data frame summarizing active users that don't have <lol> as their mode
usersSummaryActiveOut <- usersSummaryActive[usersSummaryActive$Mode != "lol" & usersSummaryActive$Tokens >= 20,]
# Subset data frame including only communities that users with the 5 highest and
# 5 lowest PageRanks belong to
lolDistComms <- lol[lol$Community == 2265 |
lol$Community == 1291 |
lol$Community == 1032,]
# Grab only variants of (lol) over 1
# NOTE(review): variants/variantsGT/variantsLT are computed but never used
# below — the next statement hardcodes the three major variants instead.
# Candidates for removal, or the subset below was meant to use variantsGT.
variants <- table(lolDistComms$lol)
variantsGT <- names(variants[variants > 5])
variantsLT <- names(variants[variants <= 5])
lolDistComms <- lolDistComms[lolDistComms$lol == "lol" |
lolDistComms$lol == "Lol" |
lolDistComms$lol == "LOL",]
# Subset data frames for the 5 highest- and 5 lowest-PageRank active users,
# keeping only the three major case variants of (lol).
# Replaces ten repeated order() calls and NA-unsafe `==`-OR chains with a
# single ranking plus %in% membership tests (%in% never yields NA rows).
topUsers <- head(usersSummaryActive[order(usersSummaryActive$PageRank, decreasing = TRUE), "User"], 5)
bottomUsers <- head(usersSummaryActive[order(usersSummaryActive$PageRank), "User"], 5)
majorVariants <- c("lol", "Lol", "LOL")
# Subset data frame including only users with the 5 highest PageRanks
lolDistUsersHigh <- lol[lol$User %in% topUsers,]
lolDistUsersHigh <- lolDistUsersHigh[lolDistUsersHigh$lol %in% majorVariants,]
# Subset data frame including only users with the 5 lowest PageRanks
lolDistUsersLow <- lol[lol$User %in% bottomUsers,]
lolDistUsersLow <- lolDistUsersLow[lolDistUsersLow$lol %in% majorVariants,]
rm(list = c("topUsers", "bottomUsers", "majorVariants"))
## Analysis
# Perform significance tests for each factor other than sentiment
# For Community and Province: Fisher's exact test (simulated p-value, since
# the contingency tables are large) plus Cramer's V as the effect size.
lolSigTests <- lapply(list(lol$Community, lol$Province), function(column)
list(
fisher.test(lol$lol, column, simulate.p.value = TRUE),
cramersV(lol$lol, column))
)
names(lolSigTests) <- c("Community", "Province")
names(lolSigTests$Community) <- c("Test", "Effect")
names(lolSigTests$Province) <- c("Test", "Effect")
# Get sentiment of an example sentence to demonstrate the process
# NOTE(review): tibble::lst() lets later entries reference earlier ones
# (sentiment and word_sentiments both use `words`).
# NOTE(review): `x == word` relies on data.table scoping — x is presumably the
# token column of lexicon::hash_sentiment_jockers_rinker and y the polarity;
# confirm against the lexicon package docs.
sentence <- lst(
words = c("great", "sentences", "can", "be", "ugly"),
sentiment = sentiment(paste(words, collapse = " ")),
word_sentiments = sapply(words, function (word) if (!any(lexicon::hash_sentiment_jockers_rinker == word)) {
0
} else {
lexicon::hash_sentiment_jockers_rinker[x == word, "y"]
}
)
)
# Perform a one-way ANOVA for the means of sentiments
# (tests whether mean sentiment differs across the lol/Lol/LOL variants)
lolSentSigTest <- aov(Sentiment ~ lol, data = lolSentMajorVars)
lolSentSigTestSummary <- summary(lolSentSigTest)
## ---- example_community ----
# Toy 4-node undirected graph illustrating what a "community" looks like.
par(mar = c(2, 4, 3, 2))
plot.igraph(
graph(edges = c("Ted", "Bob", "Bob", "Joe", "Joe", "Kelly", "Kelly", "Bob"), directed = FALSE),
layout = layout_with_kk,
vertex.color = "yellow",
vertex.label.dist = 4
)
## ---- divByPR_graph_active ----
# Diversity vs. log-PageRank scatterplot for active users.
lolActive$PageRankLog <- log(lolActive$PageRank)
graphDivByCent("lolActive", "PageRankLog", "PageRank (log)")
## ---- divByPRPerc_graph_active ----
# Diversity vs. within-community PageRank percentile for active users.
graphDivByCent("lolActive", "PR_Percentile", "PageRank Percentile")
## ---- lol_dist_comms ----
# Per-community relative frequency of the three major (lol) variants,
# facets ordered 1291, 2265, 1032 via an explicit factor level order.
# NOTE(review): `..count..` / PANEL internals are deprecated ggplot2 syntax.
lolDistComms$community_ordered <- factor(lolDistComms$Community, levels = c("1291", "2265", "1032"))
ggplot(lolDistComms,
aes(x = lol)) +
facet_wrap(.~lolDistComms$community_ordered, ncol = 3) +
labs(x = "(lol)", y = "Relative Frequency") +
theme_bw() +
geom_bar(aes(y = ..count.. / sapply(PANEL, FUN = function(x) sum(count[PANEL == x]))))
## ---- lol_dist_users_high ----
graphlolDistUsers(lolDistUsersHigh)
## ---- lol_dist_users_low ----
graphlolDistUsers(lolDistUsersLow)
## ---- div_individual_community ----
# Individual vs. community diversity; points above the y = x line are users
# less diverse than their community overall.
ggplot(usersSummaryActive,
aes(x = Diversity,
y = Diversity_Comm)) +
scale_x_continuous(limits = c(0, 0.75)) +
scale_y_continuous(limits = c(0, 0.75)) +
labs(x = "Individual Diversity", y = "Community Diversity") +
theme_bw() +
geom_point() +
geom_abline(slope = 1, intercept = 0)
## ---- sentiment_lol_hist ----
# Sentiment histograms per variant (density, one row per variant), each with
# a red vertical line at that variant's mean sentiment.
ggplot(lolSentMajorVars,
aes(x = Sentiment, y = stat(density * width))) +
facet_grid(lol ~ .) +
labs(y = "Density") +
theme_bw() +
geom_histogram() +
geom_vline(data = filter(lolSentMajorVars, lol == "lol"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1) +
geom_vline(data = filter(lolSentMajorVars, lol == "Lol"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1) +
geom_vline(data = filter(lolSentMajorVars, lol == "LOL"), aes(xintercept = mean(Sentiment)), color = "red", lwd = 1)
|
# Plot 3 of the Exploratory Data Analysis assignment: energy sub-metering
# (three series) over a two-day window, written to plot3.png.
#Read data into dataframe (semicolon-separated; quoting disabled)
d <- read.csv("~/coursera/EDA/household_power_consumption.txt", sep=";", quote="", stringsAsFactors=FALSE)
#Create duplicate dataframe for formatting data
d2<-d
#Replace ? with NA (the file encodes missing values as "?")
d2[d2=="?"]<-NA
#Combine date and time into a new column DT
d2$DT<-paste(d$Date,d$Time)
#Convert all columns to correct classes
d2$Date<-as.Date(d$Date,"%d/%m/%Y")
d2$Global_active_power<-as.numeric(d2$Global_active_power)
d2$Global_reactive_power<-as.numeric(d2$Global_reactive_power)
d2$Voltage<-as.numeric(d2$Voltage)
d2$Global_intensity<-as.numeric(d2$Global_intensity)
d2$Sub_metering_1<-as.numeric(d2$Sub_metering_1)
d2$Sub_metering_2<-as.numeric(d2$Sub_metering_2)
d2$Sub_metering_3<-as.numeric(d2$Sub_metering_3)
# NOTE(review): strptime() returns POSIXlt; storing a POSIXlt column in a
# data.frame is fragile — POSIXct would be the conventional choice.
d2$Time<-strptime(d2$DT,"%d/%m/%Y %H:%M:%S")
#Extract date time data for 2007-02-01 and 2007-02-02
# (Date-vs-string comparison coerces the strings to Date)
inte<-d2$Time[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
#Extract Sub_metering_1,2 and 3 for relevant period
s1<-d2$Sub_metering_1[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
s2<-d2$Sub_metering_2[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
s3<-d2$Sub_metering_3[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
#Open PNG graphics device
png(filename = "~/coursera/EDA/plot3.png",width = 480, height = 480, units = "px")
#Plot sub-metering 1 as a line, then overlay 2 (red) and 3 (blue)
plot(inte,s1,type="l", xlab="",ylab="Energy sub metering",ylim=c(0,40))
lines(inte,s2,col="red")
lines(inte,s3,col="blue")
# Add legend matching the three series' colors
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
#Close device
dev.off()
|
/plot3.r
|
no_license
|
abhk/ExData_Plotting1
|
R
| false
| false
| 1,511
|
r
|
#Read data into dataframe
d <- read.csv("~/coursera/EDA/household_power_consumption.txt", sep=";", quote="", stringsAsFactors=FALSE)
#Create duplicate dataframe for formatting data
d2<-d
#Replace ? with NA
d2[d2=="?"]<-NA
#Combine date and time into a new column DT
d2$DT<-paste(d$Date,d$Time)
#Convert all columns to correct classes
d2$Date<-as.Date(d$Date,"%d/%m/%Y")
d2$Global_active_power<-as.numeric(d2$Global_active_power)
d2$Global_reactive_power<-as.numeric(d2$Global_reactive_power)
d2$Voltage<-as.numeric(d2$Voltage)
d2$Global_intensity<-as.numeric(d2$Global_intensity)
d2$Sub_metering_1<-as.numeric(d2$Sub_metering_1)
d2$Sub_metering_2<-as.numeric(d2$Sub_metering_2)
d2$Sub_metering_3<-as.numeric(d2$Sub_metering_3)
d2$Time<-strptime(d2$DT,"%d/%m/%Y %H:%M:%S")
#Extract date time data
inte<-d2$Time[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
#Extract Sub_metering_1,2 and 3 for relevant period
s1<-d2$Sub_metering_1[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
s2<-d2$Sub_metering_2[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
s3<-d2$Sub_metering_3[d2$Date>"2007-01-31" & d2$Date<"2007-02-03"]
#Open PNG graphics device
png(filename = "~/coursera/EDA/plot3.png",width = 480, height = 480, units = "px")
#Plot
plot(inte,s1,type="l", xlab="",ylab="Energy sub metering",ylim=c(0,40))
lines(inte,s2,col="red")
lines(inte,s3,col="blue")
# Add legend
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
#Close device
dev.off()
|
## Functions for creating and using inverted matrices with caching ability.
## Constructor for a cache-aware matrix wrapper: returns a list of closures
## that cacheSolve() uses to read the matrix and to store / retrieve a
## memoized inverse. Setting a new matrix invalidates the cached inverse.
makeCacheMatrix <- function(original.matrix = matrix()) {
  # Reject anything that is not a matrix up front
  if (!is.matrix(original.matrix)) {
    stop("Please give a matrix")
  }
  cached.inverse <- NULL
  list(
    # Replace the stored matrix and drop the stale cached inverse
    set = function(new.matrix) {
      original.matrix <<- new.matrix
      cached.inverse <<- NULL
    },
    # Accessor for the stored matrix
    get = function() original.matrix,
    # Store / retrieve the cached inverse
    set.inverse = function(inv) cached.inverse <<- inv,
    get.inverse = function() cached.inverse
  )
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not changed)
## then cacheSolve() returns the cached inverse instead of recomputing it.
## Fixes: the function body was missing its closing brace, and the `...`
## arguments were accepted but never forwarded to solve().
cacheSolve <- function(cacheable.matrix, ...) {
  inverted.matrix <- cacheable.matrix$get.inverse()
  # Serve the cached inverse when one is available
  if(!is.null(inverted.matrix)) {
    message("Getting cached inverse matrix")
    return(inverted.matrix)
  }
  # No cached value: compute the inverse, store it in the cache, return it
  matrix.to.inverse <- cacheable.matrix$get()
  inverted.matrix <- solve(matrix.to.inverse, ...)
  cacheable.matrix$set.inverse(inverted.matrix)
  inverted.matrix
}
|
/cachematrix.R
|
no_license
|
shanewooten/ProgrammingAssignment2
|
R
| false
| false
| 1,563
|
r
|
## Functions for creating and using inverted matrices which caching ability
## Creates cacheable matrix for inputting to
## cacheSolve() function which sets and gets
## the cached values
makeCacheMatrix <- function(original.matrix = matrix()) {
# Let's check if we have correct input
if (!is.matrix(original.matrix)) {
stop("Please give a matrix")
}
inverted.matrix <- NULL
set <- function(y) {
original.matrix <<- y
inverted.matrix <<- NULL
}
# Functions for getting and setting cached inv. matrix value
get <- function() original.matrix
# Inversing the matrix using build in solve() function in R
set.inverse <- function(solve) inverted.matrix <<- solve
get.inverse <- function() inverted.matrix
list(
set = set,
get = get,
set.inverse = set.inverse,
get.inverse = get.inverse)
}
## Computes the inverse of the cacheable matrix returned by makeCacheMatrix()
## If the inverse has already been calculated and there's no change in the matrix
## then the cacheSolve() returns the cached inverse
cacheSolve <- function(cacheable.matrix, ...) {
inverted.matrix <- cacheable.matrix$get.inverse()
# Do we have cached matrix available?
if(!is.null(inverted.matrix)) {
message("Getting cached inverse matrix")
return(inverted.matrix)
}
# Let's create inverted matrix in case
# there's no cached matrix available.
matrix.to.inverse <- cacheable.matrix$get()
inverted.matrix <- solve(matrix.to.inverse)
cacheable.matrix$set.inverse(inverted.matrix)
inverted.matrix
|
# Assembles data for all five drugs by running the individual scripts below
# with source(), in order. Each entry is annotated with what the script does.
assembly_scripts <- c(
  # Flattens Eric Livingston's data to generate a list of pmid1s of cited
  # references derived from reviews of relevant literature
  "~//NETELabs_CaseStudies/Review_Master/load_core_citation_data.R",
  # Assembles cited references from FDA Approval Summary papers
  "~//NETELabs_CaseStudies/Review_Master/pazdur.R",
  # Per-drug merge scripts
  "~/NETELabs_CaseStudies/assembly/alemtuzumab_merge.R",
  "~/NETELabs_CaseStudies/assembly/imatinib_merge.R",
  "~/NETELabs_CaseStudies/assembly/nelarabine_merge.R",
  "~/NETELabs_CaseStudies/assembly/ramucirumab_merge.R",
  "~/NETELabs_CaseStudies/assembly/sunitinib_merge.R",
  # Collects edge counts by node type
  "~/NETELabs_CaseStudies/assembly/metadata.R",
  # Assembles data for Eric
  "~/NETELabs_CaseStudies/five_pack_stageI_assembly.R"
)
for (script in assembly_scripts) {
  source(script)
}
|
/assembly/master_script.R
|
no_license
|
ahasobriquets/NETELabs_CaseStudies
|
R
| false
| false
| 1,120
|
r
|
# Assembles data for all five drugs by calling individual scripts consecutively
# Run load_core_citation. This script flattens Eric Livingston's data to generate
# a list of pmid1s of cited references derived from reviews of relevant literature
source("~//NETELabs_CaseStudies/Review_Master/load_core_citation_data.R")
# Run pazdur.R Assembles cited references from FDA Approval Summary papers
source("~//NETELabs_CaseStudies/Review_Master/pazdur.R")
# Run alemtuzumab_merge.R
source("~/NETELabs_CaseStudies/assembly/alemtuzumab_merge.R")
# Run imatinib_merge.R
source ("~/NETELabs_CaseStudies/assembly/imatinib_merge.R")
# Run nelarabine_merge.R
source ("~/NETELabs_CaseStudies/assembly/nelarabine_merge.R")
# Run ramucirumab_merge.R
source ("~/NETELabs_CaseStudies/assembly/ramucirumab_merge.R")
# Run sunitinib_merge.R
source ("~/NETELabs_CaseStudies/assembly/sunitinib_merge.R")
# Run metadata.R - collects edge counts by node type
source ("~/NETELabs_CaseStudies/assembly/metadata.R")
# Run five_pack to assemble data for Eric
source("~/NETELabs_CaseStudies/five_pack_stageI_assembly.R")
|
/generalists-specialists/beals_species_pool_v0.6.r
|
no_license
|
zdealveindy/JUICE-R
|
R
| false
| false
| 15,914
|
r
| ||
#(1)-1
# Simulate 10000 draws from a binomial distribution (3 trials, success
# probability 0.3) and show the histogram, sample mean, and sample variance.
n <- 10000
Y <- rbinom(n, 3, .3)
hist(Y, probability = TRUE, main = "Y=X1+X2+X3", breaks = 0:4, right = FALSE)
mean(Y)
var(Y)
#(1)-2
# Simulate 10000 draws from a binomial distribution (100 trials, success
# probability 0.3) and show the histogram, sample mean, and sample variance.
W <- rbinom(n, 100, .3)
hist(W, probability = TRUE, main = "W=X1+X2+c+X100", breaks = 10:50)
mean(W)
var(W)
rm(list = ls(all.names = TRUE))
#(2)-1
# Generate 10 independent samples of size 10000 from N(mean = 1, sd = 2) and
# show the histogram, sample mean, and sample variance of their row-wise sum.
# BUG FIX: the rm() above deleted every object including n, so the original
# script failed here; n is redefined before use. The ten repeated rnorm()
# assignments are replaced by one matrix draw summed with rowSums(), which
# has the identical distribution.
n <- 10000
Y <- rowSums(matrix(rnorm(10 * n, 1, 2), nrow = n, ncol = 10))
hist(Y, probability = TRUE, main = "Y=X1+X2+c+X10", breaks = 50)
mean(Y)
var(Y)
#(2)-2
# Standardize the data from (2)-1 (theoretical mean 10, variance 40) and
# overlay its histogram with the standard normal density curve.
Z <- (Y - 10) / sqrt(40)
hist(Z, probability = TRUE, xlim = c(-4, 4), ylim = c(0, .4), main = "Z=(Y-10)/sqrt(40)", breaks = 50)
par(new = TRUE)
x <- seq(-4, 4, 0.1)
curve(dnorm(x, 0, 1), xlim = c(-4, 4), ylim = c(0, .4), type = "l")
|
/3.二項分布と標準化.R
|
no_license
|
Tsubasa1205/gumball
|
R
| false
| false
| 1,320
|
r
|
#(1)-1
#1となる確率が0.3、試行回数3回の二項分布を10000回シミュレートし、そのヒストグラム、標本平均、標本分散を表示
n<-10000
Y=rbinom(n,3,.3)
hist(Y,probability=T,main="Y=X1+X2+X3",breaks=0:4,right=F)
mean(Y)
var(Y)
#(1)-2
#1となる確率が0.3、試行回数100回の二項分布を10000回シミュレートし、そのヒストグラム、標本平均、標本分散を表示
W=rbinom(n,100,.3)
hist(W,probability=T,main="W=X1+X2+c+X100",breaks=10:50)
mean(W)
var(W)
rm(list=ls(all=TRUE))
#(2)-1
#正規分布N(1,4)に従って分布している10000個の標本を10セット分乱数を発生させ、その和のヒストグラム、標本平均、標本分散を表示
X1<-rnorm(n,1,2)
X2<-rnorm(n,1,2)
X3<-rnorm(n,1,2)
X4<-rnorm(n,1,2)
X5<-rnorm(n,1,2)
X6<-rnorm(n,1,2)
X7<-rnorm(n,1,2)
X8<-rnorm(n,1,2)
X9<-rnorm(n,1,2)
X10<-rnorm(n,1,2)
Y=X1+X2+X3+X4+X5+X6+X7+X8+X9+X10
hist(Y,probability=T,main="Y=X1+X2+c+X10",breaks=50)
mean(Y)
var(Y)
#(2)-2
#(2)-1のデータを標準化し、そのヒストグラムを標準正規分布の密度関数の曲線と重ねて表示
Z=(Y-10)/sqrt(40)
hist(Z,probability=T,xlim=c(-4,4),ylim=c(0,.4),main="Z=(Y-10)/sqrt(40)",breaks=50)
par(new=T)
x<-seq(-4,4,0.1)
curve(dnorm(x,0,1),xlim=c(-4,4),ylim=c(0,.4),type="l")
|
\name{sim.data}
\alias{sim.data}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generates data for illustrative purposes}
\description{
Returns a random sample from some distributions,
to illustrate some visualization tools.
Returns also the density (as a piecewise constant function)
for some examples, or the distribution function.
}
\usage{
sim.data(n = NULL, seed = 1, N = NULL, type = "mulmod",
M = NULL, sig = NULL, p = NULL, d = NULL,
cova = NULL, marginal = NULL, t = NULL, df = NULL, distr = FALSE,
noisedim = 1, sig1 = 0.5, sig2 = 1.5, diff = 0.1, dist = 4)
}
\arguments{
\item{n}{positive integer; size of the sample to be generated}
\item{seed}{real number; seed for the random number generator.}
\item{N}{2*1 vector of positive integers; the size of the grid where
the piecewise constant function is evaluated }
\item{type}{"mixt", "mulmod", "fox", "tetra3d", "penta4d",
"cross", "gauss", "student", "gumbel", "1d2modal", or "claw".}
\item{M}{mixnum*d-matrix; rows of M are means of the Gaussians in
the mixture. We have a mixture of "mixnum" Gaussians, whose
dimension is d.}
\item{sig}{mixnum*d-matrix; rows of sig are the diagonals of the
covariance matrices of the mixtures.}
\item{p}{mixnum-vector; weights for the members of the mixture.
The sum of elements of "p" is 1.}
\item{d}{positive integer; dimension of the vectors of the sample
to be generated, need to be given only when type="mixt" and d=1}
\item{cova}{Covariance matrix for the Gauss or Student copulas}
\item{marginal}{NULL, "gauss", or "student"; this parameter is used to
give the marginal distribution for the Gauss or Student copulas;
if marginal=NULL, then the uniform marginals are used}
\item{t}{if marginal="student", gives the degrees of freedom}
\item{df}{degrees of freedom for the Student copula}
\item{distr}{internal (implemented for "1d2modal")
TRUE, if one wants the distribution function instead of the
density function}
\item{noisedim}{the number of noise dimension in the projection pursuit
example ("fssk") }
\item{sig1}{ standard deviation for "cross" and "diff1d" }
\item{sig2}{ second standard deviation for "cross" }
\item{diff}{ parameter for "diff1d"; the difference between the
Gaussians in the 1D mixture }
\item{dist}{a positive real number; gives the distance between the mixture
centers in the 4D mixture of Gaussians "penta4d"}
}
\details{
When type="mixt", generates data from a mixture of Gaussians.
When type="mulmod", the density is 3-modal.
When type="fox", the density has multimodal level sets.
}
\value{
If "n" is not NULL, then the function returns a n*d-data matrix or a
n*2-data matrix,
if "N" is not NULL, then the function returns a piecewise constant
function on the grid of size N[1]*N[2],
if the both are NULL, then the function returns the mean,
covariance, and the weights of the mixture components
}
\author{ Jussi Klemela }
%\seealso{\code{\link{simmix}}}
\examples{
d<-2
mixnum<-3
M<-matrix(0,mixnum,d)
M[1,]<-c(0,0)
M[2,]<-c(4,0)
M[3,]<-c(0,4)
sig<-matrix(1,mixnum,d)
p0<-1/mixnum
p<-p0*rep(1,mixnum)
n<-100
dendat<-sim.data(type="mixt",n=n,M=M,sig=sig,p=p,seed=1)
plot(dendat)
dendat<-sim.data(n=100)
plot(dendat)
N<-c(20,20)
pcf<-sim.data(N=N)
dp<-draw.pcf(pcf,pnum=c(30,30))
contour(dp$x,dp$y,dp$z,drawlabels=FALSE)
sim.data()
type="fox"
dendat<-sim.data(n=100,type=type)
plot(dendat)
pcf<-sim.data(N=N,type=type)
dp<-draw.pcf(pcf,pnum=c(30,30))
contour(dp$x,dp$y,dp$z,drawlabels=FALSE)
}
\keyword{datagen}% at least one, from doc/KEYWORDS
|
/man/sim.data.Rd
|
no_license
|
cran/denpro
|
R
| false
| false
| 3,605
|
rd
|
\name{sim.data}
\alias{sim.data}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Generates data for illustrative purposes}
\description{
Returns a random sample from some distributions,
to illustrate some visulization tools.
Returns also the density (as a piecewise constant function)
for some examples, or the distribution function.
}
\usage{
sim.data(n = NULL, seed = 1, N = NULL, type = "mulmod",
M = NULL, sig = NULL, p = NULL, d = NULL,
cova = NULL, marginal = NULL, t = NULL, df = NULL, distr = FALSE,
noisedim = 1, sig1 = 0.5, sig2 = 1.5, diff = 0.1, dist = 4)
}
\arguments{
\item{n}{positive integer; size of the sample to be generated}
\item{seed}{real number; seed for the random number generator.}
\item{N}{2*1 vector of positive integers; the size of the grid where
the piecewise constant function is evaluated }
\item{type}{"mixt", "mulmod", "fox", "tetra3d", "penta4d",
"cross", "gauss", "student", "gumbel", "1d2modal", or "claw".}
\item{M}{mixnum*d-matrix; rows of M are means of the Gaussians in
the mixture. We have a mixture of "mixnum" Gaussians, whose
dimension is d.}
\item{sig}{mixnum*d-matrix; rows of sig are the diagonals of the
covariance matrices of the mixtures.}
\item{p}{mixnum-vector; weights for the members of the mixture.
The sum of elements of "p" is 1.}
\item{d}{positive integer; dimension of the vectors of the sample
to be generated, need to be given only when type="mixt" and d=1}
\item{cova}{Covariance matrix for the Gauss or Student copulas}
\item{marginal}{NULL, "gauss", or "student"; this parameter is used to
give the marginal distribution for the Gauss or Student copulas;
if marginal=NULL, then the uniform marginals are used}
\item{t}{if marginal="student", gives the degrees of freedom}
\item{df}{degrees of freedom for the Student copula}
\item{distr}{internal (implemented for "1d2modal")
TRUE, if one wants the distribution function instead of the
density function}
\item{noisedim}{the number of noise dimension in the projection pursuit
example ("fssk") }
\item{sig1}{ standard deviation for "cross" and "diff1d" }
\item{sig2}{ second standard deviation for "cross" }
\item{diff}{ parameter for "diff1d"; the difference between the
Gaussians in the 1D mixture }
\item{dist}{a positive real number; gives the distance between the mixture
centers in the 4D mixture of Gaussians "penta4d"}
}
\details{
When type="mixt", generates data from a mixture of Gaussians.
When type="mulmod", the density is 3-modal.
When type="fox", the density has multimodal level sets.
}
\value{
If "n" is not NULL, then the function returns a n*d-data matrix or a
n*2-data matrix,
if "N" is not NULL, then the function returns a piecewise constant
function on the grid of size N[1]*N[2],
if the both are NULL, then the function returns the mean,
covariance, and the weights of the mixture components
}
\author{ Jussi Klemela }
%\seealso{\code{\link{simmix}}}
\examples{
d<-2
mixnum<-3
M<-matrix(0,mixnum,d)
M[1,]<-c(0,0)
M[2,]<-c(4,0)
M[3,]<-c(0,4)
sig<-matrix(1,mixnum,d)
p0<-1/mixnum
p<-p0*rep(1,mixnum)
n<-100
dendat<-sim.data(type="mixt",n=n,M=M,sig=sig,p=p,seed=1)
plot(dendat)
dendat<-sim.data(n=100)
plot(dendat)
N<-c(20,20)
pcf<-sim.data(N=N)
dp<-draw.pcf(pcf,pnum=c(30,30))
contour(dp$x,dp$y,dp$z,drawlabels=FALSE)
sim.data()
type="fox"
dendat<-sim.data(n=100,type=type)
plot(dendat)
pcf<-sim.data(N=N,type=type)
dp<-draw.pcf(pcf,pnum=c(30,30))
contour(dp$x,dp$y,dp$z,drawlabels=FALSE)
}
\keyword{datagen}% at least one, from doc/KEYWORDS
|
## Matrix inversion is usually a costly computation and
## there may be some benefit to caching the inverse of a matrix rather than
##compute it repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## This function computes the inverse of the special "matrix" returned by
##makeCacheMatrix above. If the inverse has already been calculated
##(and the matrix has not changed), then the cachesolve should retrieve the
##inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("Data Input.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
|
/cachematrix.R
|
no_license
|
nudandeepaa/ProgrammingAssignment2
|
R
| false
| false
| 1,055
|
r
|
## Matrix inversion is usually a costly computation and
## there may be some benefit to caching the inverse of a matrix rather than
##compute it repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## This function computes the inverse of the special "matrix" returned by
##makeCacheMatrix above. If the inverse has already been calculated
##(and the matrix has not changed), then the cachesolve should retrieve the
##inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("Data Input.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{getMutual}
\alias{getMutual}
\title{Returns a list of user IDs of the mutual friends of two users.}
\usage{
getMutual(source_id = "", target_uid = "", target_uids = "", order = "",
count = "", offset = "")
}
\arguments{
\item{source_id}{ID of the user whose friends will be checked against the friends of the user specified in target_uid.}
\item{target_uid}{ID of the user whose friends will be checked against the friends of the user specified in source_uid.}
\item{target_uids}{List of comma-separated positive numbers.}
\item{order}{Sort order: random — random order.}
\item{count}{Number of mutual friends to return.}
\item{offset}{Offset needed to return a specific subset of mutual friends.}
}
\value{
Returns a list of IDs (id) of the mutual friends of the source_uid and target_uid users.
}
\description{
Returns a list of user IDs of the mutual friends of two users.
}
\examples{
mutual_friends <- getMutual(target_uid='1')
}
|
/man/getMutual.Rd
|
no_license
|
GSuvorov/vkR
|
R
| false
| false
| 1,001
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{getMutual}
\alias{getMutual}
\title{Returns a list of user IDs of the mutual friends of two users.}
\usage{
getMutual(source_id = "", target_uid = "", target_uids = "", order = "",
count = "", offset = "")
}
\arguments{
\item{source_id}{ID of the user whose friends will be checked against the friends of the user specified in target_uid.}
\item{target_uid}{ID of the user whose friends will be checked against the friends of the user specified in source_uid.}
\item{target_uids}{List comma-separated positive numbers.}
\item{order}{Sort order: random — random order.}
\item{count}{Number of mutual friends to return.}
\item{offset}{Offset needed to return a specific subset of mutual friends.}
}
\value{
Returns a list of IDs (id) of the mutual friends of the source_uid and target_uid users.
}
\description{
Returns a list of user IDs of the mutual friends of two users.
}
\examples{
mutual_friends <- getMutual(target_uid='1')
}
|
\name{corr.test}
\alias{corr.test}
\alias{corr.p}
\title{Find the correlations, sample sizes, and probability values between elements of a matrix or data.frame. }
\description{Although the cor function finds the correlations for a matrix, it does not report probability values. cor.test does, but for only one pair of variables at a time. corr.test uses cor to find the correlations for either complete or pairwise data and reports the sample sizes and probability values as well. For symmetric matrices, raw probabilities are reported below the diagonal and correlations adjusted for multiple comparisons above the diagonal. In the case of different x and ys, the default is to adjust the probabilities for multiple tests. Both corr.test and corr.p return raw and adjusted confidence intervals for each correlation.
}
\usage{
corr.test(x, y = NULL, use = "pairwise",method="pearson",adjust="holm",
alpha=.05,ci=TRUE,minlength=5)
corr.p(r,n,adjust="holm",alpha=.05,minlength=5,ci=TRUE)
}
\arguments{
\item{x}{A matrix or dataframe }
\item{y}{A second matrix or dataframe with the same number of rows as x }
\item{use}{use="pairwise" is the default value and will do pairwise deletion of cases. use="complete" will select just complete cases. }
\item{method}{method="pearson" is the default value. The alternatives to be passed to cor are "spearman" and "kendall"}
\item{adjust}{What adjustment for multiple tests should be used? ("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"). See \code{\link{p.adjust}} for details about why to use "holm" rather than "bonferroni"). }
\item{alpha}{alpha level of confidence intervals}
\item{r}{A correlation matrix}
\item{n}{Number of observations if using corr.p. May be either a matrix (as returned from corr.test, or a scaler. Set to n - np if finding the significance of partial correlations. (See below). }
\item{ci}{By default, confidence intervals are found. However, this leads to a noticable slowdown of speed, particularly for large problems. So, for just the rs, ts and ps, set ci=FALSE}
\item{minlength}{What is the minimum length for abbreviations. Defaults to 5.}
}
\details{corr.test uses the \code{\link{cor}} function to find the correlations, and then applies a t-test to the individual correlations using the formula
\deqn{t = \frac{r * \sqrt(n-2)}{\sqrt(1-r^2)}
}{t = r* sqrt(n-2)/sqrt(1-r^2) }
\deqn{se = \sqrt(\frac{1-r^2}{n-2}) }{se = sqrt((1-r^2)/(n-2))}
The t and Standard Errors are returned as objects in the result, but are not normally displayed. Confidence intervals are found and printed if using the print(short=FALSE) option. These are found by using the fisher z transform of the correlation and then taking the range r +/- qnorm(alpha/2) * se and the standard error of the z transforms is \deqn{se = \sqrt(\frac {1}{n-3}) }{se = sqrt(1/(n-3))}. These values are then back transformed to be in correlation units. They are returned in the ci object.
The probability values may be adjusted using the Holm (or other) correction. If the matrix is symmetric (no y data), then the original p values are reported below the diagonal and the adjusted above the diagonal. Otherwise, all probabilities are adjusted (unless adjust="none"). This is made explicit in the output. Confidence intervals are shown for raw and adjusted probabilities in the ci object.
\code{\link{corr.p}} may be applied to the results of \code{\link{partial.r}} if n is set to n - s (where s is the number of variables partialed out) Fisher, 1924.
}
\value{
\item{r}{The matrix of correlations}
\item{n}{Number of cases per correlation}
\item{t}{value of t-test for each correlation}
\item{p}{two tailed probability of t for each correlation. For symmetric matrices, p values adjusted for multiple tests are reported above the diagonal. }
\item{se}{standard error of the correlation}
\item{ci}{the alpha/2 lower and upper values.}
\item{ci2}{ci but with the adjusted pvalues as well. This was added after tests showed we were breaking some packages that were calling the ci object without bothering to check for its dimensions.}
\item{ci.adj}{These are the adjusted ((Holm or Bonferroni) confidence intervals. If asking to not adjust, the Holm adjustments for the confidence intervals are shown anyway, but the probability values are not adjusted and the appropriate confidence intervals are shown in the ci object. }
}
\note{For very large matrices (> 200 x 200), there is a noticeable speed improvement if confidence intervals are not found.
That adjusted confidence intervals are shown even when asking for no adjustment might be confusing. If you don't want adjusted intervals, just use the ci object. The adjusted values are given in the ci.adj object. }
\seealso{ \code{\link{cor.test}} for tests of a single correlation, Hmisc::rcorr for an equivalent function, \code{\link{r.test}} to test the difference between correlations, and \code{\link{cortest.mat}} to test for equality of two correlation matrices.
Also see \code{\link{cor.ci}} for bootstrapped confidence intervals of Pearson, Spearman, Kendall, tetrachoric or polychoric correlations. In addition \code{\link{cor.ci}} will find bootstrapped estimates of composite scales based upon a set of correlations (ala \code{\link{cluster.cor}}).
In particular, see \code{\link{p.adjust}} for a discussion of p values associated with multiple tests.
Other useful functions related to finding and displaying correlations include \code{\link{lowerCor}} for finding the correlations and then displaying the lower off diagonal using the \code{\link{lowerMat}} function. \code{\link{lowerUpper}} to compare two correlation matrices.
}
\examples{
ct <- corr.test(attitude) #find the correlations and give the probabilities
ct #show the results
cts <- corr.test(attitude[1:3],attitude[4:6]) #reports all values corrected for multiple tests
#corr.test(sat.act[1:3],sat.act[4:6],adjust="none") #don't adjust the probabilities
#take correlations and show the probabilities as well as the confidence intervals
print(corr.p(cts$r,n=30),short=FALSE)
#don't adjust the probabilities
print(corr.test(sat.act[1:3],sat.act[4:6],adjust="none"),short=FALSE)
}
\keyword{multivariate }
\keyword{ models }
|
/man/corr.test.Rd
|
no_license
|
adamv227/psych
|
R
| false
| false
| 6,297
|
rd
|
\name{corr.test}
\alias{corr.test}
\alias{corr.p}
\title{Find the correlations, sample sizes, and probability values between elements of a matrix or data.frame. }
\description{Although the cor function finds the correlations for a matrix, it does not report probability values. cor.test does, but for only one pair of variables at a time. corr.test uses cor to find the correlations for either complete or pairwise data and reports the sample sizes and probability values as well. For symmetric matrices, raw probabilities are reported below the diagonal and correlations adjusted for multiple comparisons above the diagonal. In the case of different x and ys, the default is to adjust the probabilities for multiple tests. Both corr.test and corr.p return raw and adjusted confidence intervals for each correlation.
}
\usage{
corr.test(x, y = NULL, use = "pairwise",method="pearson",adjust="holm",
alpha=.05,ci=TRUE,minlength=5)
corr.p(r,n,adjust="holm",alpha=.05,minlength=5,ci=TRUE)
}
\arguments{
\item{x}{A matrix or dataframe }
\item{y}{A second matrix or dataframe with the same number of rows as x }
\item{use}{use="pairwise" is the default value and will do pairwise deletion of cases. use="complete" will select just complete cases. }
\item{method}{method="pearson" is the default value. The alternatives to be passed to cor are "spearman" and "kendall"}
\item{adjust}{What adjustment for multiple tests should be used? ("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"). See \code{\link{p.adjust}} for details about why to use "holm" rather than "bonferroni"). }
\item{alpha}{alpha level of confidence intervals}
\item{r}{A correlation matrix}
\item{n}{Number of observations if using corr.p. May be either a matrix (as returned from corr.test) or a scalar. Set to n - np if finding the significance of partial correlations. (See below). }
\item{ci}{By default, confidence intervals are found. However, this leads to a noticable slowdown of speed, particularly for large problems. So, for just the rs, ts and ps, set ci=FALSE}
\item{minlength}{What is the minimum length for abbreviations. Defaults to 5.}
}
\details{corr.test uses the \code{\link{cor}} function to find the correlations, and then applies a t-test to the individual correlations using the formula
\deqn{t = \frac{r * \sqrt(n-2)}{\sqrt(1-r^2)}
}{t = r* sqrt(n-2)/sqrt(1-r^2) }
\deqn{se = \sqrt(\frac{1-r^2}{n-2}) }{se = sqrt((1-r^2)/(n-2))}
The t and Standard Errors are returned as objects in the result, but are not normally displayed. Confidence intervals are found and printed if using the print(short=FALSE) option. These are found by using the fisher z transform of the correlation and then taking the range r +/- qnorm(alpha/2) * se and the standard error of the z transforms is \deqn{se = \sqrt(\frac {1}{n-3}) }{se = sqrt(1/(n-3))}. These values are then back transformed to be in correlation units. They are returned in the ci object.
The probability values may be adjusted using the Holm (or other) correction. If the matrix is symmetric (no y data), then the original p values are reported below the diagonal and the adjusted above the diagonal. Otherwise, all probabilities are adjusted (unless adjust="none"). This is made explicit in the output. Confidence intervals are shown for raw and adjusted probabilities in the ci object.
\code{\link{corr.p}} may be applied to the results of \code{\link{partial.r}} if n is set to n - s (where s is the number of variables partialed out) Fisher, 1924.
}
\value{
\item{r}{The matrix of correlations}
\item{n}{Number of cases per correlation}
\item{t}{value of t-test for each correlation}
\item{p}{two tailed probability of t for each correlation. For symmetric matrices, p values adjusted for multiple tests are reported above the diagonal. }
\item{se}{standard error of the correlation}
\item{ci}{the alpha/2 lower and upper values.}
\item{ci2}{ci but with the adjusted pvalues as well. This was added after tests showed we were breaking some packages that were calling the ci object without bothering to check for its dimensions.}
\item{ci.adj}{These are the adjusted (Holm or Bonferroni) confidence intervals. If asking to not adjust, the Holm adjustments for the confidence intervals are shown anyway, but the probability values are not adjusted and the appropriate confidence intervals are shown in the ci object. }
}
\note{For very large matrices (> 200 x 200), there is a noticeable speed improvement if confidence intervals are not found.
That adjusted confidence intervals are shown even when asking for no adjustment might be confusing. If you don't want adjusted intervals, just use the ci object. The adjusted values are given in the ci.adj object. }
\seealso{ \code{\link{cor.test}} for tests of a single correlation, Hmisc::rcorr for an equivalent function, \code{\link{r.test}} to test the difference between correlations, and \code{\link{cortest.mat}} to test for equality of two correlation matrices.
Also see \code{\link{cor.ci}} for bootstrapped confidence intervals of Pearson, Spearman, Kendall, tetrachoric or polychoric correlations. In addition \code{\link{cor.ci}} will find bootstrapped estimates of composite scales based upon a set of correlations (ala \code{\link{cluster.cor}}).
In particular, see \code{\link{p.adjust}} for a discussion of p values associated with multiple tests.
Other useful functions related to finding and displaying correlations include \code{\link{lowerCor}} for finding the correlations and then displaying the lower off diagonal using the \code{\link{lowerMat}} function. \code{\link{lowerUpper}} to compare two correlation matrices.
}
\examples{
ct <- corr.test(attitude) #find the correlations and give the probabilities
ct #show the results
cts <- corr.test(attitude[1:3],attitude[4:6]) #reports all values corrected for multiple tests
#corr.test(sat.act[1:3],sat.act[4:6],adjust="none") #don't adjust the probabilities
#take correlations and show the probabilities as well as the confidence intervals
print(corr.p(cts$r,n=30),short=FALSE)
#don't adjust the probabilities
print(corr.test(sat.act[1:3],sat.act[4:6],adjust="none"),short=FALSE)
}
\keyword{multivariate }
\keyword{ models }
|
\name{pvamodel-class}
\Rdversion{1.1}
\docType{class}
\alias{pvamodel-class}
\title{Class \code{"pvamodel"}}
\description{
S4 class for predefined PVA models.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("pvamodel", ...)}.
}
\section{Slots}{
\describe{
\item{\code{growth.model}:}{Object of class \code{"character"},
name of growth model.}
\item{\code{obs.error}:}{Object of class \code{"character"},
name of observation error type (\code{"none"},
\code{"poisson"}, \code{"normal"}).}
\item{\code{model}:}{Object of class \code{"dcModel"},
BUGS model for estimation.}
\item{\code{genmodel}:}{Object of class \code{"dcModel"},
BUGS model for prediction.}
\item{\code{p}:}{Object of class \code{"integer"},
number of parameters in model (including fixed parameters!).}
\item{\code{support}:}{Object of class \code{"matrix"},
range of support for parameters (true parameter scale).}
\item{\code{params}:}{Object of class \code{"character"},
parameter names (diagnostic scale).}
\item{\code{varnames}:}{Object of class \code{"character"},
parameter names (true parameter scale).}
\item{\code{fixed}:}{Object of class \code{"nClones"},
named vector of fixed parameters.}
\item{\code{fancy}:}{Object of class \code{"character"},
fancy model description.}
\item{\code{transf}:}{Object of class \code{"function"},
function to transform from true parameter scale to diagnostic
scale (takes care of fixed value which are not part of the
MCMC output.}
\item{\code{backtransf}:}{Object of class \code{"function"},
function to transform from diagnostic scale to true parameter
scale (takes care of fixed value which are not part of the
MCMC output.}
\item{\code{logdensity}:}{Object of class \code{"function"},
function to calculate log density (used in model selection).}
\item{\code{neffective}:}{Object of class \code{"function"},
function to calculate effective sample size from the number
of observations.}
}
}
\section{Methods}{
No methods defined with class "pvamodel" in the signature.
}
\author{
Khurram Nadeem and Peter Solymos
}
\seealso{
\code{\link{pva}}
}
\examples{
showClass("pvamodel")
}
\keyword{classes}
|
/man/pvamodel-class.Rd
|
no_license
|
psolymos/PVAClone
|
R
| false
| false
| 2,341
|
rd
|
\name{pvamodel-class}
\Rdversion{1.1}
\docType{class}
\alias{pvamodel-class}
\title{Class \code{"pvamodel"}}
\description{
S4 class for predefined PVA models.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("pvamodel", ...)}.
}
\section{Slots}{
\describe{
\item{\code{growth.model}:}{Object of class \code{"character"},
name of growth model.}
\item{\code{obs.error}:}{Object of class \code{"character"},
name of observation error type (\code{"none"},
\code{"poisson"}, \code{"normal"}).}
\item{\code{model}:}{Object of class \code{"dcModel"},
BUGS model for estimation.}
\item{\code{genmodel}:}{Object of class \code{"dcModel"},
BUGS model for prediction.}
\item{\code{p}:}{Object of class \code{"integer"},
number of parameters in model (including fixed parameters!).}
\item{\code{support}:}{Object of class \code{"matrix"},
range of support for parameters (true parameter scale).}
\item{\code{params}:}{Object of class \code{"character"},
parameter names (diagnostic scale).}
\item{\code{varnames}:}{Object of class \code{"character"},
parameter names (true parameter scale).}
\item{\code{fixed}:}{Object of class \code{"nClones"},
named vector of fixed parameters.}
\item{\code{fancy}:}{Object of class \code{"character"},
fancy model description.}
\item{\code{transf}:}{Object of class \code{"function"},
function to transform from true parameter scale to diagnostic
scale (takes care of fixed value which are not part of the
MCMC output.}
\item{\code{backtransf}:}{Object of class \code{"function"},
function to transform from diagnostic scale to true parameter
scale (takes care of fixed value which are not part of the
MCMC output.}
\item{\code{logdensity}:}{Object of class \code{"function"},
function to calculate log density (used in model selection).}
\item{\code{neffective}:}{Object of class \code{"function"},
function to calculate effective sample size from the number
of observations.}
}
}
\section{Methods}{
No methods defined with class "pvamodel" in the signature.
}
\author{
Khurram Nadeem and Peter Solymos
}
\seealso{
\code{\link{pva}}
}
\examples{
showClass("pvamodel")
}
\keyword{classes}
|
# Generates all the molecular
# data for the simulated results
library(ape)
library(TreeSim)
library(phangorn)
library(NELSI)
# Attach sampling times to the tip labels of a simulated, clock-scaled tree.
#
# s.tree: two-element list as produced by NELSI's clock simulators --
#   [[1]] the phylo object, [[2]] a per-branch matrix whose column 6 holds
#   substitution counts and column 7 the node times.
# Reads the global `n.tips` for the number of terminal taxa; the print()
# call is a debugging trace of the per-branch values.
# Returns the tree with tip labels of the form "<label>_<time>".
date.branches <- function(s.tree) {
  phylo <- s.tree[[1]]
  branch.info <- s.tree[[2]]
  subs <- branch.info[, 6]
  times <- branch.info[, 7]
  print(cbind(subs, times))
  tip.dates <- vapply(times, toString, character(1))[1:n.tips]
  phylo$tip.label <- paste(phylo$tip.label, tip.dates, sep = '_')
  phylo
}
# Write one simulated alignment to "HIV_SIM_<i>.phy" in the working directory.
#
# seq.n: two-element list -- [[1]] the simulated alignment (an object
# accepted by ape::as.DNAbin), [[2]] the integer index used in the file name.
# Called purely for its side effect (file written via ape::write.dna).
write.sequences <- function(seq.n) {
  alignment <- seq.n[[1]]
  file.index <- seq.n[[2]]
  outfile <- sprintf("HIV_SIM_%d.phy", file.index)
  write.dna(as.DNAbin(alignment), outfile)
}
# set the seed for this run (makes the whole simulation reproducible)
set.seed(19902302)
# Number of guide trees to create
n.trees <- 2
# Tips per tree; also read as a global by date.branches()
n.tips <- 50
# Alignment length in sites
n.seqlen <- 630
# Parameters for the nucleotide simulation
# HKY85 with rate = kappa
kappa <- 6.0
# Rate vector in phangorn order (AC, AG, AT, CG, CT, GT); transitions = kappa
Q <- c(kappa, 1, 1, 1, 1, kappa)
# Base frequencies (A, C, G, T)
pi.vec <- c(0.42, 0.15, 0.15, 0.28)
seq.rate <- 2.0
# Parameters for initial guide tree(s)
# These are somewhat arbitrarily set
lambda <- c(2)
mu <- c(2)
sampprob <-c(0.5)
times<-c(0)
# Parameters for the clock of the tree (strict clock: noise = 0)
sim.params <- list(rate = 1.0, noise = 0)
sim.clockmodel <- simulate.clock
#trees <- apply(matrix(rep(n.tips,n.trees)), 1, sim.bdsky.stt, lambdasky=lambda, deathsky=mu, timesky=times, sampprobsky=sampprob,rho=0,timestop=0)
#trees <- lapply(trees, function(x) {x[[1]]}) # undo the strange indexing that sim.bdsky forces on you
# Currently random (non-birth-death) topologies via ape::rtree
trees <- apply(matrix(rep(n.tips,n.trees)), 1, rtree)
sim.trees <- lapply(trees, sim.clockmodel, params=sim.params)
trees <- lapply(sim.trees, date.branches)
seqs <- lapply(trees, simSeq, l=n.seqlen, Q=Q, bf=pi.vec, rate=seq.rate)
# Write each alignment as HIV_SIM_<i>.phy (side effect only)
apply(cbind(seqs, 1:n.trees), 1, write.sequences)
|
/simulated/simulate_data.R
|
no_license
|
jjh13/ape.rtt
|
R
| false
| false
| 1,589
|
r
|
# Generates all the molecular
# data for the simulated results
library(ape)
library(TreeSim)
library(phangorn)
library(NELSI)
# Append the sampling time of each tip to its label ("<label>_<time>").
# s.tree: two-element list from NELSI's clock simulation -- [[1]] phylo tree,
# [[2]] per-branch matrix (col 6 = substitutions, col 7 = times).
# NOTE(review): reads the global `n.tips`; the print() below is a debug trace.
date.branches <- function(s.tree) {
  tree <- s.tree[[1]]
  subs <- s.tree[[2]][, 6]
  times <- s.tree[[2]][, 7]
  print(cbind(subs, times))
  dates <- unlist(Map(toString, times))[1:n.tips]
  tree$tip.label <- paste(tree$tip.label, dates, sep='_')
  tree
}
# Write one simulated alignment to "HIV_SIM_<i>.phy" (side effect only).
# seq.n: two-element list -- [[1]] the alignment, [[2]] the integer file index.
write.sequences <- function(seq.n){
  seq <- seq.n[[1]]
  i <- seq.n[[2]]
  write.dna(as.DNAbin(seq), sprintf("HIV_SIM_%d.phy", i))
}
# set the seed for this run (makes the whole simulation reproducible)
set.seed(19902302)
# Number of guide trees to create
n.trees <- 2
# Tips per tree; also read as a global by date.branches()
n.tips <- 50
# Alignment length in sites
n.seqlen <- 630
# Parameters for the nucleotide simulation
# HKY85 with rate = kappa
kappa <- 6.0
# Rate vector in phangorn order (AC, AG, AT, CG, CT, GT); transitions = kappa
Q <- c(kappa, 1, 1, 1, 1, kappa)
# Base frequencies (A, C, G, T)
pi.vec <- c(0.42, 0.15, 0.15, 0.28)
seq.rate <- 2.0
# Parameters for initial guide tree(s)
# These are somewhat arbitrarily set
lambda <- c(2)
mu <- c(2)
sampprob <-c(0.5)
times<-c(0)
# Parameters for the clock of the tree (strict clock: noise = 0)
sim.params <- list(rate = 1.0, noise = 0)
sim.clockmodel <- simulate.clock
#trees <- apply(matrix(rep(n.tips,n.trees)), 1, sim.bdsky.stt, lambdasky=lambda, deathsky=mu, timesky=times, sampprobsky=sampprob,rho=0,timestop=0)
#trees <- lapply(trees, function(x) {x[[1]]}) # undo the strange indexing that sim.bdsky forces on you
# Currently random (non-birth-death) topologies via ape::rtree
trees <- apply(matrix(rep(n.tips,n.trees)), 1, rtree)
sim.trees <- lapply(trees, sim.clockmodel, params=sim.params)
trees <- lapply(sim.trees, date.branches)
seqs <- lapply(trees, simSeq, l=n.seqlen, Q=Q, bf=pi.vec, rate=seq.rate)
# Write each alignment as HIV_SIM_<i>.phy (side effect only)
apply(cbind(seqs, 1:n.trees), 1, write.sequences)
|
####Info####
"This file describes the work process for the validation of LDJump (Chapter 5.1.3. \"Validation of the update\"), which includes
1. Loading the data
2. Transforming
3. Visualizing in R
"
####Running LDJump####
#Loading the packages:
library(parallel)
library(seqinr)
library(vcfR)
library(ape)
library(LDJump)
#ggplot2 must be attached: geom_step(), aes(), ggsave(), unit() etc. are
#called unqualified in the plotting section below.
library(ggplot2)
#Setting the working directory:
"Please consider setting your working directory in the folder in which you have all the files that you will be using (VCF-file, Reference-File)"
setwd("~/Validation-Of-Update")
#The files we are using.
#NOTE: run only ONE of the two groups below; executing both means Group 2
#silently overwrites the Group 1 assignments.
#Group 1:
ref_seq = "Reference_CH21_41187000_41290679.fasta"
vcf_file = "TSI_21_41187000_41290679.vcf"
fasta_seq = "TSI_CH21_41187000_41290679.fa"
startofseq = 41187000
endofseq = 41290679
#Group 2:
ref_seq = "41m_41m10k.fa"
vcf_file = "41m_41m10k.vcf"
fasta_seq = "my_fasta.fasta"
startofseq = 41000000
endofseq = 41010000
#Running the code:
##Using our VCF-File (wall-clock time is recorded around each call):
start_time_vcf <- Sys.time()
results_testing = LDJump(vcf_file, chr = 21 , segLength = 1000, cores = 6, pathPhi = "/home/roots/anaconda3/bin/Phi", format = "vcf", refName = ref_seq, lengthofseq = endofseq-startofseq, startofseq = startofseq, endofseq = endofseq)
end_time_vcf <- Sys.time()
time_taken_vcf <- end_time_vcf - start_time_vcf
##Using a FASTA-File of the same DNA sequence for comparison:
start_time_fasta <- Sys.time()
fasta_only_result = LDJump(fasta_seq , segLength = 1000, cores = 3, pathPhi = "/home/roots/anaconda3/bin/Phi", format = "fasta", refName = ref_seq)
end_time_fasta <- Sys.time()
time_taken_fasta <- end_time_fasta - start_time_fasta
#Comparison of the results:
#BUGFIX: the VCF run is stored in `results_testing` (the name `results` is
#only created later by load()); also use the pdf() device so the .pdf files
#actually contain PDF rather than PostScript output.
pdf("ResultsVCF.pdf")
plot(results_testing[[1]], xlab = "Segments", ylab = "Estimated Recombination Rate", main = "Estimated recombination map with LDJump using VCF")
dev.off()
pdf("ResultsFASTA.pdf")
plot(fasta_only_result[[1]], xlab = "Segments", ylab = "Estimated Recombination Rate", main = "Estimated recombination map with LDJump using FASTA")
dev.off()
####Plotting####
#Load dataset (provides `results`, the saved VCF run)
load("results_comparison_vcf_fasta.RData")
#Transform VCF result into step-plot coordinates: each segment contributes
#its left and right end paired with the same estimated rate.
demoF <- results
mappingvalsF <- data.frame(steps = sort(c(demoF[[1]]$leftEnd, demoF[[1]]$rightEnd)),
                           values = as.vector(matrix(c(demoF[[1]]$value, demoF[[1]]$value), nrow=2, byrow=TRUE)),
                           Demography = FALSE)
#Plot
rmpvcf <- ggplot2::ggplot() +
  geom_step(data = mappingvalsF,
            mapping= aes(x = steps, y=values),
            direction = "vh", linetype = 1) +
  scale_y_continuous(breaks = seq(0,0.15,by=0.05))+
  coord_cartesian(ylim=c(0, 0.15))+
  ggtitle(expression(atop("Estimated recombination map using" , paste(italic("LDJump"), " with VCF format")))) +
  xlab("Jumps between 1kb segments") +
  ylab("Recombination rate per 1kb segment") +
  theme(axis.text = element_text(size = 8),
        plot.title=element_text(hjust=0.5),
        legend.key.size = unit(0.5, "cm"))
#Transform FASTA result the same way
demoF <- fasta_only_result
mappingvalsF <- data.frame(steps = sort(c(demoF[[1]]$leftEnd, demoF[[1]]$rightEnd)),
                           values = as.vector(matrix(c(demoF[[1]]$value, demoF[[1]]$value), nrow=2, byrow=TRUE)),
                           Demography = FALSE)
#Plot
rmpfasta <- ggplot2::ggplot() +
  geom_step(data = mappingvalsF,
            mapping= aes(x = steps, y=values),
            direction = "vh", linetype = 1) +
  scale_y_continuous(breaks = seq(0,0.15,by=0.05))+
  coord_cartesian(ylim=c(0, 0.15))+
  ggtitle(expression(atop("Estimated recombination map using" , paste(italic("LDJump"), " with FASTA format")))) +
  xlab("Jumps between 1kb segments") +
  ylab("Recombination rate per 1kb segment") +
  theme(axis.text = element_text(size = 8),
        plot.title=element_text(hjust=0.5),
        legend.key.size = unit(0.5, "cm"),
        axis.title.y = element_blank())
#Combine plots side by side (A = VCF, B = FASTA)
combined <- cowplot::plot_grid(
  rmpvcf,
  rmpfasta,
  ncol = 2,
  align = "h",
  labels = c("A", "B"))
#Save plot (BUGFIX: removed the trailing comma, which left an empty argument)
ggsave(
  filename="Comparison-VCF-FASTA.png",
  plot = combined,
  device = "png",
  path = NULL,
  scale = 1,
  width = 20,
  height = 10,
  units = c("cm"),
  dpi = 300,
  limitsize = TRUE
)
|
/Validation-Of-Update/git_validation_of_update.R
|
no_license
|
fardokhtsadat/LDJump-thesis
|
R
| false
| false
| 4,209
|
r
|
####Info####
"This file describes the work process for the validation of LDJump (Chapter 5.1.3. \"Validation of the update\"), which includes
1. Loading the data
2. Transforming
3. Visualizing in R
"
####Running LDJump####
#Loading the packages:
library(parallel)
library(seqinr)
library(vcfR)
library(ape)
library(LDJump)
#ggplot2 must be attached: geom_step(), aes(), ggsave(), unit() etc. are
#called unqualified in the plotting section below.
library(ggplot2)
#Setting the working directory:
"Please consider setting your working directory in the folder in which you have all the files that you will be using (VCF-file, Reference-File)"
setwd("~/Validation-Of-Update")
#The files we are using.
#NOTE: run only ONE of the two groups below; executing both means Group 2
#silently overwrites the Group 1 assignments.
#Group 1:
ref_seq = "Reference_CH21_41187000_41290679.fasta"
vcf_file = "TSI_21_41187000_41290679.vcf"
fasta_seq = "TSI_CH21_41187000_41290679.fa"
startofseq = 41187000
endofseq = 41290679
#Group 2:
ref_seq = "41m_41m10k.fa"
vcf_file = "41m_41m10k.vcf"
fasta_seq = "my_fasta.fasta"
startofseq = 41000000
endofseq = 41010000
#Running the code:
##Using our VCF-File (wall-clock time is recorded around each call):
start_time_vcf <- Sys.time()
results_testing = LDJump(vcf_file, chr = 21 , segLength = 1000, cores = 6, pathPhi = "/home/roots/anaconda3/bin/Phi", format = "vcf", refName = ref_seq, lengthofseq = endofseq-startofseq, startofseq = startofseq, endofseq = endofseq)
end_time_vcf <- Sys.time()
time_taken_vcf <- end_time_vcf - start_time_vcf
##Using a FASTA-File of the same DNA sequence for comparison:
start_time_fasta <- Sys.time()
fasta_only_result = LDJump(fasta_seq , segLength = 1000, cores = 3, pathPhi = "/home/roots/anaconda3/bin/Phi", format = "fasta", refName = ref_seq)
end_time_fasta <- Sys.time()
time_taken_fasta <- end_time_fasta - start_time_fasta
#Comparison of the results:
#BUGFIX: the VCF run is stored in `results_testing` (the name `results` is
#only created later by load()); also use the pdf() device so the .pdf files
#actually contain PDF rather than PostScript output.
pdf("ResultsVCF.pdf")
plot(results_testing[[1]], xlab = "Segments", ylab = "Estimated Recombination Rate", main = "Estimated recombination map with LDJump using VCF")
dev.off()
pdf("ResultsFASTA.pdf")
plot(fasta_only_result[[1]], xlab = "Segments", ylab = "Estimated Recombination Rate", main = "Estimated recombination map with LDJump using FASTA")
dev.off()
####Plotting####
#Load dataset (provides `results`, the saved VCF run)
load("results_comparison_vcf_fasta.RData")
#Transform VCF result into step-plot coordinates: each segment contributes
#its left and right end paired with the same estimated rate.
demoF <- results
mappingvalsF <- data.frame(steps = sort(c(demoF[[1]]$leftEnd, demoF[[1]]$rightEnd)),
                           values = as.vector(matrix(c(demoF[[1]]$value, demoF[[1]]$value), nrow=2, byrow=TRUE)),
                           Demography = FALSE)
#Plot
rmpvcf <- ggplot2::ggplot() +
  geom_step(data = mappingvalsF,
            mapping= aes(x = steps, y=values),
            direction = "vh", linetype = 1) +
  scale_y_continuous(breaks = seq(0,0.15,by=0.05))+
  coord_cartesian(ylim=c(0, 0.15))+
  ggtitle(expression(atop("Estimated recombination map using" , paste(italic("LDJump"), " with VCF format")))) +
  xlab("Jumps between 1kb segments") +
  ylab("Recombination rate per 1kb segment") +
  theme(axis.text = element_text(size = 8),
        plot.title=element_text(hjust=0.5),
        legend.key.size = unit(0.5, "cm"))
#Transform FASTA result the same way
demoF <- fasta_only_result
mappingvalsF <- data.frame(steps = sort(c(demoF[[1]]$leftEnd, demoF[[1]]$rightEnd)),
                           values = as.vector(matrix(c(demoF[[1]]$value, demoF[[1]]$value), nrow=2, byrow=TRUE)),
                           Demography = FALSE)
#Plot
rmpfasta <- ggplot2::ggplot() +
  geom_step(data = mappingvalsF,
            mapping= aes(x = steps, y=values),
            direction = "vh", linetype = 1) +
  scale_y_continuous(breaks = seq(0,0.15,by=0.05))+
  coord_cartesian(ylim=c(0, 0.15))+
  ggtitle(expression(atop("Estimated recombination map using" , paste(italic("LDJump"), " with FASTA format")))) +
  xlab("Jumps between 1kb segments") +
  ylab("Recombination rate per 1kb segment") +
  theme(axis.text = element_text(size = 8),
        plot.title=element_text(hjust=0.5),
        legend.key.size = unit(0.5, "cm"),
        axis.title.y = element_blank())
#Combine plots side by side (A = VCF, B = FASTA)
combined <- cowplot::plot_grid(
  rmpvcf,
  rmpfasta,
  ncol = 2,
  align = "h",
  labels = c("A", "B"))
#Save plot (BUGFIX: removed the trailing comma, which left an empty argument)
ggsave(
  filename="Comparison-VCF-FASTA.png",
  plot = combined,
  device = "png",
  path = NULL,
  scale = 1,
  width = 20,
  height = 10,
  units = c("cm"),
  dpi = 300,
  limitsize = TRUE
)
|
/simR_2N_Eventos.r
|
no_license
|
aalmeida89/AD_SimPLE
|
R
| false
| false
| 10,334
|
r
| ||
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties Class
#'
#' @field hc.name
#' @field hc.tags
#' @field hc.mbean.name
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model for the AEM OSGi configuration node
# "com.adobe.aem.upgrade.prechecks.hc.impl.DeprecateIndexesHC".
# Generated code: each field holds an R6 config-property object
# (ConfigNodePropertyString / ConfigNodePropertyArray), not a bare vector.
# The ConfigNodeProperty* classes must be in scope when deserialising.
ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties <- R6::R6Class(
  'ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties',
  public = list(
    `hc.name` = NULL,        # health check name (ConfigNodePropertyString)
    `hc.tags` = NULL,        # health check tags (ConfigNodePropertyArray)
    `hc.mbean.name` = NULL,  # MBean name (ConfigNodePropertyString)
    # Construct with any subset of fields; each supplied value must be R6.
    initialize = function(`hc.name`, `hc.tags`, `hc.mbean.name`){
      if (!missing(`hc.name`)) {
        stopifnot(R6::is.R6(`hc.name`))
        self$`hc.name` <- `hc.name`
      }
      if (!missing(`hc.tags`)) {
        stopifnot(R6::is.R6(`hc.tags`))
        self$`hc.tags` <- `hc.tags`
      }
      if (!missing(`hc.mbean.name`)) {
        stopifnot(R6::is.R6(`hc.mbean.name`))
        self$`hc.mbean.name` <- `hc.mbean.name`
      }
    },
    # Serialise the set (non-NULL) fields into a named list of JSON fragments.
    toJSON = function() {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- list()
      if (!is.null(self$`hc.name`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.name']] <- self$`hc.name`$toJSON()
      }
      if (!is.null(self$`hc.tags`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.tags']] <- self$`hc.tags`$toJSON()
      }
      if (!is.null(self$`hc.mbean.name`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.mbean.name']] <- self$`hc.mbean.name`$toJSON()
      }
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject
    },
    # Populate fields from a JSON string; keys absent in the JSON stay NULL.
    fromJSON = function(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson) {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- jsonlite::fromJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson)
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.name`)) {
        hc.nameObject <- ConfigNodePropertyString$new()
        hc.nameObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.name, auto_unbox = TRUE))
        self$`hc.name` <- hc.nameObject
      }
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.tags`)) {
        hc.tagsObject <- ConfigNodePropertyArray$new()
        hc.tagsObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.tags, auto_unbox = TRUE))
        self$`hc.tags` <- hc.tagsObject
      }
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.mbean.name`)) {
        hc.mbean.nameObject <- ConfigNodePropertyString$new()
        hc.mbean.nameObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.mbean.name, auto_unbox = TRUE))
        self$`hc.mbean.name` <- hc.mbean.nameObject
      }
    },
    # Build a JSON string via a fixed template.
    # NOTE(review): unlike toJSON(), this errors if any field is NULL --
    # it assumes all three fields were set.
    toJSONString = function() {
      sprintf(
        '{
           "hc.name": %s,
           "hc.tags": %s,
           "hc.mbean.name": %s
        }',
        self$`hc.name`$toJSON(),
        self$`hc.tags`$toJSON(),
        self$`hc.mbean.name`$toJSON()
      )
    },
    # Populate all three fields from a JSON string.
    # NOTE(review): unlike fromJSON(), this assumes every key is present.
    fromJSONString = function(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson) {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- jsonlite::fromJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson)
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`hc.name` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.name, auto_unbox = TRUE))
      ConfigNodePropertyArrayObject <- ConfigNodePropertyArray$new()
      self$`hc.tags` <- ConfigNodePropertyArrayObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.tags, auto_unbox = TRUE))
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`hc.mbean.name` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.mbean.name, auto_unbox = TRUE))
    }
  )
)
|
/clients/r/generated/R/ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties.r
|
permissive
|
shinesolutions/swagger-aem-osgi
|
R
| false
| false
| 4,531
|
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties Class
#'
#' @field hc.name
#' @field hc.tags
#' @field hc.mbean.name
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model for the AEM OSGi configuration node
# "com.adobe.aem.upgrade.prechecks.hc.impl.DeprecateIndexesHC".
# Generated code: each field holds an R6 config-property object
# (ConfigNodePropertyString / ConfigNodePropertyArray), not a bare vector.
# The ConfigNodeProperty* classes must be in scope when deserialising.
ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties <- R6::R6Class(
  'ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCProperties',
  public = list(
    `hc.name` = NULL,        # health check name (ConfigNodePropertyString)
    `hc.tags` = NULL,        # health check tags (ConfigNodePropertyArray)
    `hc.mbean.name` = NULL,  # MBean name (ConfigNodePropertyString)
    # Construct with any subset of fields; each supplied value must be R6.
    initialize = function(`hc.name`, `hc.tags`, `hc.mbean.name`){
      if (!missing(`hc.name`)) {
        stopifnot(R6::is.R6(`hc.name`))
        self$`hc.name` <- `hc.name`
      }
      if (!missing(`hc.tags`)) {
        stopifnot(R6::is.R6(`hc.tags`))
        self$`hc.tags` <- `hc.tags`
      }
      if (!missing(`hc.mbean.name`)) {
        stopifnot(R6::is.R6(`hc.mbean.name`))
        self$`hc.mbean.name` <- `hc.mbean.name`
      }
    },
    # Serialise the set (non-NULL) fields into a named list of JSON fragments.
    toJSON = function() {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- list()
      if (!is.null(self$`hc.name`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.name']] <- self$`hc.name`$toJSON()
      }
      if (!is.null(self$`hc.tags`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.tags']] <- self$`hc.tags`$toJSON()
      }
      if (!is.null(self$`hc.mbean.name`)) {
        ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject[['hc.mbean.name']] <- self$`hc.mbean.name`$toJSON()
      }
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject
    },
    # Populate fields from a JSON string; keys absent in the JSON stay NULL.
    fromJSON = function(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson) {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- jsonlite::fromJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson)
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.name`)) {
        hc.nameObject <- ConfigNodePropertyString$new()
        hc.nameObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.name, auto_unbox = TRUE))
        self$`hc.name` <- hc.nameObject
      }
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.tags`)) {
        hc.tagsObject <- ConfigNodePropertyArray$new()
        hc.tagsObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.tags, auto_unbox = TRUE))
        self$`hc.tags` <- hc.tagsObject
      }
      if (!is.null(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$`hc.mbean.name`)) {
        hc.mbean.nameObject <- ConfigNodePropertyString$new()
        hc.mbean.nameObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.mbean.name, auto_unbox = TRUE))
        self$`hc.mbean.name` <- hc.mbean.nameObject
      }
    },
    # Build a JSON string via a fixed template.
    # NOTE(review): unlike toJSON(), this errors if any field is NULL --
    # it assumes all three fields were set.
    toJSONString = function() {
      sprintf(
        '{
           "hc.name": %s,
           "hc.tags": %s,
           "hc.mbean.name": %s
        }',
        self$`hc.name`$toJSON(),
        self$`hc.tags`$toJSON(),
        self$`hc.mbean.name`$toJSON()
      )
    },
    # Populate all three fields from a JSON string.
    # NOTE(review): unlike fromJSON(), this assumes every key is present.
    fromJSONString = function(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson) {
      ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject <- jsonlite::fromJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesJson)
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`hc.name` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.name, auto_unbox = TRUE))
      ConfigNodePropertyArrayObject <- ConfigNodePropertyArray$new()
      self$`hc.tags` <- ConfigNodePropertyArrayObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.tags, auto_unbox = TRUE))
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`hc.mbean.name` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComAdobeAemUpgradePrechecksHcImplDeprecateIndexesHCPropertiesObject$hc.mbean.name, auto_unbox = TRUE))
    }
  )
)
|
#' Plot two smartpca principal components against each other.
#'
#' Reads `smartpca.<dataset_thinning>.evec` (first line = eigenvalues, the
#' rest = per-sample loadings), joins sample metadata, and returns a ggplot
#' of PC `pc1` vs PC `pc2` coloured by `color_var` and optionally shaped by
#' `shape_var`. Axis labels report the % of variance explained by each PC.
#'
#' @param dataset_thinning Name of the smartpca run (directory/file stem).
#' @param pc1,pc2 Integer indices of the PCs to plot.
#' @param color_var,shape_var Unquoted metadata columns (tidy-eval).
#' @param title_string Optional title prefix; defaults to the dataset name.
#'
#' BUGFIX: `shape_var` was tested with is.null() AFTER rlang::enquo(); a
#' quosure is never NULL, so the "no shape" branch was unreachable. We now
#' use rlang::quo_is_null(), so omitting `shape_var` really drops the
#' shape mapping (and its legend title).
plot_pca <-
  function(dataset_thinning,
           pc1,
           pc2,
           color_var,
           shape_var = NULL,
           title_string = NULL) {
    col1 <- rlang::sym(str_c("PC", as.character(pc1)))
    col2 <- rlang::sym(str_c("PC", as.character(pc2)))
    shape_var <- rlang::enquo(shape_var)
    color_var <- rlang::enquo(color_var)
    # TRUE only when the caller actually supplied a shape column
    has_shape <- !rlang::quo_is_null(shape_var)
    path <-
      glue::glue(
        "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
      )
    # Eigenvalues live on the commented first line of the .evec file
    val <-
      read_lines(here::here(path), n_max = 1) %>%
      str_remove("#eigvals:") %>%
      str_squish() %>%
      str_split(pattern = "[[:space:]]") %>%
      flatten_chr() %>%
      map_dbl( ~ as.numeric(.x))
    # Per-sample loadings plus sample metadata (last column X12 is dropped)
    vec <-
      read_table2(here::here(path), skip = 1, col_names = FALSE) %>%
      select(-X12) %>%
      set_names(c("international_id", map_chr(1:10, ~ str_c("PC", .x)))) %>%
      mutate(international_id = str_remove(international_id, ":[[:graph:]]+")) %>%
      left_join(
        read_csv(here::here("data/derived_data/metadata/bovine_demo.sample_metadata.csv")) %>%
          mutate(international_id = if_else(international_id == "CPC98_Bprimigenius_EnglandDerbyshire_5936", "ancient_derbyshire", international_id)) %>%
          select(international_id, population, region, continent, species)
      ) %>%
      mutate(
        species = stringr::str_to_sentence(species),
        region = stringr::str_to_title(region),
        continent = stringr::str_to_title(continent),
        population = stringr::str_to_title(population)
      )
    # Base plot; add the shape aesthetic only when one was supplied
    plot <-
      vec %>%
      ggplot(aes(x = !!col1, y = !!col2))
    plot <-
      if (has_shape) {
        plot +
          geom_point(aes(color = !!color_var, shape = !!shape_var),
                     alpha = 0.6)
      } else {
        plot +
          geom_point(aes(color = !!color_var),
                     alpha = 0.6)
      }
    plot <-
      plot +
      bakeoff::scale_color_bakeoff() +
      guides(colour = guide_legend(override.aes = list(alpha = 1))) +
      theme_classic() +
      labs(
        x = str_c("PC ", pc1, ": ", scales::percent(val[pc1] / sum(val))),
        y = str_c("PC ", pc2, ": ", scales::percent(val[pc2] / sum(val))),
        color = stringr::str_to_sentence(rlang::quo_name(color_var))
      )
    if (has_shape) {
      plot <- plot +
        labs(shape = stringr::str_to_sentence(rlang::quo_name(shape_var)))
    }
    if (is.null(title_string)) {
      plot <- plot +
        labs(title = glue::glue("{dataset_thinning}: PC {pc1} vs. PC {pc2}"))
    } else {
      plot <- plot +
        labs(title = glue::glue("{title_string}: PC {pc1} vs. PC {pc2}"))
    }
    return(plot)
  }
#' Scree plot of variance explained per smartpca principal component.
#'
#' Parses the eigenvalue header line of `smartpca.<dataset_thinning>.evec`
#' and draws one bar per PC, annotated with its % of the total variance.
#'
#' @param dataset_thinning Name of the smartpca run (directory/file stem).
#' @param title_string Optional title; defaults to the dataset name.
#'
#' BUGFIX: geom_col() has no `stat` argument (its stat is always "identity");
#' passing `stat = "identity"` produced an "ignoring unknown parameters"
#' warning, so the argument has been dropped.
ggscree <- function(dataset_thinning, title_string = NULL) {
  path <-
    glue::glue(
      "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
    )
  # One row per PC with its eigenvalue and % variance explained
  val <-
    read_lines(here::here(path), n_max = 1) %>%
    str_remove("#eigvals:") %>%
    str_squish() %>%
    str_split(pattern = "[[:space:]]") %>%
    flatten_chr() %>%
    as_tibble() %>%
    mutate(
      PC = as.character(glue::glue("PC{row_number()}")),
      PC = forcats::fct_inorder(PC),
      value = as.double(value),
      varexp = round((value / sum(value))*100, digits = 2)
    )
  plot <-
    val %>%
    ggplot2::ggplot() +
    ggplot2::geom_col(mapping = ggplot2::aes(x = PC, y = varexp)) +
    #scale_y_continuous(limits = c(0, 15)) +
    geom_text(
      aes(
        x = PC,
        y = varexp,
        label = str_c(varexp, "%")
      ),
      position = position_dodge(width = 0.9),
      vjust = -0.25
    ) +
    theme_classic() +
    labs(x = NULL,
         y = "% Variation explained")
  if (is.null(title_string)) {
    plot <- plot +
      labs(title = dataset_thinning)
  } else {
    plot <- plot +
      labs(title = title_string)
  }
  return(plot)
}
# Read smartpca eigenvector output for a given dataset/thinning combination
# and attach sample metadata for each individual.
#
# dataset_thinning: string identifying the smartpca run; used to build the
#   .evec input path.
# Returns a tibble with one row per sample: international_id, PC1-PC10, and
# the joined metadata columns (population, region, continent, species).
#
# NOTE(review): depends on a `sample_metadata` data frame existing in the
# calling environment -- confirm it is loaded before calling.
# NOTE(review): readr::read_table2() is deprecated in favor of read_table().
read_vec <-
  function(dataset_thinning) {
    path <-
      glue::glue(
        "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
      )
    vec <-
      read_table2(here::here(path), skip = 1, col_names = FALSE) %>%
      # Drop the trailing 12th column from the parse (presumably a smartpca
      # label column -- TODO confirm against the .evec format)
      select(-X12) %>%
      set_names(c("international_id", map_chr(1:10, ~ str_c("PC", .x)))) %>%
      # Strip everything from the first ":" onward in the sample ID
      mutate(international_id = str_remove(international_id, ":[[:graph:]]+")) %>%
      left_join(
        sample_metadata %>%
          select(international_id, population, region, continent, species)
      )
    return(vec)
  }
|
/source_functions/plot_pca.R
|
no_license
|
harlydurbin/bovine_demo
|
R
| false
| false
| 4,752
|
r
|
# Scatter plot of two principal components from a smartpca run, with points
# colored (and optionally shaped) by a sample-metadata column.
#
# dataset_thinning: string identifying the smartpca run; used to build file
#   paths and the default plot title.
# pc1, pc2: integer indices of the PCs plotted on x and y.
# color_var: unquoted metadata column used for point color.
# shape_var: optional unquoted metadata column used for point shape.
# title_string: optional title override; defaults to the dataset name.
# Returns a ggplot object; axis labels include % variance explained.
plot_pca <-
  function(dataset_thinning,
           pc1,
           pc2,
           color_var,
           shape_var = NULL,
           title_string = NULL) {
    col1 <- rlang::sym(str_c("PC", as.character(pc1)))
    col2 <- rlang::sym(str_c("PC", as.character(pc2)))
    shape_var <- rlang::enquo(shape_var)
    color_var <- rlang::enquo(color_var)
    path <-
      glue::glue(
        "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
      )
    # Eigenvalues live on the first line of the .evec file
    val <-
      read_lines(here::here(path), n_max = 1) %>%
      str_remove("#eigvals:") %>%
      str_squish() %>%
      str_split(pattern = "[[:space:]]") %>%
      flatten_chr() %>%
      map_dbl(~ as.numeric(.x))
    # Remaining lines: one row per sample with 10 PC coordinates
    vec <-
      read_table2(here::here(path), skip = 1, col_names = FALSE) %>%
      select(-X12) %>%
      set_names(c("international_id", map_chr(1:10, ~ str_c("PC", .x)))) %>%
      # Strip everything from the first ":" onward in the sample ID
      mutate(international_id = str_remove(international_id, ":[[:graph:]]+")) %>%
      left_join(
        read_csv(here::here("data/derived_data/metadata/bovine_demo.sample_metadata.csv")) %>%
          mutate(international_id = if_else(international_id == "CPC98_Bprimigenius_EnglandDerbyshire_5936", "ancient_derbyshire", international_id)) %>%
          select(international_id, population, region, continent, species)
      ) %>%
      mutate(
        species = stringr::str_to_sentence(species),
        region = stringr::str_to_title(region),
        continent = stringr::str_to_title(continent),
        population = stringr::str_to_title(population)
      )
    # BUGFIX: after enquo(), `shape_var` is always a quosure, so is.null()
    # could never be TRUE; quo_is_null() is the correct test for an absent
    # argument.
    plot <-
      if (rlang::quo_is_null(shape_var)) {
        vec %>%
          ggplot(aes(
            x = !!col1,
            y = !!col2
          )) +
          geom_point(aes(color = !!color_var),
                     alpha = 0.6)
      } else {
        vec %>%
          ggplot(aes(
            x = !!col1,
            y = !!col2
          )) +
          geom_point(aes(color = !!color_var,
                         shape = !!shape_var),
                     alpha = 0.6)
      }
    plot <-
      plot +
      bakeoff::scale_color_bakeoff() +
      # Render legend keys fully opaque even though points are alpha = 0.6
      guides(colour = guide_legend(override.aes = list(alpha = 1))) +
      theme_classic() +
      labs(
        x = str_c("PC ", pc1, ": ", scales::percent(val[pc1] / sum(val))),
        y = str_c("PC ", pc2, ": ", scales::percent(val[pc2] / sum(val))),
        # color_var was already captured with enquo() above; no need to
        # enquo() it a second time
        color = stringr::str_to_sentence(rlang::quo_name(color_var))
      )
    plot <-
      if (rlang::quo_is_null(shape_var)) {
        plot
      } else {
        plot +
          labs(shape = stringr::str_to_sentence(rlang::quo_name(shape_var)))
      }
    plot <-
      if (is.null(title_string)) {
        plot +
          labs(title = glue::glue("{dataset_thinning}: PC {pc1} vs. PC {pc2}"))
      } else {
        plot +
          labs(title = glue::glue("{title_string}: PC {pc1} vs. PC {pc2}"))
      }
    return(plot)
  }
# Scree plot of percent variance explained per principal component for a
# smartpca run.
#
# Reads the eigenvalues from the first line of the smartpca .evec output
# (prefixed "#eigvals:"), converts them to percent variance explained, and
# draws a labelled bar chart.
#
# dataset_thinning: string identifying the smartpca run; used to build the
#   input path and, when title_string is NULL, as the plot title.
# title_string: optional plot title override.
# Returns a ggplot object.
ggscree <- function(dataset_thinning, title_string = NULL) {
  path <-
    glue::glue(
      "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
    )
  # Eigenvalues live on the first line of the .evec file
  val <-
    read_lines(here::here(path), n_max = 1) %>%
    str_remove("#eigvals:") %>%
    str_squish() %>%
    str_split(pattern = "[[:space:]]") %>%
    flatten_chr() %>%
    as_tibble() %>%
    mutate(
      PC = as.character(glue::glue("PC{row_number()}")),
      # Keep PCs in file order rather than alphabetical (PC10 would sort
      # before PC2 otherwise)
      PC = forcats::fct_inorder(PC),
      value = as.double(value),
      varexp = round((value / sum(value)) * 100, digits = 2)
    )
  plot <-
    val %>%
    ggplot2::ggplot() +
    # geom_col() already uses the identity stat; passing stat = "identity"
    # explicitly is redundant (and rejected by newer ggplot2 versions)
    ggplot2::geom_col(mapping = ggplot2::aes(x = PC, y = varexp)) +
    #scale_y_continuous(limits = c(0, 15)) +
    geom_text(
      aes(
        x = PC,
        y = varexp,
        label = str_c(varexp, "%")
      ),
      position = position_dodge(width = 0.9),
      vjust = -0.25
    ) +
    theme_classic() +
    labs(x = NULL,
         y = "% Variation explained")
  plot <-
    if (is.null(title_string)) {
      plot +
        labs(title = dataset_thinning)
    } else {
      plot +
        labs(title = title_string)
    }
  return(plot)
}
# Read smartpca eigenvector output for a given dataset/thinning combination
# and attach sample metadata for each individual.
#
# dataset_thinning: string identifying the smartpca run; used to build the
#   .evec input path.
# Returns a tibble with one row per sample: international_id, PC1-PC10, and
# the joined metadata columns (population, region, continent, species).
#
# NOTE(review): depends on a `sample_metadata` data frame existing in the
# calling environment -- confirm it is loaded before calling.
# NOTE(review): readr::read_table2() is deprecated in favor of read_table().
read_vec <-
  function(dataset_thinning) {
    path <-
      glue::glue(
        "data/derived_data/smartpca/{dataset_thinning}/smartpca.{dataset_thinning}.evec"
      )
    vec <-
      read_table2(here::here(path), skip = 1, col_names = FALSE) %>%
      # Drop the trailing 12th column from the parse (presumably a smartpca
      # label column -- TODO confirm against the .evec format)
      select(-X12) %>%
      set_names(c("international_id", map_chr(1:10, ~ str_c("PC", .x)))) %>%
      # Strip everything from the first ":" onward in the sample ID
      mutate(international_id = str_remove(international_id, ":[[:graph:]]+")) %>%
      left_join(
        sample_metadata %>%
          select(international_id, population, region, continent, species)
      )
    return(vec)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rho_bounds.R
\name{get_r_uz_bounds}
\alias{get_r_uz_bounds}
\title{Evaluates r_uz bounds given user restrictions on r_TstarU and kappa}
\usage{
get_r_uz_bounds(r_TstarU_lower, r_TstarU_upper, k_lower, k_upper, obs)
}
\arguments{
\item{r_TstarU_lower}{Vector of lower bounds of endogeneity}
\item{r_TstarU_upper}{Vector of upper bounds of endogeneity}
\item{k_lower}{Vector of lower bounds on measurement error}
\item{k_upper}{Vector of upper bounds on measurement error}
\item{obs}{Observables generated by get_observables}
}
\value{
2-column data frame of lower and upper bounds of r_uz
}
\description{
This function takes observables from the data and user beliefs over the extent
of measurement error (kappa) and the direction of endogeneity (r_TstarU) to
generate the implied bounds on instrument validity (r_uz)
}
|
/ivdoctr/man/get_r_uz_bounds.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 905
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rho_bounds.R
\name{get_r_uz_bounds}
\alias{get_r_uz_bounds}
\title{Evaluates r_uz bounds given user restrictions on r_TstarU and kappa}
\usage{
get_r_uz_bounds(r_TstarU_lower, r_TstarU_upper, k_lower, k_upper, obs)
}
\arguments{
\item{r_TstarU_lower}{Vector of lower bounds of endogeneity}
\item{r_TstarU_upper}{Vector of upper bounds of endogeneity}
\item{k_lower}{Vector of lower bounds on measurement error}
\item{k_upper}{Vector of upper bounds on measurement error}
\item{obs}{Observables generated by get_observables}
}
\value{
2-column data frame of lower and upper bounds of r_uz
}
\description{
This function takes observables from the data and user beliefs over the extent
of measurement error (kappa) and the direction of endogeneity (r_TstarU) to
generate the implied bounds on instrument validity (r_uz)
}
|
# Association rules mining on the Groceries transactions dataset.
library(arules)
library(arulesViz)
library(ggplot2)
# BUGFIX: dplyr supplies %>%, group_by(), summarise(), and arrange() used
# below; none of the packages above attach them, so the script failed here.
library(dplyr)
# Import the Groceries dataset
data('Groceries')
dim(Groceries)
# Review the data levels that we're dealing with to understand what to look for.
head(itemInfo(Groceries))
# How many different item types are there and how frequent are they?
itemsLevel1 <- itemInfo(Groceries)['level1']
itemsLevel1 <- itemsLevel1 %>%
  group_by(level1) %>%
  summarise(count = n()) %>%
  arrange(count)
# Bar plot showing Level 1 Item frequency
ggplot(itemsLevel1, aes(x = reorder(level1, -count), y = count)) +
  geom_bar(stat = 'identity', fill = 'blue') +
  theme(axis.text.x = element_text(angle = 15)) +
  labs(x = 'Item (Level 1)', y = 'Frequency')
# Create two sets of rules and compare results.
# Low support with high confidence
firstRules <- apriori(Groceries, parameter = list(supp = 0.001, conf = 0.9, target = 'rules'))
summary(firstRules)
plot(firstRules, jitter = 1)
# High support and low confidence
secondRules <- apriori(Groceries, parameter = list(supp = 0.05, conf = 0.01, target = 'rules'))
summary(secondRules)
plot(secondRules, jitter = 1)
# First set of rules generates 129 rules, all having a lift greater than 1. Second set of
# rules generates 34 rules, none with much confidence or support and only 6 having a lift
# greater than 1, meaning that they're mostly independent.
|
/Week 3/Alonso_Week3.R
|
no_license
|
sudz4/IST718
|
R
| false
| false
| 1,346
|
r
|
# Association rules mining on the Groceries transactions dataset.
library(arules)
library(arulesViz)
library(ggplot2)
# BUGFIX: dplyr supplies %>%, group_by(), summarise(), and arrange() used
# below; none of the packages above attach them, so the script failed here.
library(dplyr)
# Import the Groceries dataset
data('Groceries')
dim(Groceries)
# Review the data levels that we're dealing with to understand what to look for.
head(itemInfo(Groceries))
# How many different item types are there and how frequent are they?
itemsLevel1 <- itemInfo(Groceries)['level1']
itemsLevel1 <- itemsLevel1 %>%
  group_by(level1) %>%
  summarise(count = n()) %>%
  arrange(count)
# Bar plot showing Level 1 Item frequency
ggplot(itemsLevel1, aes(x = reorder(level1, -count), y = count)) +
  geom_bar(stat = 'identity', fill = 'blue') +
  theme(axis.text.x = element_text(angle = 15)) +
  labs(x = 'Item (Level 1)', y = 'Frequency')
# Create two sets of rules and compare results.
# Low support with high confidence
firstRules <- apriori(Groceries, parameter = list(supp = 0.001, conf = 0.9, target = 'rules'))
summary(firstRules)
plot(firstRules, jitter = 1)
# High support and low confidence
secondRules <- apriori(Groceries, parameter = list(supp = 0.05, conf = 0.01, target = 'rules'))
summary(secondRules)
plot(secondRules, jitter = 1)
# First set of rules generates 129 rules, all having a lift greater than 1. Second set of
# rules generates 34 rules, none with much confidence or support and only 6 having a lift
# greater than 1, meaning that they're mostly independent.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fviz_hmfa.R
\name{fviz_hmfa}
\alias{fviz_hmfa}
\alias{fviz_hmfa_ind}
\alias{fviz_hmfa_var}
\alias{fviz_hmfa_quali_biplot}
\title{Visualize Hierarchical Multiple Factor Analysis}
\usage{
fviz_hmfa_ind(X, axes = c(1, 2), geom = c("point", "text"),
repel = FALSE, habillage = "none", addEllipses = FALSE,
shape.ind = 19, col.ind = "blue", col.ind.sup = "darkblue",
alpha.ind = 1, select.ind = list(name = NULL, cos2 = NULL, contrib =
NULL), partial = NULL, col.partial = "group", group.names = NULL,
node.level = 1, ...)
fviz_hmfa_var(X, choice = c("quanti.var", "quali.var", "group"),
axes = c(1, 2), geom = c("point", "text"), repel = FALSE,
col.var = "red", alpha.var = 1, shape.var = 17,
col.var.sup = "darkgreen", select.var = list(name = NULL, cos2 =
NULL, contrib = NULL), ...)
fviz_hmfa_quali_biplot(X, axes = c(1, 2), geom = c("point", "text"),
repel = FALSE, habillage = "none",
title = "Biplot of individuals and qualitative variables - HMFA", ...)
fviz_hmfa(X, ...)
}
\arguments{
\item{X}{an object of class HMFA [FactoMineR].}
\item{axes}{a numeric vector of length 2 specifying the dimensions to be
plotted.}
\item{geom}{a text specifying the geometry to be used for the graph. Allowed
values are the combination of \code{c("point", "arrow", "text")}. Use
\code{"point"} (to show only points); \code{"text"} to show only labels;
\code{c("point", "text")} or \code{c("arrow", "text")} to show arrows and
texts. Using \code{c("arrow", "text")} is sensible only for the graph of
variables.}
\item{repel}{a boolean, whether to use ggrepel to avoid overplotting text
labels or not.}
\item{habillage}{an optional factor variable for coloring the observations by
groups. Default value is "none". If X is an HMFA object from FactoMineR
package, habillage can also specify the index of the factor variable in the
data.}
\item{addEllipses}{logical value. If TRUE, draws ellipses around the
individuals when habillage != "none".}
\item{shape.ind, shape.var}{point shapes of individuals and variables,
respectively.}
\item{col.ind, col.var}{color for individuals, partial individuals and
variables, respectively. Can be a continuous variable or a factor variable.
Possible values include also : "cos2", "contrib", "coord", "x" or "y". In
this case, the colors for individuals/variables are automatically controlled
by their qualities ("cos2"), contributions ("contrib"), coordinates (x^2 +
y^2 , "coord"), x values("x") or y values("y"). To use automatic coloring
(by cos2, contrib, ....), make sure that habillage ="none".}
\item{col.ind.sup}{color for supplementary individuals}
\item{alpha.ind, alpha.var}{controls the transparency of individual, partial
individual and variable, respectively. The value can variate from 0 (total
transparency) to 1 (no transparency). Default value is 1. Possible values
include also : "cos2", "contrib", "coord", "x" or "y". In this case, the
transparency for individual/variable colors are automatically controlled by
their qualities ("cos2"), contributions ("contrib"), coordinates (x^2 + y^2
, "coord"), x values("x") or y values("y"). To use this, make sure that
habillage ="none".}
\item{select.ind, select.var}{a selection of individuals and variables to be
drawn. Allowed values are NULL or a list containing the arguments name, cos2
or contrib: \itemize{ \item name is a character vector containing
individuals/variables to be drawn \item cos2 if cos2 is in [0, 1], ex: 0.6,
then individuals/variables with a cos2 > 0.6 are drawn. if cos2 > 1, ex: 5,
then the top 5 individuals/variables with the highest cos2 are drawn. \item
contrib if contrib > 1, ex: 5, then the top 5 individuals/variables with
the highest cos2 are drawn }}
\item{partial}{list of the individuals for which the partial points should be
drawn. (by default, partial = NULL and no partial points are drawn). Use
partial = "All" to visualize partial points for all individuals.}
\item{col.partial}{color for partial individuals. By default, points are
colored according to the groups.}
\item{group.names}{a vector containing the name of the groups (by default,
NULL and the group are named group.1, group.2 and so on).}
\item{node.level}{a single number indicating the HMFA node level to plot.}
\item{...}{Arguments to be passed to the function fviz() and ggpubr::ggpar()}
\item{choice}{the graph to plot. Allowed values include one of c("quanti.var",
"quali.var", "group") for plotting quantitative variables, qualitative
variables and group of variables, respectively.}
\item{col.var.sup}{color for supplementary variables.}
\item{title}{the title of the graph}
}
\value{
a ggplot
}
\description{
Hierarchical Multiple Factor Analysis (HMFA) is an extension of
MFA, used in a situation where the data are organized into a hierarchical
structure. fviz_hmfa() provides ggplot2-based elegant visualization of HMFA
outputs from the R function: HMFA [FactoMineR].\cr\cr \itemize{
\item{fviz_hmfa_ind(): Graph of individuals} \item{fviz_hmfa_var(): Graph of
variables} \item{fviz_hmfa_quali_biplot(): Biplot of individuals and
qualitative variables} \item{fviz_hmfa(): An alias of fviz_hmfa_ind()} }
}
\examples{
# Hierarchical Multiple Factor Analysis
# ++++++++++++++++++++++++
# Install and load FactoMineR to compute MFA
# install.packages("FactoMineR")
library("FactoMineR")
data(wine)
hierar <- list(c(2,5,3,10,9,2), c(4,2))
res.hmfa <- HMFA(wine, H = hierar, type=c("n",rep("s",5)), graph = FALSE)
# Graph of individuals
# ++++++++++++++++++++
# Color of individuals: col.ind = "#2E9FDF"
# Use repel = TRUE to avoid overplotting (slow if many points)
fviz_hmfa_ind(res.hmfa, repel = TRUE, col.ind = "#2E9FDF")
# Color individuals by groups, add concentration ellipses
# Remove labels: label = "none".
# Change color palette to "jco". See ?ggpubr::ggpar
grp <- as.factor(wine[,1])
p <- fviz_hmfa_ind(res.hmfa, label="none", habillage=grp,
addEllipses=TRUE, palette = "jco")
print(p)
# Graph of variables
# ++++++++++++++++++++++++++++++++++++++++
# Quantitative variables
fviz_hmfa_var(res.hmfa, "quanti.var")
# Graph of categorical variable categories
fviz_hmfa_var(res.hmfa, "quali.var")
# Groups of variables (correlation square)
fviz_hmfa_var(res.hmfa, "group")
# Biplot of categorical variable categories and individuals
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
fviz_hmfa_quali_biplot(res.hmfa)
# Graph of partial individuals (starplot)
# +++++++++++++++++++++++++++++++++++++++
fviz_hmfa_ind(res.hmfa, partial = "all", palette = "Dark2")
}
\references{
http://www.sthda.com/english/
}
\author{
Fabian Mundt \email{f.mundt@inventionate.de}
Alboukadel Kassambara \email{alboukadel.kassambara@gmail.com}
}
|
/man/fviz_hmfa.Rd
|
no_license
|
mattocci27/factoextra
|
R
| false
| true
| 6,826
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fviz_hmfa.R
\name{fviz_hmfa}
\alias{fviz_hmfa}
\alias{fviz_hmfa_ind}
\alias{fviz_hmfa_var}
\alias{fviz_hmfa_quali_biplot}
\title{Visualize Hierarchical Multiple Factor Analysis}
\usage{
fviz_hmfa_ind(X, axes = c(1, 2), geom = c("point", "text"),
repel = FALSE, habillage = "none", addEllipses = FALSE,
shape.ind = 19, col.ind = "blue", col.ind.sup = "darkblue",
alpha.ind = 1, select.ind = list(name = NULL, cos2 = NULL, contrib =
NULL), partial = NULL, col.partial = "group", group.names = NULL,
node.level = 1, ...)
fviz_hmfa_var(X, choice = c("quanti.var", "quali.var", "group"),
axes = c(1, 2), geom = c("point", "text"), repel = FALSE,
col.var = "red", alpha.var = 1, shape.var = 17,
col.var.sup = "darkgreen", select.var = list(name = NULL, cos2 =
NULL, contrib = NULL), ...)
fviz_hmfa_quali_biplot(X, axes = c(1, 2), geom = c("point", "text"),
repel = FALSE, habillage = "none",
title = "Biplot of individuals and qualitative variables - HMFA", ...)
fviz_hmfa(X, ...)
}
\arguments{
\item{X}{an object of class HMFA [FactoMineR].}
\item{axes}{a numeric vector of length 2 specifying the dimensions to be
plotted.}
\item{geom}{a text specifying the geometry to be used for the graph. Allowed
values are the combination of \code{c("point", "arrow", "text")}. Use
\code{"point"} (to show only points); \code{"text"} to show only labels;
\code{c("point", "text")} or \code{c("arrow", "text")} to show arrows and
texts. Using \code{c("arrow", "text")} is sensible only for the graph of
variables.}
\item{repel}{a boolean, whether to use ggrepel to avoid overplotting text
labels or not.}
\item{habillage}{an optional factor variable for coloring the observations by
groups. Default value is "none". If X is an HMFA object from FactoMineR
package, habillage can also specify the index of the factor variable in the
data.}
\item{addEllipses}{logical value. If TRUE, draws ellipses around the
individuals when habillage != "none".}
\item{shape.ind, shape.var}{point shapes of individuals and variables,
respectively.}
\item{col.ind, col.var}{color for individuals, partial individuals and
variables, respectively. Can be a continuous variable or a factor variable.
Possible values include also : "cos2", "contrib", "coord", "x" or "y". In
this case, the colors for individuals/variables are automatically controlled
by their qualities ("cos2"), contributions ("contrib"), coordinates (x^2 +
y^2 , "coord"), x values("x") or y values("y"). To use automatic coloring
(by cos2, contrib, ....), make sure that habillage ="none".}
\item{col.ind.sup}{color for supplementary individuals}
\item{alpha.ind, alpha.var}{controls the transparency of individual, partial
individual and variable, respectively. The value can variate from 0 (total
transparency) to 1 (no transparency). Default value is 1. Possible values
include also : "cos2", "contrib", "coord", "x" or "y". In this case, the
transparency for individual/variable colors are automatically controlled by
their qualities ("cos2"), contributions ("contrib"), coordinates (x^2 + y^2
, "coord"), x values("x") or y values("y"). To use this, make sure that
habillage ="none".}
\item{select.ind, select.var}{a selection of individuals and variables to be
drawn. Allowed values are NULL or a list containing the arguments name, cos2
or contrib: \itemize{ \item name is a character vector containing
individuals/variables to be drawn \item cos2 if cos2 is in [0, 1], ex: 0.6,
then individuals/variables with a cos2 > 0.6 are drawn. if cos2 > 1, ex: 5,
then the top 5 individuals/variables with the highest cos2 are drawn. \item
contrib if contrib > 1, ex: 5, then the top 5 individuals/variables with
the highest cos2 are drawn }}
\item{partial}{list of the individuals for which the partial points should be
drawn. (by default, partial = NULL and no partial points are drawn). Use
partial = "All" to visualize partial points for all individuals.}
\item{col.partial}{color for partial individuals. By default, points are
colored according to the groups.}
\item{group.names}{a vector containing the name of the groups (by default,
NULL and the group are named group.1, group.2 and so on).}
\item{node.level}{a single number indicating the HMFA node level to plot.}
\item{...}{Arguments to be passed to the function fviz() and ggpubr::ggpar()}
\item{choice}{the graph to plot. Allowed values include one of c("quanti.var",
"quali.var", "group") for plotting quantitative variables, qualitative
variables and group of variables, respectively.}
\item{col.var.sup}{color for supplementary variables.}
\item{title}{the title of the graph}
}
\value{
a ggplot
}
\description{
Hierarchical Multiple Factor Analysis (HMFA) is an extension of
MFA, used in a situation where the data are organized into a hierarchical
structure. fviz_hmfa() provides ggplot2-based elegant visualization of HMFA
outputs from the R function: HMFA [FactoMineR].\cr\cr \itemize{
\item{fviz_hmfa_ind(): Graph of individuals} \item{fviz_hmfa_var(): Graph of
variables} \item{fviz_hmfa_quali_biplot(): Biplot of individuals and
qualitative variables} \item{fviz_hmfa(): An alias of fviz_hmfa_ind()} }
}
\examples{
# Hierarchical Multiple Factor Analysis
# ++++++++++++++++++++++++
# Install and load FactoMineR to compute MFA
# install.packages("FactoMineR")
library("FactoMineR")
data(wine)
hierar <- list(c(2,5,3,10,9,2), c(4,2))
res.hmfa <- HMFA(wine, H = hierar, type=c("n",rep("s",5)), graph = FALSE)
# Graph of individuals
# ++++++++++++++++++++
# Color of individuals: col.ind = "#2E9FDF"
# Use repel = TRUE to avoid overplotting (slow if many points)
fviz_hmfa_ind(res.hmfa, repel = TRUE, col.ind = "#2E9FDF")
# Color individuals by groups, add concentration ellipses
# Remove labels: label = "none".
# Change color palette to "jco". See ?ggpubr::ggpar
grp <- as.factor(wine[,1])
p <- fviz_hmfa_ind(res.hmfa, label="none", habillage=grp,
addEllipses=TRUE, palette = "jco")
print(p)
# Graph of variables
# ++++++++++++++++++++++++++++++++++++++++
# Quantitative variables
fviz_hmfa_var(res.hmfa, "quanti.var")
# Graph of categorical variable categories
fviz_hmfa_var(res.hmfa, "quali.var")
# Groups of variables (correlation square)
fviz_hmfa_var(res.hmfa, "group")
# Biplot of categorical variable categories and individuals
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
fviz_hmfa_quali_biplot(res.hmfa)
# Graph of partial individuals (starplot)
# +++++++++++++++++++++++++++++++++++++++
fviz_hmfa_ind(res.hmfa, partial = "all", palette = "Dark2")
}
\references{
http://www.sthda.com/english/
}
\author{
Fabian Mundt \email{f.mundt@inventionate.de}
Alboukadel Kassambara \email{alboukadel.kassambara@gmail.com}
}
|
\name{ycinterextra-package}
\alias{ycinterextra-package}
\alias{ycinterextra}
\docType{package}
\title{Yield curve or zero-coupon prices interpolation and extrapolation}
\description{
Yield curve or zero-coupon prices interpolation and extrapolation using the Nelson-Siegel, Svensson, Smith-Wilson models, and Hermite cubic splines.}
\details{
\tabular{ll}{
Package: \tab ycinterextra\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2013-12-18\cr
License: \tab GPL-2 | GPL-3\cr
}
}
\author{
Thierry Moudiki
Maintainer: <thierry.moudiki@gmail.com>
}
\references{
Bolder, D. and Streliski, D. (1999). Yield curve modelling at the bank of canada.
Available at SSRN 1082845. \cr
CEIOPS (2010). Qis 5 risk-free interest rates extrapolation method. Technical
report, CEIOPS. \cr
FINANSTILSYNET (2010). A technical note on the Smith-Wilson method. \cr
Gilli, M., Grosse, S., and Schumann, E. (2010). Calibrating the Nelson-Siegel Svensson model. Available at SSRN 1676747. \cr
Moudiki, T. (2013). mcGlobaloptim : Global optimization using Monte Carlo and
Quasi Monte Carlo simulation. R package version 0.1. Available on CRAN. \cr
Nelson, C. R. and Siegel, A. F. (1987). Parsimonious modeling of yield curves.
Journal of Business, pages 473-489. \cr
Smith, A. and Wilson, T. (2001). Fitting yield curves with long term
constraints. Technical report, Bacon & Woodrow. Research Notes, Bacon
and Woodrow. \cr
Svensson, L. E. (1995). Estimating forward interest rates with the extended
Nelson & Siegel method. Sveriges Riksbank Quarterly Review, 3(1) :13-26. \cr
}
|
/man/ycinterextra-package.Rd
|
no_license
|
cran/ycinterextra
|
R
| false
| false
| 1,618
|
rd
|
\name{ycinterextra-package}
\alias{ycinterextra-package}
\alias{ycinterextra}
\docType{package}
\title{Yield curve or zero-coupon prices interpolation and extrapolation}
\description{
Yield curve or zero-coupon prices interpolation and extrapolation using the Nelson-Siegel, Svensson, Smith-Wilson models, and Hermite cubic splines.}
\details{
\tabular{ll}{
Package: \tab ycinterextra\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2013-12-18\cr
License: \tab GPL-2 | GPL-3\cr
}
}
\author{
Thierry Moudiki
Maintainer: <thierry.moudiki@gmail.com>
}
\references{
Bolder, D. and Streliski, D. (1999). Yield curve modelling at the bank of canada.
Available at SSRN 1082845. \cr
CEIOPS (2010). Qis 5 risk-free interest rates extrapolation method. Technical
report, CEIOPS. \cr
FINANSTILSYNET (2010). A technical note on the Smith-Wilson method. \cr
Gilli, M., Grosse, S., and Schumann, E. (2010). Calibrating the Nelson-Siegel Svensson model. Available at SSRN 1676747. \cr
Moudiki, T. (2013). mcGlobaloptim : Global optimization using Monte Carlo and
Quasi Monte Carlo simulation. R package version 0.1. Available on CRAN. \cr
Nelson, C. R. and Siegel, A. F. (1987). Parsimonious modeling of yield curves.
Journal of Business, pages 473-489. \cr
Smith, A. and Wilson, T. (2001). Fitting yield curves with long term
constraints. Technical report, Bacon & Woodrow. Research Notes, Bacon
and Woodrow. \cr
Svensson, L. E. (1995). Estimating forward interest rates with the extended
Nelson & Siegel method. Sveriges Riksbank Quarterly Review, 3(1) :13-26. \cr
}
|
library("deSolve")
library("reshape2")
library("ggplot2"); theme_set(theme_classic())
source("../simFuns.R") ## for transform.list
source("../hivFuns.R")
source("../Param.R")
source("../hivModels.R")
library("gridExtra")
# Estimate the early exponential growth rate ("little r") of an epidemic
# trajectory by regressing log total prevalence on time within a fixed
# prevalence window.
#
# g:       deSolve-style gradient function for the ODE solver.
# yini:    (possibly nested) list of initial state values; flattened before
#          solving.
# pp:      parameter object passed through to the solver.
# plot.it: if TRUE, plot the log-linear fit used to estimate the rate.
# tvec:    time points at which the ODE is solved.
# lims:    prevalence window (lower, upper) over which the regression runs.
# verbose: unused; kept for backward compatibility of the signature.
# Returns the fitted slope (growth rate), 0 if prevalence never exceeds
# 5e-2 (epidemic fizzled), or NA if the regression fails.
get_rval <- function(g, yini, pp, plot.it=FALSE,
                     tvec = c(1:500),
                     lims=c(0.01,0.05),
                     verbose=FALSE) {
  start <- unlist(yini)
  r <- rk(y=start,
          times=tvec,
          func=g,
          parms=pp, method = "ode45")
  # Total prevalence is the second-to-last state column
  Itot <- r[,(ncol(r)-1)]
  if (max(Itot) < 5e-2) {
    return(0)
  } else {
    dd <- data.frame(tvec = tvec, Itot = Itot)
    dsub <- subset(dd, Itot > lims[1] & Itot < lims[2])
    mm <- try(lm(log(Itot) ~ tvec, data = dsub), silent = TRUE)
    # BUGFIX: the try() result was never checked, so a failed fit (e.g. an
    # empty prevalence window) crashed at coef() below; return NA instead
    if (inherits(mm, "try-error")) {
      return(NA_real_)
    }
    if (plot.it) {
      plot(log(Itot) ~ tvec, data = dsub)
      abline(mm, col = 2)
    }
    cc <- coef(mm)[2]
    return(cc)
  }
}
# Baseline parameterisation: ini_V = 3, kappa = 0, mu = 1
# (NOTE(review): confirm parameter meanings against Param.R)
HIVpars.shirreff <- transform(HIVpars.shirreff, ini_V = 3, kappa = 0, mu = 1)
tvec = seq(from =1, to =600, by = 0.1)
# Scenario 1: vary the target initial growth rate (ini_r), fixed Ini_I.
# Columns of the result matrices correspond to the three ini_r values.
I_matS <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS <- rep(NA, 3)
ini_r <- c(0.021, 0.042, 0.084)
# Cache results to disk so re-runs skip the expensive ODE solves
if (file.exists("ev_LHS_resS.rda")) {
  load("ev_LHS_resS.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = 0.001)
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    # Pick the search interval for the scaling factor depending on whether
    # the unscaled growth rate is below or above the target
    if(little_r < ini_r[i]){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS[i] = find_scale(ini_r[i], gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS[i])
    # NOTE(review): func uses gfun5(HIVpars) while parms = HIVpars_adj --
    # confirm which parameter set the gradient closure actually reads
    r <- rk(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec, method = "ode45")
    # Second-to-last column: total prevalence; last column: mean virulence
    I_matS[,i] = r[,(ncol(r) - 1)]
    vir_matS[,i] = r[,ncol(r)]
  }
  save("I_matS", "vir_matS", "val_vecS",file="ev_LHS_resS.rda")
}
# Scenario 2: vary the initial infected fraction, targeting r = 0.042
I_matS2 <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS2 <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS2 <- rep(NA, 3)
iniI_vec <- c(0.001, 0.0001, 0.00001)
if (file.exists("ev_LHS_resS2.rda")) {
  load("ev_LHS_resS2.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = iniI_vec[i])
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    if(little_r < 0.042){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS2[i] = find_scale(0.042, gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS2[i])
    # NOTE(review): scenarios 2 and 3 use lsoda while scenario 1 uses
    # rk/ode45 -- confirm the solver difference is intentional
    r <- lsoda(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec)
    I_matS2[,i] = r[,(ncol(r) - 1)]
    vir_matS2[,i] = r[,ncol(r)]
  }
  save("I_matS2", "vir_matS2", "val_vecS2",file="ev_LHS_resS2.rda")
}
# Scenario 3: vary the initial virulence (ini_V), targeting r = 0.042
I_matS3 <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS3 <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS3 <- rep(NA, 3)
iniI_vir <- c(2.5, 3, 3.5)
if (file.exists("ev_LHS_resS3.rda")) {
  load("ev_LHS_resS3.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = 0.001, ini_V = iniI_vir[i])
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    if(little_r < 0.042){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS3[i] = find_scale(0.042, gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS3[i])
    r <- lsoda(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec)
    I_matS3[,i] = r[,(ncol(r) - 1)]
    vir_matS3[,i] = r[,ncol(r)]
  }
  save("I_matS3", "vir_matS3", "val_vecS3",file="ev_LHS_resS3.rda")
}
|
/R/simulations/figure1.R
|
no_license
|
kronga/HIV_LHS
|
R
| false
| false
| 3,857
|
r
|
library("deSolve")
library("reshape2")
library("ggplot2"); theme_set(theme_classic())
source("../simFuns.R") ## for transform.list
source("../hivFuns.R")
source("../Param.R")
source("../hivModels.R")
library("gridExtra")
# Estimate the early exponential growth rate ("little r") of an epidemic
# trajectory by regressing log total prevalence on time within a fixed
# prevalence window.
#
# g:       deSolve-style gradient function for the ODE solver.
# yini:    (possibly nested) list of initial state values; flattened before
#          solving.
# pp:      parameter object passed through to the solver.
# plot.it: if TRUE, plot the log-linear fit used to estimate the rate.
# tvec:    time points at which the ODE is solved.
# lims:    prevalence window (lower, upper) over which the regression runs.
# verbose: unused; kept for backward compatibility of the signature.
# Returns the fitted slope (growth rate), 0 if prevalence never exceeds
# 5e-2 (epidemic fizzled), or NA if the regression fails.
get_rval <- function(g, yini, pp, plot.it=FALSE,
                     tvec = c(1:500),
                     lims=c(0.01,0.05),
                     verbose=FALSE) {
  start <- unlist(yini)
  r <- rk(y=start,
          times=tvec,
          func=g,
          parms=pp, method = "ode45")
  # Total prevalence is the second-to-last state column
  Itot <- r[,(ncol(r)-1)]
  if (max(Itot) < 5e-2) {
    return(0)
  } else {
    dd <- data.frame(tvec = tvec, Itot = Itot)
    dsub <- subset(dd, Itot > lims[1] & Itot < lims[2])
    mm <- try(lm(log(Itot) ~ tvec, data = dsub), silent = TRUE)
    # BUGFIX: the try() result was never checked, so a failed fit (e.g. an
    # empty prevalence window) crashed at coef() below; return NA instead
    if (inherits(mm, "try-error")) {
      return(NA_real_)
    }
    if (plot.it) {
      plot(log(Itot) ~ tvec, data = dsub)
      abline(mm, col = 2)
    }
    cc <- coef(mm)[2]
    return(cc)
  }
}
# Baseline parameterisation: ini_V = 3, kappa = 0, mu = 1
# (NOTE(review): confirm parameter meanings against Param.R)
HIVpars.shirreff <- transform(HIVpars.shirreff, ini_V = 3, kappa = 0, mu = 1)
tvec = seq(from =1, to =600, by = 0.1)
# Scenario 1: vary the target initial growth rate (ini_r), fixed Ini_I.
# Columns of the result matrices correspond to the three ini_r values.
I_matS <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS <- rep(NA, 3)
ini_r <- c(0.021, 0.042, 0.084)
# Cache results to disk so re-runs skip the expensive ODE solves
if (file.exists("ev_LHS_resS.rda")) {
  load("ev_LHS_resS.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = 0.001)
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    # Pick the search interval for the scaling factor depending on whether
    # the unscaled growth rate is below or above the target
    if(little_r < ini_r[i]){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS[i] = find_scale(ini_r[i], gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS[i])
    # NOTE(review): func uses gfun5(HIVpars) while parms = HIVpars_adj --
    # confirm which parameter set the gradient closure actually reads
    r <- rk(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec, method = "ode45")
    # Second-to-last column: total prevalence; last column: mean virulence
    I_matS[,i] = r[,(ncol(r) - 1)]
    vir_matS[,i] = r[,ncol(r)]
  }
  save("I_matS", "vir_matS", "val_vecS",file="ev_LHS_resS.rda")
}
# Scenario 2: vary the initial infected fraction, targeting r = 0.042
I_matS2 <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS2 <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS2 <- rep(NA, 3)
iniI_vec <- c(0.001, 0.0001, 0.00001)
if (file.exists("ev_LHS_resS2.rda")) {
  load("ev_LHS_resS2.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = iniI_vec[i])
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    if(little_r < 0.042){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS2[i] = find_scale(0.042, gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS2[i])
    # NOTE(review): scenarios 2 and 3 use lsoda while scenario 1 uses
    # rk/ode45 -- confirm the solver difference is intentional
    r <- lsoda(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec)
    I_matS2[,i] = r[,(ncol(r) - 1)]
    vir_matS2[,i] = r[,ncol(r)]
  }
  save("I_matS2", "vir_matS2", "val_vecS2",file="ev_LHS_resS2.rda")
}
# Scenario 3: vary the initial virulence (ini_V), targeting r = 0.042
I_matS3 <- matrix(NA, nrow = length(tvec), ncol = 3)
vir_matS3 <- matrix(NA, nrow = length(tvec), ncol = 3)
val_vecS3 <- rep(NA, 3)
iniI_vir <- c(2.5, 3, 3.5)
if (file.exists("ev_LHS_resS3.rda")) {
  load("ev_LHS_resS3.rda")
} else {
  for(i in 1:3){
    cat(i)
    HIVpars <- transform(HIVpars.shirreff, Ini_I = 0.001, ini_V = iniI_vir[i])
    yini <- calc_yini3(HIVpars)
    little_r <- get_rval(gfun5(HIVpars),yini, HIVpars)
    if(little_r < 0.042){
      interval = c(1,2)
    }else{
      interval = c(0.01,1)
    }
    val_vecS3[i] = find_scale(0.042, gfun5, yini, HIVpars, interval=interval, adjpar = "scale_all")
    HIVpars_adj <- transform(HIVpars, scale_all = val_vecS3[i])
    r <- lsoda(unlist(yini), func = gfun5(HIVpars), parms = HIVpars_adj, times = tvec)
    I_matS3[,i] = r[,(ncol(r) - 1)]
    vir_matS3[,i] = r[,ncol(r)]
  }
  save("I_matS3", "vir_matS3", "val_vecS3",file="ev_LHS_resS3.rda")
}
|
# Candy Crush Saga level-difficulty analysis.
# Reads one week of player data, estimates the per-level win probability,
# visualises the difficulty profile (with binomial uncertainty), and computes
# the chance of clearing the whole episode without a single failed attempt.

# Use a sensible default plot size in the notebook.
options(repr.plot.width = 5, repr.plot.height = 4)

# Load the tidyverse (readr, dplyr, ggplot2, ...).
library(tidyverse)

# Import the raw attempt-level data.
data <- read_csv("candy_crush.csv")

# Peek at the first rows.
head(data)

print("Number of players:")
data$player_id %>% unique() %>% length()

print("Period for which we have data:")
range(data$dt)

# Per-level difficulty: p_win = total wins / total attempts across players.
difficulty <- data %>%
  group_by(level) %>%
  summarise(
    wins = sum(num_success),
    attempts = sum(num_attempts)
  ) %>%
  mutate(p_win = wins / attempts)

# Show the difficulty table.
difficulty

# Difficulty profile across the 15 levels of the episode.
ggplot(difficulty, aes(level, p_win)) +
  geom_line(color = 'red') +
  scale_x_continuous(breaks = 1:15) +
  scale_y_continuous(labels = scales::percent)

# Same profile with points and a dashed 10% reference line
# (levels below the line count as "hard").
ggplot(difficulty, aes(level, p_win)) +
  geom_line(color = 'red') +
  geom_point(color = 'darkblue') +
  geom_hline(yintercept = 0.10, linetype = "dashed", color = 'darkgreen') +
  scale_x_continuous(breaks = 1:15) +
  scale_y_continuous(labels = scales::percent)

# Binomial standard error of the estimated win probability per level.
difficulty <- difficulty %>%
  mutate(error = sqrt(p_win * (1 - p_win) / attempts))

# Profile again, now with +/- 1 SE error bars.
ggplot(difficulty, aes(level, p_win)) +
  geom_line(color = 'yellow') +
  geom_point(color = 'blue') +
  geom_hline(yintercept = 0.10, linetype = "dashed", color = 'darkgreen') +
  geom_errorbar(aes(ymin = p_win - error, ymax = p_win + error), color = 'red') +
  scale_x_continuous(breaks = 1:15) +
  scale_y_continuous(labels = scales::percent)

# Probability of completing every level on the first try
# (product of the per-level win probabilities).
p <- difficulty$p_win %>% prod()

# Show it.
p

# Should the level designer worry that many players will
# complete the episode in a single attempt?
should_the_designer_worry <- FALSE # TRUE / FALSE
|
/candy_crush_analysis.r
|
no_license
|
IqraJunaid/ccsa
|
R
| false
| false
| 1,940
|
r
|
# Candy Crush level-difficulty analysis: estimate per-level win probability,
# plot the difficulty profile with uncertainty, and compute the chance of a
# flawless episode run.
# This sets the size of plots to a good default.
options(repr.plot.width = 5, repr.plot.height = 4)
# Loading in packages
library(tidyverse)
# Reading in the data
data <- read_csv("candy_crush.csv")
# Printing out the first couple of rows
head(data)
print("Number of players:")
length(unique(data$player_id))
print("Period for which we have data:")
range(data$dt)
# Calculating level difficulty: p_win = total wins / total attempts.
difficulty <- data %>%
  group_by(level) %>%
  summarise(wins = sum(num_success), attempts = sum(num_attempts)) %>%
  mutate(p_win = wins / attempts)
# Printing out the level difficulty
difficulty
# Plotting the level difficulty profile
ggplot(difficulty, aes(x = level, y = p_win)) +
  geom_line(color='red') +
  scale_x_continuous(breaks = c(1:15)) +
  scale_y_continuous(labels = scales::percent)
# Adding points and a dashed 10% reference line (levels below it are "hard")
ggplot(difficulty, aes(x = level, y = p_win)) +
  geom_line(color='red') +
  geom_point(color='darkblue') +
  geom_hline(yintercept = 0.10, linetype = "dashed",color='darkgreen') +
  scale_x_continuous(breaks = c(1:15)) +
  scale_y_continuous(labels = scales::percent)
# Computing the (binomial) standard error of p_win for each level
difficulty <- difficulty %>%
  mutate(error = sqrt(p_win * (1 - p_win) / attempts))
# Adding +/- 1 SE error bars
ggplot(difficulty, aes(x = level, y = p_win)) +
  geom_line(color='yellow') +
  geom_point(color='blue') +
  geom_hline(yintercept = 0.10, linetype = "dashed",color='darkgreen') +
  geom_errorbar(aes(ymin = p_win - error, ymax = p_win + error),color='red') +
  scale_x_continuous(breaks = c(1:15)) +
  scale_y_continuous(labels = scales::percent)
# The probability of completing the episode without losing a single time
# (product of per-level win probabilities).
p <- prod(difficulty$p_win)
# Printing it out
p
# Should the level designer worry that many players will
# complete the episode in one attempt?
should_the_designer_worry = FALSE # TRUE / FALSE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rapache.R
\name{remove.old.files}
\alias{remove.old.files}
\title{remove.old.files}
\usage{
remove.old.files(tmpdir, tmpdir.timeout.seconds)
}
\arguments{
\item{tmpdir}{Path to directory whose old files you want to delete}
\item{tmpdir.timeout.seconds}{Time in seconds. An attempt will be made to
delete files with ctimes older than this many seconds before the current time.}
}
\value{
see \code{\link{unlink}}
}
\description{
Remove old files from a directory
}
\details{
Remove old files from a directory
}
\author{
Brad Friedman
}
|
/man/remove.old.files.Rd
|
no_license
|
apomatix/AnalysisPageServer
|
R
| false
| true
| 614
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rapache.R
\name{remove.old.files}
\alias{remove.old.files}
\title{remove.old.files}
\usage{
remove.old.files(tmpdir, tmpdir.timeout.seconds)
}
\arguments{
\item{tmpdir}{Path to directory whose old files you want to delete}
\item{tmpdir.timeout.seconds}{Time in seconds. An attempt will be made to
delete files with ctimes older than this many seconds before the current time.}
}
\value{
see \code{\link{unlink}}
}
\description{
Remove old files from a directory
}
\details{
Remove old files from a directory
}
\author{
Brad Friedman
}
|
library(EvCombR)
### Name: extPoints
### Title: Extreme Points of a Credal Set
### Aliases: extPoints
### ** Examples

# Three-element state space over which the credal set is defined.
states <- c("a", "b", "c")
# Credal set with lower bound 0.1 and upper bound 0.8 on every state
# (renamed from `c`, which shadowed base::c).
crd <- credal(c(0.1, 0.1, 0.1), c(0.8, 0.8, 0.8), states)
# Enumerate the extreme points of the credal set.
eMat <- extPoints(crd)
|
/data/genthat_extracted_code/EvCombR/examples/extPoints.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 300
|
r
|
library(EvCombR)
### Name: extPoints
### Title: Extreme Points of a Credal Set
### Aliases: extPoints
### ** Examples

# state space over which the credal set is defined
stateSpace <- c("a", "b", "c")
# construct credal set: lower bound 0.1, upper bound 0.8 on every state
c <- credal(c(0.1, 0.1, 0.1), c(0.8, 0.8, 0.8), stateSpace)
# obtain the extreme points of the credal set
eMat <- extPoints(c)
|
# Flatten the per-day hourly load columns (Loads.0 .. Loads.23) of the test
# set into a single chronological vector: hours 0..23 of day 1, then hours
# 0..23 of day 2, and so on.  c(t(M)) on the n_days x 24 matrix reads the
# transposed 24 x n_days matrix column-major, which reproduces exactly the
# interleaving of the original c(rbind(Loads.0, ..., Loads.23)) pattern,
# without 24 hand-written lines.
testSet_load_values <- c(t(as.matrix(testSet[, paste0("Loads.", 0:23)])))

# Mean absolute percentage error (%) of the OOEM predictions against the
# observed test-set loads.
mape.ooem <- 100 * mean(abs((testSet_load_values - ooem_predictions[["ooem_predictions"]])/testSet_load_values))
|
/scripts/9-ipto_vs_ooem.R
|
permissive
|
Time-Series-Analysis-learn/Greek-Electric-Load-Forecasting-IPTO
|
R
| false
| false
| 673
|
r
|
# Flatten the 24 hourly load columns of the test set into one chronological
# vector.  rbind() stacks the columns into a 24 x n_days matrix and c() reads
# it column-major, giving hours 0..23 of day 1, then day 2, and so on.
testSet_load_values = c(
  rbind(
    testSet$Loads.0,
    testSet$Loads.1,
    testSet$Loads.2,
    testSet$Loads.3,
    testSet$Loads.4,
    testSet$Loads.5,
    testSet$Loads.6,
    testSet$Loads.7,
    testSet$Loads.8,
    testSet$Loads.9,
    testSet$Loads.10,
    testSet$Loads.11,
    testSet$Loads.12,
    testSet$Loads.13,
    testSet$Loads.14,
    testSet$Loads.15,
    testSet$Loads.16,
    testSet$Loads.17,
    testSet$Loads.18,
    testSet$Loads.19,
    testSet$Loads.20,
    testSet$Loads.21,
    testSet$Loads.22,
    testSet$Loads.23
  )
)
# Mean absolute percentage error (%) of the OOEM predictions vs observed loads.
mape.ooem = 100 * mean(abs((testSet_load_values - ooem_predictions[["ooem_predictions"]])/testSet_load_values))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfocv.R
\name{loo.bmgarch}
\alias{loo.bmgarch}
\alias{loo}
\title{Leave-Future-Out Cross Validation (LFO-CV)}
\usage{
\method{loo}{bmgarch}(x, ..., type = "lfo", L = NULL, mode = "backward")
}
\arguments{
\item{x}{Fitted bmgarch model. \code{lfocv} inherits all attributes
from the bmgarch object}
\item{...}{Not used}
\item{type}{Takes \code{lfo} (default) or \code{loo}. LFO-CV is recommended
for time-series but LOO-CV may be obtained to assess the structural part of the model.}
\item{L}{Minimal length of the time series before computing LFO}
\item{mode}{backward elpd_lfo approximation, or exact elpd-lfo;
Takes 'backward', and 'exact'. 'exact' fits N-L models and may
take a \emph{very} long time to complete. \code{forward} works too but is not
complete yet.}
}
\value{
Approximate LFO-CV value and log-likelihood values across (L+1):N
timepoints
}
\description{
\code{lfocv} returns the LFO-CV ELPD by either computing the exact ELPD or
by approximating it via
forward or backward approximation strategies based on Pareto smoothed
importance sampling
described in \insertCite{Buerkner2019}{bmgarch}.
}
\examples{
\dontrun{
data(stocks)
# Fit a DCC model
fit <- bmgarch(data = stocks[1:100, c("toyota", "nissan" )],
parameterization = "DCC", standardize_data = TRUE,
iterations = 500)
# Compute expected log-predictive density (elpd) using the backward mode
# L is the upper boundary of the time-series before we engage in LFO-CV
lfob <- loo(fit, mode = 'backward', L = 50 )
print(lfob)
}
}
\references{
\insertAllCited{}
}
|
/man/loo.bmgarch.Rd
|
no_license
|
shuning302/bmgarch
|
R
| false
| true
| 1,646
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfocv.R
\name{loo.bmgarch}
\alias{loo.bmgarch}
\alias{loo}
\title{Leave-Future-Out Cross Validation (LFO-CV)}
\usage{
\method{loo}{bmgarch}(x, ..., type = "lfo", L = NULL, mode = "backward")
}
\arguments{
\item{x}{Fitted bmgarch model. \code{lfocv} inherits all attributes
from the bmgarch object}
\item{...}{Not used}
\item{type}{Takes \code{lfo} (default) or \code{loo}. LFO-CV is recommended
for time-series but LOO-CV may be obtained to assess the structural part of the model.}
\item{L}{Minimal length of the time series before computing LFO}
\item{mode}{backward elpd_lfo approximation, or exact elpd-lfo;
Takes 'backward', and 'exact'. 'exact' fits N-L models and may
take a \emph{very} long time to complete. \code{forward} works too but is not
complete yet.}
}
\value{
Approximate LFO-CV value and log-likelihood values across (L+1):N
timepoints
}
\description{
\code{lfocv} returns the LFO-CV ELPD by either computing the exact ELPD or
by approximating it via
forward or backward approximation strategies based on Pareto smoothed
importance sampling
described in \insertCite{Buerkner2019}{bmgarch}.
}
\examples{
\dontrun{
data(stocks)
# Fit a DCC model
fit <- bmgarch(data = stocks[1:100, c("toyota", "nissan" )],
parameterization = "DCC", standardize_data = TRUE,
iterations = 500)
# Compute expected log-predictive density (elpd) using the backward mode
# L is the upper boundary of the time-series before we engage in LFO-CV
lfob <- loo(fit, mode = 'backward', L = 50 )
print(lfob)
}
}
\references{
\insertAllCited{}
}
|
## Assignment week 2 Part 2
##Goal : Write a function that reads a directory full of files and reports the number of completely observed cases in each data file.
## The function should return a data frame where the first column is the name of the file and the second column is the number of complete cases
## Author : Srivathsan Seshadri, '2015-05-17'
complete <- function(directory, id = seq_along(dir())) {
  ## Count complete observations in a directory of monitor CSV files.
  ##
  ## directory: character scalar -- location of the CSV files, which are
  ##   named with zero-padded monitor IDs ("001.csv", "012.csv", ...).
  ## id: integer vector of monitor IDs to read.  The default is evaluated
  ##   after the working-directory switch below, so it covers every file in
  ##   `directory` (seq_along also fixes the original 1:length() bug, which
  ##   produced c(1, 0) for an empty directory).
  ## Returns a data frame with columns `ids` (monitor ID) and `nobs`
  ##   (number of rows with no missing values in that monitor's file).

  # Work inside `directory`, but restore the caller's working directory on
  # exit (the original version leaked the setwd() side effect).
  old_wd <- setwd(directory)
  on.exit(setwd(old_wd), add = TRUE)

  n <- length(id)     # forces `id` (and its default) while inside `directory`
  ids <- integer(n)   # preallocate instead of growing with c() in the loop
  nobs <- integer(n)

  for (k in seq_len(n)) {
    # sprintf zero-pads to three digits, replacing the original three-branch
    # toString()/paste() construction (and, unlike it, also handling >= 1000).
    fileid <- sprintf("%03d.csv", id[k])
    x <- read.csv(fileid, header = TRUE)
    ids[k] <- id[k]
    # Number of rows with no NA in any column.
    nobs[k] <- sum(complete.cases(x))
  }

  # Column names `ids`/`nobs` match the original implementation.
  data.frame(ids, nobs)
}
|
/complete.R
|
no_license
|
srivathsesh/R
|
R
| false
| false
| 2,184
|
r
|
## Assignment week 2 Part 2
##Goal : Write a function that reads a directory full of files and reports the number of completely observed cases in each data file.
## The function should return a data frame where the first column is the name of the file and the second column is the number of complete cases
## Author : Srivathsan Seshadri, '2015-05-17'
## Count complete (no-NA) observations in a directory of monitor CSV files,
## returning a data frame with one row per requested monitor ID.
complete <- function(directory,id = 1:length(dir())){
  ##-------------------------------------------------------------------------
  ## where directory is a character vector of length 1 indicating
  ## the location of the csv files (named "001.csv", "012.csv", ...)
  ## where id is an integer vector indicating monitor ID numbers
  ## to be provided by the user
  ## NOTE(review): the default 1:length(dir()) yields c(1, 0) when the
  ## directory is empty -- seq_along(dir()) would be safer; confirm callers.
  ##-------------------------------------------------------------------------
  ## setting working directory to the specified argument directory
  ## NOTE(review): this working-directory change is never undone, so the
  ## caller's wd is left pointing at `directory` after the call.
  setwd(directory)
  ## creating empty numeric vectors for id and number of complete obs (nobs);
  ## both grow with c() inside the loop below.
  ids <- numeric()
  nobs <- numeric()
  ## Loop through files corresponding to input ids, count the complete rows
  ## in each file, and append (id, nobs) pairs to the result vectors.
  for (k in seq_along(id)) {## zero-pad the id to build the file name
    if (id[k] < 10) {
      fileid <- paste("00",toString(id[k]),".csv",sep="")
    }
    else if (id[k]>=10 && id[k] < 100 ){
      fileid <- paste("0",toString(id[k]),".csv",sep="")
    }
    else {
      fileid <- paste(toString(id[k]),".csv",sep="")
    }
    path2file <- file.path(getwd(),fileid) ## full path of the file to read
    ## read csv file
    x <- read.csv(path2file,header=T)
    ## rows with no missing value in any column
    completeObs <- nrow(x[complete.cases(x),])
    ids <- c(ids,id[k])
    nobs <- c(nobs,completeObs)
  }
  data.frame(ids,nobs) ## Creating a data frame with columns `ids` and `nobs`
}
|
# Purled knitr script: out-of-memory data handling with ff/ffbase and
# bigmemory.  Imports the flights data in disk-backed chunks, compares memory
# use against plain read.table(), merges/recode/subsets the ffdf objects, and
# saves/reloads/exports the result.
## ----message=FALSE-------------------------------------------------------
# SET UP --------------
# install.packages(c("ff", "ffbase"))
# load packages
library(ff)
library(ffbase)
library(pryr)
# create directory for ff chunks, and assign directory to ff
system("mkdir ffdf")
options(fftempdir = "ffdf")
## ------------------------------------------------------------------------
# Chunked, disk-backed import; mem_change() reports the (small) RAM delta.
mem_change(
flights <-
     read.table.ffdf(file="../data/flights.csv",
                     sep=",",
                     VERBOSE=TRUE,
                     header=TRUE,
                     next.rows=100000,
                     colClasses=NA)
)
## ------------------------------------------------------------------------
# show the files in the directory keeping the chunks
list.files("ffdf")
# investigate the structure of the object created in the R environment
str(flights)
## ------------------------------------------------------------------------
# SET UP ----------------
# load packages
library(bigmemory)
library(biganalytics)
# import the data as a file-backed big.matrix (note: `flights` is rebound,
# replacing the ffdf created above)
flights <- read.big.matrix("../data/flights.csv",
                           type="integer",
                           header=TRUE,
                           backingfile="flights.bin",
                           descriptorfile="flights.desc")
## ------------------------------------------------------------------------
summary(flights)
## ------------------------------------------------------------------------
## SET UP ------------------------
#Set working directory to the data and airline_id files.
# setwd("materials/code_book/B05396_Ch03_Code")
system("mkdir ffdf")
options(fftempdir = "ffdf")
# load packages
library(ff)
library(ffbase)
library(pryr)
# fix vars
FLIGHTS_DATA <- "../code_book/B05396_Ch03_Code/flights_sep_oct15.txt"
AIRLINES_DATA <- "../code_book/B05396_Ch03_Code/airline_id.csv"
## ------------------------------------------------------------------------
# DATA IMPORT ------------------
# 1. Upload flights_sep_oct15.txt and airline_id.csv files from flat files.
system.time(flights.ff <- read.table.ffdf(file=FLIGHTS_DATA,
                                          sep=",",
                                          VERBOSE=TRUE,
                                          header=TRUE,
                                          next.rows=100000,
                                          colClasses=NA))
airlines.ff <- read.csv.ffdf(file= AIRLINES_DATA,
                             VERBOSE=TRUE,
                             header=TRUE,
                             next.rows=100000,
                             colClasses=NA)
# check memory used
mem_used()
## ------------------------------------------------------------------------
##Using read.table() -- in-RAM import for comparison with the ff route.
system.time(flights.table <- read.table(FLIGHTS_DATA,
                                        sep=",",
                                        header=TRUE))
gc()
system.time(airlines.table <- read.csv(AIRLINES_DATA,
                                       header = TRUE))
# check memory used
mem_used()
## ------------------------------------------------------------------------
# 2. Inspect the ffdf objects.
## For flights.ff object:
class(flights.ff)
dim(flights.ff)
## For airlines.ff object:
class(airlines.ff)
dim(airlines.ff)
## ------------------------------------------------------------------------
# step 1:
## Rename "Code" variable from airlines.ff to "AIRLINE_ID" and "Description" into "AIRLINE_NM".
names(airlines.ff) <- c("AIRLINE_ID", "AIRLINE_NM")
names(airlines.ff)
str(airlines.ff[1:20,])
## ------------------------------------------------------------------------
# merge of ffdf objects (disk-backed join on AIRLINE_ID)
mem_change(flights.data.ff <- merge.ffdf(flights.ff, airlines.ff, by="AIRLINE_ID"))
#The new object is only 551.2 Kb in size
class(flights.data.ff)
dim(flights.data.ff)
dimnames.ffdf(flights.data.ff)
## ------------------------------------------------------------------------
##For flights.table: same merge done fully in RAM, for comparison.
names(airlines.table) <- c("AIRLINE_ID", "AIRLINE_NM")
names(airlines.table)
str(airlines.table[1:20,])
# check memory usage of merge in RAM
mem_change(flights.data.table <- merge(flights.table,
                                       airlines.table,
                                       by="AIRLINE_ID"))
#The new object is already 105.7 Mb in size
#A rapid spike in RAM use when processing
## ------------------------------------------------------------------------
# Inspect the current variable
table.ff(flights.data.ff$DAY_OF_WEEK)
head(flights.data.ff$DAY_OF_WEEK)
# Convert numeric ff DAY_OF_WEEK vector to a ff factor (1 = Monday):
flights.data.ff$WEEKDAY <- cut.ff(flights.data.ff$DAY_OF_WEEK,
                                  breaks = 7,
                                  labels = c("Monday", "Tuesday",
                                             "Wednesday", "Thursday",
                                             "Friday", "Saturday",
                                             "Sunday"))
# inspect the result
head(flights.data.ff$WEEKDAY)
table.ff(flights.data.ff$WEEKDAY)
## ------------------------------------------------------------------------
mem_used()
# Subset the ffdf object flights.data.ff: keep only cancelled flights and a
# handful of descriptive columns.
subs1.ff <- subset.ffdf(flights.data.ff, CANCELLED == 1,
                        select = c(FL_DATE, AIRLINE_ID,
                                   ORIGIN_CITY_NAME,
                                   ORIGIN_STATE_NM,
                                   DEST_CITY_NAME,
                                   DEST_STATE_NM,
                                   CANCELLATION_CODE))
dim(subs1.ff)
mem_used()
## ------------------------------------------------------------------------
# Save a newly created ffdf object to a data file:
save.ffdf(subs1.ff) #7 files (one for each column) created in the ffdb directory
## ------------------------------------------------------------------------
# Loading previously saved ffdf files (recreates subs1.ff from disk):
rm(subs1.ff)
gc()
load.ffdf("ffdb")
str(subs1.ff)
dim(subs1.ff)
dimnames(subs1.ff)
## ----message=FALSE-------------------------------------------------------
# Export subs1.ff into CSV and TXT files:
write.csv.ffdf(subs1.ff, "subset1.csv")
|
/materials/sourcecode/04_cleaning_transformation.R
|
no_license
|
KKobrin95/BigData
|
R
| false
| false
| 6,151
|
r
|
# Purled knitr script (duplicate copy): out-of-memory data handling with
# ff/ffbase and bigmemory -- chunked import, RAM comparisons, merge, recode,
# subset, save/reload and export of the flights data.
## ----message=FALSE-------------------------------------------------------
# SET UP --------------
# install.packages(c("ff", "ffbase"))
# load packages
library(ff)
library(ffbase)
library(pryr)
# create directory for ff chunks, and assign directory to ff
system("mkdir ffdf")
options(fftempdir = "ffdf")
## ------------------------------------------------------------------------
# Chunked, disk-backed import; mem_change() reports the (small) RAM delta.
mem_change(
flights <-
     read.table.ffdf(file="../data/flights.csv",
                     sep=",",
                     VERBOSE=TRUE,
                     header=TRUE,
                     next.rows=100000,
                     colClasses=NA)
)
## ------------------------------------------------------------------------
# show the files in the directory keeping the chunks
list.files("ffdf")
# investigate the structure of the object created in the R environment
str(flights)
## ------------------------------------------------------------------------
# SET UP ----------------
# load packages
library(bigmemory)
library(biganalytics)
# import the data as a file-backed big.matrix (rebinds `flights`)
flights <- read.big.matrix("../data/flights.csv",
                           type="integer",
                           header=TRUE,
                           backingfile="flights.bin",
                           descriptorfile="flights.desc")
## ------------------------------------------------------------------------
summary(flights)
## ------------------------------------------------------------------------
## SET UP ------------------------
#Set working directory to the data and airline_id files.
# setwd("materials/code_book/B05396_Ch03_Code")
system("mkdir ffdf")
options(fftempdir = "ffdf")
# load packages
library(ff)
library(ffbase)
library(pryr)
# fix vars
FLIGHTS_DATA <- "../code_book/B05396_Ch03_Code/flights_sep_oct15.txt"
AIRLINES_DATA <- "../code_book/B05396_Ch03_Code/airline_id.csv"
## ------------------------------------------------------------------------
# DATA IMPORT ------------------
# 1. Upload flights_sep_oct15.txt and airline_id.csv files from flat files.
system.time(flights.ff <- read.table.ffdf(file=FLIGHTS_DATA,
                                          sep=",",
                                          VERBOSE=TRUE,
                                          header=TRUE,
                                          next.rows=100000,
                                          colClasses=NA))
airlines.ff <- read.csv.ffdf(file= AIRLINES_DATA,
                             VERBOSE=TRUE,
                             header=TRUE,
                             next.rows=100000,
                             colClasses=NA)
# check memory used
mem_used()
## ------------------------------------------------------------------------
##Using read.table() -- in-RAM import for comparison with the ff route.
system.time(flights.table <- read.table(FLIGHTS_DATA,
                                        sep=",",
                                        header=TRUE))
gc()
system.time(airlines.table <- read.csv(AIRLINES_DATA,
                                       header = TRUE))
# check memory used
mem_used()
## ------------------------------------------------------------------------
# 2. Inspect the ffdf objects.
## For flights.ff object:
class(flights.ff)
dim(flights.ff)
## For airlines.ff object:
class(airlines.ff)
dim(airlines.ff)
## ------------------------------------------------------------------------
# step 1:
## Rename "Code" variable from airlines.ff to "AIRLINE_ID" and "Description" into "AIRLINE_NM".
names(airlines.ff) <- c("AIRLINE_ID", "AIRLINE_NM")
names(airlines.ff)
str(airlines.ff[1:20,])
## ------------------------------------------------------------------------
# merge of ffdf objects (disk-backed join on AIRLINE_ID)
mem_change(flights.data.ff <- merge.ffdf(flights.ff, airlines.ff, by="AIRLINE_ID"))
#The new object is only 551.2 Kb in size
class(flights.data.ff)
dim(flights.data.ff)
dimnames.ffdf(flights.data.ff)
## ------------------------------------------------------------------------
##For flights.table: same merge done fully in RAM, for comparison.
names(airlines.table) <- c("AIRLINE_ID", "AIRLINE_NM")
names(airlines.table)
str(airlines.table[1:20,])
# check memory usage of merge in RAM
mem_change(flights.data.table <- merge(flights.table,
                                       airlines.table,
                                       by="AIRLINE_ID"))
#The new object is already 105.7 Mb in size
#A rapid spike in RAM use when processing
## ------------------------------------------------------------------------
# Inspect the current variable
table.ff(flights.data.ff$DAY_OF_WEEK)
head(flights.data.ff$DAY_OF_WEEK)
# Convert numeric ff DAY_OF_WEEK vector to a ff factor (1 = Monday):
flights.data.ff$WEEKDAY <- cut.ff(flights.data.ff$DAY_OF_WEEK,
                                  breaks = 7,
                                  labels = c("Monday", "Tuesday",
                                             "Wednesday", "Thursday",
                                             "Friday", "Saturday",
                                             "Sunday"))
# inspect the result
head(flights.data.ff$WEEKDAY)
table.ff(flights.data.ff$WEEKDAY)
## ------------------------------------------------------------------------
mem_used()
# Subset the ffdf object flights.data.ff: cancelled flights only, with a
# handful of descriptive columns.
subs1.ff <- subset.ffdf(flights.data.ff, CANCELLED == 1,
                        select = c(FL_DATE, AIRLINE_ID,
                                   ORIGIN_CITY_NAME,
                                   ORIGIN_STATE_NM,
                                   DEST_CITY_NAME,
                                   DEST_STATE_NM,
                                   CANCELLATION_CODE))
dim(subs1.ff)
mem_used()
## ------------------------------------------------------------------------
# Save a newly created ffdf object to a data file:
save.ffdf(subs1.ff) #7 files (one for each column) created in the ffdb directory
## ------------------------------------------------------------------------
# Loading previously saved ffdf files (recreates subs1.ff from disk):
rm(subs1.ff)
gc()
load.ffdf("ffdb")
str(subs1.ff)
dim(subs1.ff)
dimnames(subs1.ff)
## ----message=FALSE-------------------------------------------------------
# Export subs1.ff into CSV and TXT files:
write.csv.ffdf(subs1.ff, "subset1.csv")
|
library(RPostgreSQL)
library(dplyr)
# Imports: database credentials (auth.R) and the scraped data tables (tabela.R).
source("auth.R", encoding="UTF-8")
source("uvoz in urejanje podatkov/tabela.R", encoding="UTF-8")
# Connect to the PostgreSQL driver.
drv <- dbDriver("PostgreSQL")
# Drop all project tables (if they exist).
delete_table <- function(){
  # tryCatch guarantees the connection is closed
  # even if one of the statements fails.
  tryCatch({
    # Open a connection to the database.
    conn <- dbConnect(drv, dbname = db, host = host, user = user, password = password)
    # Drop the tables if they exist; tables that other tables
    # reference are dropped first, with CASCADE.
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS driver CASCADE"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS team CASCADE"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS grand_prix"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS results"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS result"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS has"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS results_abudhabi "))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS results_AbuDhabi "))
  }, finally = {
    dbDisconnect(conn)
  })
}
# Grant database/schema privileges to the other team members.
pravice <- function(){
  # tryCatch guarantees the connection is closed
  # even if one of the statements fails.
  tryCatch({
    # Open a connection to the database.
    conn <- dbConnect(drv, dbname = db, host = host,# drv = the driver we connect with
                      user = user, password = password)
    dbSendQuery(conn, build_sql("GRANT CONNECT ON DATABASE sem2017_jurez TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT CONNECT ON DATABASE sem2017_jurez TO domenh WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON SCHEMA public TO domenh WITH GRANT OPTION"))
  }, finally = {
    # Always close the connection -- we must not leave
    # too many open connections behind.
    dbDisconnect(conn) # close the connection
    # The finally block runs before the function exits, even on error.
  })
}
# Create the project tables (team, driver, grand_prix, has, results)
# and grant the team members full access to them.
create_table <- function(){
  # tryCatch guarantees the connection is closed
  # even if one of the statements fails.
  tryCatch({
    # Open a connection to the database.
    conn <- dbConnect(drv, dbname = db, host = host,# drv = the driver we connect with
                      user = user, password = password)
    # Main tables.
    team <- dbSendQuery(conn,build_sql("CREATE TABLE team (
                        id INTEGER PRIMARY KEY,
                        name TEXT NOT NULL UNIQUE,
                        country TEXT NOT NULL,
                        constructor TEXT NOT NULL,
                        chassis VARCHAR(13) NOT NULL UNIQUE,
                        power_unit VARCHAR(22) NOT NULL)"))
    driver <- dbSendQuery(conn,build_sql("CREATE TABLE driver (
                          name TEXT NOT NULL,
                          surname TEXT NOT NULL,
                          car_number INTEGER PRIMARY KEY,
                          age INTEGER NOT NULL,
                          height INTEGER NOT NULL,
                          weight INTEGER NOT NULL,
                          country TEXT NOT NULL
                          )"))
    grand_prix <- dbSendQuery(conn,build_sql("CREATE TABLE grand_prix (
                              round INTEGER PRIMARY KEY,
                              name TEXT NOT NULL UNIQUE,
                              circuit_name TEXT NOT NULL,
                              town TEXT NOT NULL,
                              date DATE NOT NULL,
                              circuit_length DECIMAL NOT NULL,
                              laps INTEGER NOT NULL)"))
    # Many-to-many link between teams and drivers.
    # NOTE(review): CHECK (team <> driver) compares a team id with a car
    # number -- confirm this constraint is intentional.
    has <- dbSendQuery(conn,build_sql("CREATE TABLE has (
                       team INTEGER NOT NULL REFERENCES team(id),
                       driver INTEGER NOT NULL REFERENCES driver(car_number),
                       PRIMARY KEY (team,driver),
                       CHECK (team <> driver))"))
    results <- dbSendQuery(conn,build_sql("CREATE TABLE results (
                           position VARCHAR(2) NOT NULL,
                           car_number INTEGER REFERENCES driver(car_number),
                           name TEXT NOT NULL,
                           surname TEXT NOT NULL,
                           car TEXT NOT NULL,
                           laps INTEGER,
                           time VARCHAR(11) NOT NULL,
                           points INTEGER,
                           circuit TEXT NOT NULL,
                           start_position INTEGER NOT NULL)"))
    # Grant the whole team full access to all tables and sequences.
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO jurez WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO domenh WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO jurez WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO domenh WITH GRANT OPTION"))
  }, finally = {
    # Always close the connection -- we must not leave
    # too many open connections behind.
    dbDisconnect(conn) # close the connection
    # The finally block runs before the function exits, even on error.
  })
}
# Load the scraped data frames (from tabela.R) into the database tables.
insert_data <- function(){
  tryCatch({
    conn <- dbConnect(drv, dbname = db, host = host,
                      user = user, password = password)
    # Base tables: drivers, teams and the 2016 calendar.
    dbWriteTable(conn, name="driver", tabeladirkacev, append=T, row.names=FALSE)
    dbWriteTable(conn, name="team", tabelaekip, append=T, row.names=FALSE)
    dbWriteTable(conn, name="grand_prix", tabelaGandPrix16, append=T, row.names=FALSE)
    # Race results: one data frame per 2016 Grand Prix, all appended to
    # the single `results` table.
    # NOTE(review): tabelaAustria16 is written twice (here and again below)
    # and no tabelaAustralia16 appears -- confirm this first line shouldn't
    # be the Australian GP.
    dbWriteTable(conn, name="results", tabelaAustria16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaBahrain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaChina16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaRussia16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaSpain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMonaco16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaCanada16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaEurope16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaAustria16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaGreatBritain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaHungary16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaGermany16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaBelgium16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaItaly16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaSingapore16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMalaysia16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaJapan16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaUnitedStates16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMexico16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaBrazil16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaAbuDhabi16, append=T, row.names=FALSE)
  }, finally = {
    dbDisconnect(conn)
  })
}
# Rebuild the database from scratch: drop, grant, create, load.
delete_table()
pravice()
create_table()
insert_data()

con <- src_postgres(dbname = db, host = host, user = user, password = password)

# "has" relation: pair every driver with his team by matching the car name
# recorded in the results to the team's constructor name.
tbl.driver <- tbl(con, "driver")
tbl.team <- tbl(con, "team")
tbl.results <- tbl(con, "results")

# Resolved leftover git merge conflict (<<<<<<< HEAD ... >>>>>>>): the HEAD
# branch's explicit two-step join is kept, extended so the result carries the
# `team`/`driver` column names the `has` table expects, and de-duplicated
# (results holds one row per race per driver, but (team, driver) is the
# primary key of `has`).
data.has <- tbl.driver %>% select(car_number, name, surname) %>%
  inner_join(tbl.results %>% select(car_number, car),
             by = c("car_number" = "car_number"), copy = TRUE) %>%
  inner_join(tbl.team %>% select(id, constructor),
             by = c("car" = "constructor"), copy = TRUE) %>%
  select(team = id, driver = car_number) %>%
  distinct()
# Function that inserts the relation rows.
# Appends the (team, driver) pairs prepared in data.has into the "has"
# table; tryCatch guarantees the connection is closed even on failure.
insert_relation_data <- function(){
  tryCatch({
    conn <- dbConnect(drv, dbname = db, host = host,
                      user = user, password = password)
    dbWriteTable(conn, name="has", data.has, append=T, row.names=FALSE)
  }, finally = {
    dbDisconnect(conn)
  })
}
# Run the loader immediately, completing the database population.
insert_relation_data()
|
/baza/baza.r
|
permissive
|
jaanos/Formula-1
|
R
| false
| false
| 9,676
|
r
|
library(RPostgreSQL)
library(dplyr)
# Imports: database credentials (auth.R) and the pre-built data frames
# (tabeladirkacev, tabelaekip, tabela*16) produced by the import scripts.
source("auth.R", encoding="UTF-8")
source("uvoz in urejanje podatkov/tabela.R", encoding="UTF-8")
# Obtain the PostgreSQL driver used by every dbConnect() call below
drv <- dbDriver("PostgreSQL")
# Function that drops all tables so the schema can be rebuilt cleanly.
delete_table <- function(){
  # tryCatch forces the connection to be closed even if a statement fails
  tryCatch({
    # Open the connection
    conn <- dbConnect(drv, dbname = db, host = host, user = user, password = password)
    # Drop tables if they exist; tables other objects depend on are dropped
    # with CASCADE so their dependents go too.
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS driver CASCADE"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS team CASCADE"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS grand_prix"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS results"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS result"))
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS has"))
    # bugfix: unquoted identifiers are case-folded to lower case by
    # PostgreSQL, so "results_abudhabi" and "results_AbuDhabi" named the
    # same table; the redundant duplicate DROP was removed.
    dbSendQuery(conn,build_sql("DROP TABLE IF EXISTS results_abudhabi "))
  }, finally = {
    dbDisconnect(conn)
  })
}
# Grant collaborators (urosk, domenh) access to the database and schema.
pravice <- function(){
  # Use tryCatch (connect and disconnect) so the connection is
  # always closed, even when a statement errors
  tryCatch({
    # Open the connection
    conn <- dbConnect(drv, dbname = db, host = host,# drv = the driver we connect with
                      user = user, password = password)
    dbSendQuery(conn, build_sql("GRANT CONNECT ON DATABASE sem2017_jurez TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT CONNECT ON DATABASE sem2017_jurez TO domenh WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON SCHEMA public TO domenh WITH GRANT OPTION"))
  }, finally = {
    # Always close the connection at the end --
    # we must not keep too many connections open
    dbDisconnect(conn) # DISCONNECT
    # Code in the finally block runs before the program exits with an error
  })
}
# Function that creates the schema (team, driver, grand_prix, has, results)
# and grants access to all collaborators.
create_table <- function(){
  # tryCatch forces the connection to be closed even if a statement fails
  tryCatch({
    # Open the connection (drv = the PostgreSQL driver)
    conn <- dbConnect(drv, dbname = db, host = host,
                      user = user, password = password)
    # Main tables. Cleanup: the dbSendQuery results were previously
    # assigned to unused locals (team, driver, ...); those dead
    # assignments were removed -- the DDL side effect is all that matters.
    dbSendQuery(conn,build_sql("CREATE TABLE team (
                        id INTEGER PRIMARY KEY,
                        name TEXT NOT NULL UNIQUE,
                        country TEXT NOT NULL,
                        constructor TEXT NOT NULL,
                        chassis VARCHAR(13) NOT NULL UNIQUE,
                        power_unit VARCHAR(22) NOT NULL)"))
    dbSendQuery(conn,build_sql("CREATE TABLE driver (
                        name TEXT NOT NULL,
                        surname TEXT NOT NULL,
                        car_number INTEGER PRIMARY KEY,
                        age INTEGER NOT NULL,
                        height INTEGER NOT NULL,
                        weight INTEGER NOT NULL,
                        country TEXT NOT NULL
                        )"))
    dbSendQuery(conn,build_sql("CREATE TABLE grand_prix (
                             round INTEGER PRIMARY KEY,
                             name TEXT NOT NULL UNIQUE,
                             circuit_name TEXT NOT NULL,
                             town TEXT NOT NULL,
                             date DATE NOT NULL,
                             circuit_length DECIMAL NOT NULL,
                             laps INTEGER NOT NULL)"))
    # Relation table: which driver drives for which team
    dbSendQuery(conn,build_sql("CREATE TABLE has (
                     team INTEGER NOT NULL REFERENCES team(id),
                     driver INTEGER NOT NULL REFERENCES driver(car_number),
                     PRIMARY KEY (team,driver),
                     CHECK (team <> driver))"))
    dbSendQuery(conn,build_sql("CREATE TABLE results (
                        position VARCHAR(2) NOT NULL,
                        car_number INTEGER REFERENCES driver(car_number),
                        name TEXT NOT NULL,
                        surname TEXT NOT NULL,
                        car TEXT NOT NULL,
                        laps INTEGER,
                        time VARCHAR(11) NOT NULL,
                        points INTEGER,
                        circuit TEXT NOT NULL,
                        start_position INTEGER NOT NULL)"))
    # Grant every collaborator full rights on the new objects
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO jurez WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL TABLES IN SCHEMA public TO domenh WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO jurez WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO urosk WITH GRANT OPTION"))
    dbSendQuery(conn, build_sql("GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO domenh WITH GRANT OPTION"))
  }, finally = {
    # Always close the connection at the end --
    # we must not keep too many connections open
    dbDisconnect(conn)
    # Code in the finally block runs before the program exits with an error
  })
}
# Function that inserts the data: drivers, teams, the 2016 Grand Prix
# calendar, and one append per race into the shared "results" table.
insert_data <- function(){
  tryCatch({
    conn <- dbConnect(drv, dbname = db, host = host,
                      user = user, password = password)
    dbWriteTable(conn, name="driver", tabeladirkacev, append=T, row.names=FALSE)
    dbWriteTable(conn, name="team", tabelaekip, append=T, row.names=FALSE)
    dbWriteTable(conn, name="grand_prix", tabelaGandPrix16, append=T, row.names=FALSE)
    # bugfix: tabelaAustria16 was written twice (once here, once in its
    # calendar slot below), duplicating the Austrian GP rows; the
    # out-of-place duplicate was removed. NOTE(review): the season opener
    # (tabelaAustralia16?) appears to be missing from this list --
    # confirm against tabela.R.
    dbWriteTable(conn, name="results", tabelaBahrain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaChina16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaRussia16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaSpain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMonaco16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaCanada16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaEurope16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaAustria16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaGreatBritain16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaHungary16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaGermany16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaBelgium16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaItaly16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaSingapore16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMalaysia16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaJapan16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaUnitedStates16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaMexico16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaBrazil16, append=T, row.names=FALSE)
    dbWriteTable(conn, name="results", tabelaAbuDhabi16, append=T, row.names=FALSE)
  }, finally = {
    dbDisconnect(conn)
  })
}
# Rebuild the schema from scratch and load all 2016 season data.
delete_table()
pravice()
create_table()
insert_data()
# dplyr source used to build the "has" relation below
con <- src_postgres(dbname = db, host = host, user = user, password = password)
# "has" relation: lazy references to the freshly populated tables
tbl.driver <- tbl(con, "driver")
tbl.team <- tbl(con, "team")
tbl.results <- tbl(con,"results")
# Build the (team, driver) key pairs for the "has" relation:
# drivers are matched to results via car number, and results to teams via
# the constructor name; the surviving columns are renamed to match the
# "has" table schema (team = team id, driver = car number).
# NOTE: an unresolved git merge conflict had been committed here; it is
# resolved by keeping the explicitly keyed joins and selecting the key
# columns under the names the "has" table expects.
data.has <- tbl.driver %>% select(car_number, name, surname) %>%
  inner_join(tbl.results %>% select(car_number, car),
             by = c("car_number" = "car_number"), copy = TRUE) %>%
  inner_join(tbl.team %>% select(id, constructor),
             by = c("car" = "constructor"), copy = TRUE) %>%
  select(team = id, driver = car_number)
# Insert the (team, driver) pairs prepared in data.has into the "has"
# relation table. Connection handling mirrors the other loaders: tryCatch
# guarantees the connection is closed even if the write fails.
insert_relation_data <- function(){
  tryCatch({
    db_conn <- dbConnect(drv, dbname = db, host = host,
                         user = user, password = password)
    dbWriteTable(db_conn, name = "has", data.has,
                 append = TRUE, row.names = FALSE)
  }, finally = {
    dbDisconnect(db_conn)
  })
}
# Run the loader immediately, completing the database population.
insert_relation_data()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Toolbox_run.r
\name{supplyData}
\alias{supplyData}
\title{supplyData}
\usage{
supplyData(
dat,
variableType = "gaussian",
design.matrix,
levels.dat,
scenario.data,
effect.info = list(Multiplicative = 1, Fixed.change = 0, Effect.values = "0;-0.3"),
ncores = 1
)
}
\arguments{
\item{dat}{A data.frame containing the pilot data to use in the analysis}
\item{variableType}{A character string indicating the distribution to use
for the response variable. One of "gaussian", "gamma", "poisson", "nbinomial", "binomial" or "beta"}
\item{design.matrix}{A named list specifying the column headings to use from data that
correspond to different elements of the BACI design. Must contain:
Response (the column (heading) name in dat corresponding to the variable for which power is to be examined),
Trials (For a binomial response the column in dat indicating the number of trials),
Location, sublocation , Time, subtime,
BvA (the column (factor) in dat indicating data from "before" versus "after" the impact),
CvI (the column (factor) in dat indicating data from "control" (or reference) versus "impact" sites).
Elements not relevant to the design
must be included in the list, but may be specified as NA (e.g. subtime, sublocation).}
\item{levels.dat}{A named list containing the elements:
Before (the factor level in the BvA column of dat used to indicate the before data),
Control (the factor level in the CvI column of dat used to indicate data
for the control or reference location(s)),
After (the factor level in the BvA column of dat used to indicate the after data), and
Impact (the factor level in the CvI column of dat used to indicate data
for the impact location(s)).}
\item{scenario.data}{A named list containing the elements:
Number.of.iterations (the number of iterations to perform.
Note: It is recommended to run a small number (~5) initially
to confirm the simulation is working. Preliminary results
can be examined using ~100 iterations,
but a minimum of ~500 should be performed for final results),
filename="test" (a name for the simulation data to be saved as),
Number.of.Impact.Locations (the number of Impact Locations in the design),
Number.of.Control.Locations (the number of Control Locations in the design),
Number.of.sublocations.within.Location (the number of sublocations in the design.
Sublocation is nested within Location),
Number.of.sample.times.Before (the number of Before Times in the design),
Number.of.sample.times.After (the number of After Times in the design),
Number.of.subtimes.within.Time (the number of subtimes () in the design.
Subtime () is nested within Time, if there are no subtimes specify as 1),
Number.of.trials (the number of trials where data are a proportion or frequency,
and a binomial distribution is used),
Number.of.replicate.measurements (the number of replicates to use.
Note that a fixed replicate design is specified with replication = 1,
and the replicate ID assigned at "sublocation" above).
Separate multiple values to test using ";". E.g. 3;6;12.}
\item{effect.info}{A named list containing the elements:
Multiplicative (1, if a multiplicative effect is desired, otherwise 0),
Fixed.change (1, if a Fixed (additive) effect is desired, otherwise 0),
Effect.values=-0.3 (For a multiplicative effect: 0 = no effect; -0.1= a 10% decline;
+0.1 = a 10% increase etc.
For an absolute change in the fixed effect: 0 = no effect; +10 = an increase of 10 units;
and -10 = a decrease of 10 units).
Separate multiple effect size values to test using ";".
Note: "Multiplicative"
will multiply the response variable by the supplied value and then add this as the
"effect"; "Fixed" will add the supplied value to the response variable directly.
For either case the "impact" is applied only to "after" "impact" samples.}
\item{ncores}{the number of cores required for the analysis
Unlike fitData(), the function supplyData() only generates the dataComponents
object and does not automatically call powerScenario, this step must be done
manually by the user.
You will need to check the current approximate CPU load
on the available cores before implementing ncores > 1}
}
\value{
A dataComponents list containing all the information required to run powerScenario.
}
\description{
An alternative to fitData() that allows the power analysis to be set up and run
entirely through R, without using the excel interface.
It generates the necessary dataComponents object required to run powerScenario,
which generates the necessary object scenarioParams to run function assessPower.
}
\references{
Fisher R, Shiell GR, Sadler RJ, Inostroza K, Shedrawi G, Holmes TH, McGree JM (2019) epower: an R package for power analysis of Before-After-Control-Impact (BACI) designs. Methods in Ecology and Evolution.
}
\author{
Rebecca Fisher \email{r.fisher@aims.gov.au}
}
|
/man/supplyData.Rd
|
no_license
|
bmtglobal/epower
|
R
| false
| true
| 4,894
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Toolbox_run.r
\name{supplyData}
\alias{supplyData}
\title{supplyData}
\usage{
supplyData(
dat,
variableType = "gaussian",
design.matrix,
levels.dat,
scenario.data,
effect.info = list(Multiplicative = 1, Fixed.change = 0, Effect.values = "0;-0.3"),
ncores = 1
)
}
\arguments{
\item{dat}{A data.frame containing the pilot data to use in the analysis}
\item{variableType}{A character string indicating the distribution to use
for the response variable. One of "gaussian", "gamma", "poisson", "nbinomial", "binomial" or "beta"}
\item{design.matrix}{A named list specifying the column headings to use from data that
correspond to different elements of the BACI design. Must contain:
Response (the column (heading) name in dat corresponding to the variable for which power is to be examined),
Trials (For a binomial response the column in dat indicating the number of trials),
Location, sublocation , Time, subtime,
BvA (the column (factor) in dat indicating data from "before" versus "after" the impact),
CvI (the column (factor) in dat indicating data from "control" (or reference) versus "impact" sites).
Elements not relevant to the design
must be included in the list, but may be specified as NA (e.g. subtime, sublocation).}
\item{levels.dat}{A named list containing the elements:
Before (the factor level in the BvA column of dat used to indicate the before data),
Control (the factor level in the CvI column of dat used to indicate data
for the control or reference location(s)),
After (the factor level in the BvA column of dat used to indicate the after data), and
Impact (the factor level in the CvI column of dat used to indicate data
for the impact location(s)).}
\item{scenario.data}{A named list containing the elements:
Number.of.iterations (the number of iterations to perform.
Note: It is recommended to run a small number (~5) initially
to confirm the simulation is working. Preliminary results
can be examined using ~100 iterations,
but a minimum of ~500 should be performed for final results),
filename="test" (a name for the simulation data to be saved as),
Number.of.Impact.Locations (the number of Impact Locations in the design),
Number.of.Control.Locations (the number of Control Locations in the design),
Number.of.sublocations.within.Location (the number of sublocations in the design.
Sublocation is nested within Location),
Number.of.sample.times.Before (the number of Before Times in the design),
Number.of.sample.times.After (the number of After Times in the design),
Number.of.subtimes.within.Time (the number of subtimes () in the design.
Subtime () is nested within Time, if there are no subtimes specify as 1),
Number.of.trials (the number of trials where data are a proportion or frequency,
and a binomial distribution is used),
Number.of.replicate.measurements (the number of replicates to use.
Note that a fixed replicate design is specified with replication = 1,
and the replicate ID assigned at "sublocation" above).
Separate multiple values to test using ";". E.g. 3;6;12.}
\item{effect.info}{A named list containing the elements:
Multiplicative (1, if a multiplicative effect is desired, otherwise 0),
Fixed.change (1, if a Fixed (additive) effect is desired, otherwise 0),
Effect.values=-0.3 (For a multiplicative effect: 0 = no effect; -0.1= a 10% decline;
+0.1 = a 10% increase etc.
For an absolute change in the fixed effect: 0 = no effect; +10 = an increase of 10 units;
and -10 = a decrease of 10 units).
Separate multiple effect size values to test using ";".
Note: "Multiplicative"
will multiply the response variable by the supplied value and then add this as the
"effect"; "Fixed" will add the supplied value to the response variable directly.
For either case the "impact" is applied only to "after" "impact" samples.}
\item{ncores}{the number of cores required for the analysis
Unlike fitData(), the function supplyData() only generates the dataComponents
object and does not automatically call powerScenario, this step must be done
manually by the user.
You will need to check the current approximate CPU load
on the available cores before implementing ncores > 1}
}
\value{
A dataComponents list containing all the information required to run powerScenario.
}
\description{
An alternative to fitData() that allows the power analysis to be set up and run
entirely through R, without using the excel interface.
It generates the necessary dataComponents object required to run powerScenario,
which generates the necessary object scenarioParams to run function assessPower.
}
\references{
Fisher R, Shiell GR, Sadler RJ, Inostroza K, Shedrawi G, Holmes TH, McGree JM (2019) epower: an R package for power analysis of Before-After-Control-Impact (BACI) designs. Methods in Ecology and Evolution.
}
\author{
Rebecca Fisher \email{r.fisher@aims.gov.au}
}
|
# testthat context for the command-line interface tests.
context("CLI")
#this sets up the CLI interface and the shortcuts ${indir} and ${outdir}
source("utils.R")
configureSys()
#this is to fix some obscure behaviour of R CMD check
Sys.setenv("R_TESTS" = "")
# Run a shell command via bash without displaying any output.
# Errors can happen at the R level (system2 itself fails) or the command
# can exit non-zero: the first case is handled by the tryCatch, the second
# by checking the 'status' attribute on the captured output. In both cases
# the failure is re-signalled as an R error carrying the message text.
runQuiet <- function(cmd){
    code <- paste('-c', shQuote(cmd))
    out <- suppressWarnings(
        tryCatch(
            system2("bash", code, stdout=FALSE, stderr=TRUE),
            error=function(e){
                # bugfix: conditions store their text in 'message', not
                # 'msg'; e$msg was always NULL, losing the error message.
                msg <- conditionMessage(e)
                attr(msg, "status") <- 1
                msg
            }))
    if (!is.null(attr(out, "status"))) stop(paste(out, collapse = '\n'))
}
#test that a command runs
# (expect_runs comes from utils.R; label= keeps the failing command visible)
truns <- function(cmd) expect_runs(runQuiet(cmd), label=cmd)
#test that a command fails
tfails <- function(cmd) expect_error(runQuiet(cmd), label=cmd)
# End-to-end exercise of the epicseg.R CLI: getcounts, segment and report
# subcommands, including both expected-success and expected-failure calls.
test_that("Command line interface",{
    #GETCOUNTS
    #this creates $outdir/countmat.txt,
    #which we will base other tests on
    truns(
    "epicseg.R getcounts --target ${outdir}/countmat.txt \\
    --regions ${indir}/contigs.bed \\
    --mark H3K27ac:${indir}/H3K4me3.bam \\
    --mark H3K27me3:${indir}/H3K36me3.bam \\
    --mark H3K36me3:${indir}/H3K9me3.bam \\
    --mark H3K4me1:${indir}/H3K4me3.bam \\
    --mark H3K4me3:${indir}/H3K36me3.bam \\
    --mark H3K9me3:${indir}/H3K9me3.bam \\
    --mark Input:${indir}/H3K4me3.bam")
    #check if the automatic region trimming works
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam --mark H3K36me3:${indir}/H3K36me3.bam \\
    -b 157 -t ${outdir}/counts_prova.rda")
    #let's try to give three bam files and two pairedends options (it should throw an error)
    tfails(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    -m H3K4me3:${indir}/H3K4me3.bam \\
    -m H3K36me3:${indir}/H3K36me3.bam \\
    -m H3K27me3:${indir}/H3K9me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt -p T -p F")
    #let's try to give the -p option without a value (it should throw an error)
    tfails(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    -m H3K4me3:${indir}/H3K4me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt -p")
    #check if saving to txt works
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam \\
    --mark H3K36me3:${indir}/H3K36me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt")
    #check if it runs with replicate experiments
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam \\
    --mark H3K36me3:${indir}/H3K36me3.bam \\
    --mark H3K36me3:${indir}/H3K9me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt")
    #SEGMENT
    #test segment with a single histone mark
    truns(
    "epicseg.R getcounts --target ${outdir}/countmat_onemark.txt \\
    --regions ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam")
    truns(
    "epicseg.R segment -c ${outdir}/countmat_onemark.txt -r \\
    ${indir}/contigs.bed -n 2 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #one histone mark and one state
    truns(
    "epicseg.R segment -c ${outdir}/countmat_onemark.txt -r \\
    ${indir}/contigs.bed -n 1 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #a more normal usage
    truns(
    "epicseg.R segment -c ${outdir}/countmat.txt -r \\
    ${indir}/contigs.bed -n 10 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #segment with --model option
    truns(
    "epicseg.R segment -n 10 -m ${outdir}/model.txt \\
    -c ${outdir}/countmat.txt -r ${indir}/contigs.bed --outdir ${outdir}/")
    #let's try with an incomplete model (we delete initP and transP from model.txt)
    # (the ${outdir} below is expanded by the shell that system() spawns,
    # using the environment variable set by configureSys())
    system("head ${outdir}/model.txt -n 15 > ${outdir}/incomplete_model.txt")
    truns(
    "epicseg.R segment -n 10 -m ${outdir}/incomplete_model.txt \\
    -c ${outdir}/countmat.txt -r ${indir}/contigs.bed --outdir ${outdir}")
    #let's try out the predict, collapseInitP and save_rdata flags
    truns(
    "epicseg.R segment --collapseInitP T --save_rdata \\
    --notrain -m ${outdir}/model.txt -c ${outdir}/countmat.txt \\
    -r ${indir}/contigs.bed -n 10 --outdir ${outdir}")
    #check whether the rdata was created
    truns("ls ${outdir}/rdata.Rdata")
    #REPORT
    truns(
    "epicseg.R report -m ${outdir}/model.txt -s ${outdir}/segmentation.bed \\
    --outdir ${outdir}")
    #let's try the colors option
    truns(
    "epicseg.R report --colors ${outdir}/colors.txt \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    #let's try the labels option
    writeLines("ale\nmarco\nandrea\ngiacomo\nanna\njuliane\nbarbara\nisabella\nfrancesco\nchiara",
        file.path(Sys.getenv("outdir"), "labels.txt"))
    truns(
    "epicseg.R report --labels ${outdir}/labels.txt \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    truns("ls ${outdir}/segmentation_labelled.bed")
    #let's try multiple annotations
    truns(
    "epicseg.R report \\
    -a genes:${indir}/genes.bed --annot genes2:${indir}/genes.bed \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    truns("ls ${outdir}/annot_genes.txt")
    truns("ls ${outdir}/annot_genes2.txt")
    #now try multiple segmentations
    truns(
    "epicseg.R report --labels ${outdir}/labels.txt -o ${outdir} -m ${outdir}/model.txt \\
    -s seg1:${outdir}/segmentation.bed -s seg2:${outdir}/segmentation.bed \\
    -a genes:${indir}/genes.bed")
})
# Exercises the normalizecounts and multi-dataset segment code paths with
# three count matrices built from permutations of the same three bam files.
test_that("multiple datasets",{
    #make 3 count matrices
    os <- list(c(1,2,3), c(2,1,3), c(3,2,1))
    os <- lapply(os, function(o) c("H3K4me3.bam", "H3K36me3.bam", "H3K9me3.bam")[o])
    dsets <- paste0("cmat", 1:3)
    targets <- file.path("${outdir}", paste0(dsets, ".txt"))
    for (i in seq_along(os)){
        o <- os[[i]]
        # NOTE(review): "H3K36me" looks like it is missing the trailing
        # "3" -- confirm the intended mark names.
        mline <- paste0(collapse=" ", "-m ",
            c("H3K4me3", "H3K36me", "H3K9me3"), ":${indir}/", o)
        truns(paste("epicseg.R getcounts -r ${indir}/contigs.bed -t ", targets[i], mline))
    }
    #run normalize counts
    cline <- paste0(collapse=" ", "-c ", targets)
    truns(paste0("epicseg.R normalizecounts ", cline))
    #check that the new matrices have been created according to the suffix
    # (defSuffix is provided by the package / utils.R)
    newtargets <- gsub(".txt$", paste0(defSuffix, ".txt"), targets)
    for (t in newtargets) truns(paste0("ls ", t))
    #check that using the 'triggerOverwrite' suffix no new files are created
    #(in the temporary directory)
    outdir <- Sys.getenv("outdir")
    lfOld <- list.files(outdir)
    cline <- paste0(collapse=" ", "-c ", newtargets)
    truns(paste0("epicseg.R normalizecounts ", cline, " -s ", triggerOverwrite))
    expect_true(setequal(list.files(outdir), lfOld))
    #check that the segmentation runs with multiple datasets
    goodcline <- paste0(collapse=" ", "-c ", dsets, ":", newtargets)
    truns(paste0("epicseg.R segment ", goodcline, " -n 5 -r ${indir}/contigs.bed -o ${outdir}"))
    #check that without labels it fails!
    badcline <- paste0(collapse=" ", "-c ", newtargets)
    tfails(paste0("epicseg.R segment ", badcline, " -n 5 -r ${indir}/contigs.bed -o ${outdir}"))
})
# Knits the shipped cliExamples.Rmd vignette to verify the documented
# usage examples actually run.
test_that("usage examples", {
    #get location of the Rmarkdown file
    rmd <- system.file("extdata", "cliExamples.Rmd", package="epicseg")
    library(knitr)
    #write markdown output in a temporary file
    tmp <- tempfile("cliexamples", tmpdir=Sys.getenv("outdir"), fileext=".md")
    expect_runs(knit(rmd, tmp))
})
|
/tests/testthat/test-_CLI.R
|
no_license
|
SamBuckberry/epicseg
|
R
| false
| false
| 7,982
|
r
|
# testthat context for the command-line interface tests.
context("CLI")
#this sets up the CLI interface and the shortcuts ${indir} and ${outdir}
source("utils.R")
configureSys()
#this is to fix some obscure behaviour of R CMD check
Sys.setenv("R_TESTS" = "")
# Run a shell command via bash without displaying any output.
# Errors can happen at the R level (system2 itself fails) or the command
# can exit non-zero: the first case is handled by the tryCatch, the second
# by checking the 'status' attribute on the captured output. In both cases
# the failure is re-signalled as an R error carrying the message text.
runQuiet <- function(cmd){
    code <- paste('-c', shQuote(cmd))
    out <- suppressWarnings(
        tryCatch(
            system2("bash", code, stdout=FALSE, stderr=TRUE),
            error=function(e){
                # bugfix: conditions store their text in 'message', not
                # 'msg'; e$msg was always NULL, losing the error message.
                msg <- conditionMessage(e)
                attr(msg, "status") <- 1
                msg
            }))
    if (!is.null(attr(out, "status"))) stop(paste(out, collapse = '\n'))
}
#test that a command runs
# (expect_runs comes from utils.R; label= keeps the failing command visible)
truns <- function(cmd) expect_runs(runQuiet(cmd), label=cmd)
#test that a command fails
tfails <- function(cmd) expect_error(runQuiet(cmd), label=cmd)
# End-to-end exercise of the epicseg.R CLI: getcounts, segment and report
# subcommands, including both expected-success and expected-failure calls.
test_that("Command line interface",{
    #GETCOUNTS
    #this creates $outdir/countmat.txt,
    #which we will base other tests on
    truns(
    "epicseg.R getcounts --target ${outdir}/countmat.txt \\
    --regions ${indir}/contigs.bed \\
    --mark H3K27ac:${indir}/H3K4me3.bam \\
    --mark H3K27me3:${indir}/H3K36me3.bam \\
    --mark H3K36me3:${indir}/H3K9me3.bam \\
    --mark H3K4me1:${indir}/H3K4me3.bam \\
    --mark H3K4me3:${indir}/H3K36me3.bam \\
    --mark H3K9me3:${indir}/H3K9me3.bam \\
    --mark Input:${indir}/H3K4me3.bam")
    #check if the automatic region trimming works
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam --mark H3K36me3:${indir}/H3K36me3.bam \\
    -b 157 -t ${outdir}/counts_prova.rda")
    #let's try to give three bam files and two pairedends options (it should throw an error)
    tfails(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    -m H3K4me3:${indir}/H3K4me3.bam \\
    -m H3K36me3:${indir}/H3K36me3.bam \\
    -m H3K27me3:${indir}/H3K9me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt -p T -p F")
    #let's try to give the -p option without a value (it should throw an error)
    tfails(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    -m H3K4me3:${indir}/H3K4me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt -p")
    #check if saving to txt works
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam \\
    --mark H3K36me3:${indir}/H3K36me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt")
    #check if it runs with replicate experiments
    truns(
    "epicseg.R getcounts -r ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam \\
    --mark H3K36me3:${indir}/H3K36me3.bam \\
    --mark H3K36me3:${indir}/H3K9me3.bam \\
    -b 157 -t ${outdir}/counts_prova.txt")
    #SEGMENT
    #test segment with a single histone mark
    truns(
    "epicseg.R getcounts --target ${outdir}/countmat_onemark.txt \\
    --regions ${indir}/contigs.bed \\
    --mark H3K4me3:${indir}/H3K4me3.bam")
    truns(
    "epicseg.R segment -c ${outdir}/countmat_onemark.txt -r \\
    ${indir}/contigs.bed -n 2 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #one histone mark and one state
    truns(
    "epicseg.R segment -c ${outdir}/countmat_onemark.txt -r \\
    ${indir}/contigs.bed -n 1 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #a more normal usage
    truns(
    "epicseg.R segment -c ${outdir}/countmat.txt -r \\
    ${indir}/contigs.bed -n 10 --nthreads 4 \\
    -a genes:${indir}/genes.bed --maxiter 20 --outdir ${outdir}")
    #segment with --model option
    truns(
    "epicseg.R segment -n 10 -m ${outdir}/model.txt \\
    -c ${outdir}/countmat.txt -r ${indir}/contigs.bed --outdir ${outdir}/")
    #let's try with an incomplete model (we delete initP and transP from model.txt)
    # (the ${outdir} below is expanded by the shell that system() spawns,
    # using the environment variable set by configureSys())
    system("head ${outdir}/model.txt -n 15 > ${outdir}/incomplete_model.txt")
    truns(
    "epicseg.R segment -n 10 -m ${outdir}/incomplete_model.txt \\
    -c ${outdir}/countmat.txt -r ${indir}/contigs.bed --outdir ${outdir}")
    #let's try out the predict, collapseInitP and save_rdata flags
    truns(
    "epicseg.R segment --collapseInitP T --save_rdata \\
    --notrain -m ${outdir}/model.txt -c ${outdir}/countmat.txt \\
    -r ${indir}/contigs.bed -n 10 --outdir ${outdir}")
    #check whether the rdata was created
    truns("ls ${outdir}/rdata.Rdata")
    #REPORT
    truns(
    "epicseg.R report -m ${outdir}/model.txt -s ${outdir}/segmentation.bed \\
    --outdir ${outdir}")
    #let's try the colors option
    truns(
    "epicseg.R report --colors ${outdir}/colors.txt \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    #let's try the labels option
    writeLines("ale\nmarco\nandrea\ngiacomo\nanna\njuliane\nbarbara\nisabella\nfrancesco\nchiara",
        file.path(Sys.getenv("outdir"), "labels.txt"))
    truns(
    "epicseg.R report --labels ${outdir}/labels.txt \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    truns("ls ${outdir}/segmentation_labelled.bed")
    #let's try multiple annotations
    truns(
    "epicseg.R report \\
    -a genes:${indir}/genes.bed --annot genes2:${indir}/genes.bed \\
    -m ${outdir}/model.txt -s ${outdir}/segmentation.bed --outdir ${outdir}")
    truns("ls ${outdir}/annot_genes.txt")
    truns("ls ${outdir}/annot_genes2.txt")
    #now try multiple segmentations
    truns(
    "epicseg.R report --labels ${outdir}/labels.txt -o ${outdir} -m ${outdir}/model.txt \\
    -s seg1:${outdir}/segmentation.bed -s seg2:${outdir}/segmentation.bed \\
    -a genes:${indir}/genes.bed")
})
# Exercises the normalizecounts and multi-dataset segment code paths with
# three count matrices built from permutations of the same three bam files.
test_that("multiple datasets",{
    #make 3 count matrices
    os <- list(c(1,2,3), c(2,1,3), c(3,2,1))
    os <- lapply(os, function(o) c("H3K4me3.bam", "H3K36me3.bam", "H3K9me3.bam")[o])
    dsets <- paste0("cmat", 1:3)
    targets <- file.path("${outdir}", paste0(dsets, ".txt"))
    for (i in seq_along(os)){
        o <- os[[i]]
        # NOTE(review): "H3K36me" looks like it is missing the trailing
        # "3" -- confirm the intended mark names.
        mline <- paste0(collapse=" ", "-m ",
            c("H3K4me3", "H3K36me", "H3K9me3"), ":${indir}/", o)
        truns(paste("epicseg.R getcounts -r ${indir}/contigs.bed -t ", targets[i], mline))
    }
    #run normalize counts
    cline <- paste0(collapse=" ", "-c ", targets)
    truns(paste0("epicseg.R normalizecounts ", cline))
    #check that the new matrices have been created according to the suffix
    # (defSuffix is provided by the package / utils.R)
    newtargets <- gsub(".txt$", paste0(defSuffix, ".txt"), targets)
    for (t in newtargets) truns(paste0("ls ", t))
    #check that using the 'triggerOverwrite' suffix no new files are created
    #(in the temporary directory)
    outdir <- Sys.getenv("outdir")
    lfOld <- list.files(outdir)
    cline <- paste0(collapse=" ", "-c ", newtargets)
    truns(paste0("epicseg.R normalizecounts ", cline, " -s ", triggerOverwrite))
    expect_true(setequal(list.files(outdir), lfOld))
    #check that the segmentation runs with multiple datasets
    goodcline <- paste0(collapse=" ", "-c ", dsets, ":", newtargets)
    truns(paste0("epicseg.R segment ", goodcline, " -n 5 -r ${indir}/contigs.bed -o ${outdir}"))
    #check that without labels it fails!
    badcline <- paste0(collapse=" ", "-c ", newtargets)
    tfails(paste0("epicseg.R segment ", badcline, " -n 5 -r ${indir}/contigs.bed -o ${outdir}"))
})
# Knits the shipped cliExamples.Rmd vignette to verify the documented
# usage examples actually run.
test_that("usage examples", {
    #get location of the Rmarkdown file
    rmd <- system.file("extdata", "cliExamples.Rmd", package="epicseg")
    library(knitr)
    #write markdown output in a temporary file
    tmp <- tempfile("cliexamples", tmpdir=Sys.getenv("outdir"), fileext=".md")
    expect_runs(knit(rmd, tmp))
})
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/functions-protein.R
\name{count.ProteinDetail}
\alias{count.ProteinDetail}
\title{Returns the number of proteins for a particular public project}
\usage{
count.ProteinDetail(project.accession)
}
\arguments{
\item{project.accession}{The project accession to count proteins from}
}
\value{
The count of proteins
}
\description{
Returns the number of proteins for a particular public project
}
\details{
TODO
}
\author{
Jose A. Dianes
}
|
/vignettes/man/count.ProteinDetail.Rd
|
no_license
|
gccong/ddiR-sirius
|
R
| false
| false
| 519
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/functions-protein.R
\name{count.ProteinDetail}
\alias{count.ProteinDetail}
\title{Returns the number of proteins for a particular public project}
\usage{
count.ProteinDetail(project.accession)
}
\arguments{
\item{project.accession}{The project accession to count proteins from}
}
\value{
The count of proteins
}
\description{
Returns the number of proteins for a particular public project
}
\details{
TODO
}
\author{
Jose A. Dianes
}
|
#' Gets details for bills.
#'
#' Data on bills in Congress goes back to 2009, and comes from a mix of sources:
#' \itemize{
#' \item Scrapers at github.com/unitedstates for most data, including core status and history
#' information.
#' \item Bulk data at GPO's FDSys for version information, and full text.
#' \item The House' MajorityLeader.gov and Senate Democrats' official site for notices of upcoming
#' debate.
#' }
#'
#' @export
#' @template bills
#' @template cg
#'
#' @details
#' History: The history field includes useful flags and dates/times in a bill's life. The above is
#' a real-life example of H.R. 3590 - not all fields will be present for every bill. Time fields
#' can hold either dates or times - Congress is inconsistent about providing specific timestamps.
#'
#' @return Committee details including subcommittees and all members.
#'
#' @examples \dontrun{
#' # Bill lookup (i.e., filter)
#' cg_bills(congress=113, history.enacted=TRUE)
#' cg_bills(history.active=TRUE, order='last_action_at')
#' cg_bills(sponsor.party='R', history.vetoed=TRUE)
#' cg_bills(enacted_as.law_type='private', order='history.enacted_at')
#' cg_bills(bill_type__in='hjres|sjres', history.house_passage_result__exists=TRUE,
#' history.senate_passage_result__exists=TRUE)
#'
#' # Bill search
#' cg_bills(query='health care')
#' cg_bills(query='health care', history.enacted=TRUE)
#' cg_bills(query='freedom of information')
#' cg_bills(query='"freedom of information" accountab*')
#' cg_bills(query='"transparency accountability"~5', highlight=TRUE)
#'
#' # Disable pagination
#' cg_bills(per_page='all')
#' }
cg_bills <- function(query = NULL, bill_id = NULL, bill_type = NULL, number = NULL,
  congress = NULL, chamber = NULL, introduced_on = NULL, last_action_at = NULL,
  last_vote_at = NULL, last_version_on = NULL, highlight = NULL, history.active = NULL,
  history.active_at = NULL, history.house_passage_result = NULL,
  history.house_passage_result_at = NULL, history.senate_cloture_result = NULL,
  history.senate_cloture_result_at = NULL, history.senate_passage_result = NULL,
  history.senate_passage_result_at = NULL, history.vetoed = NULL, history.vetoed_at = NULL,
  history.house_override_result = NULL, history.house_override_result_at = NULL,
  history.senate_override_result = NULL, history.senate_override_result_at = NULL,
  history.awaiting_signature = NULL, history.awaiting_signature_since = NULL,
  history.enacted = NULL, history.enacted_at = NULL,
  sponsor.party = NULL, enacted_as.law_type = NULL, bill_type__in = NULL,
  history.house_passage_result__exists = NULL, history.senate_passage_result__exists = NULL,
  nicknames=NULL, keywords=NULL, sponsor_id=NULL, cosponsor_ids=NULL, cosponsors_count=NULL,
  withdrawn_cosponsors_count=NULL, withdrawn_cosponsor_ids=NULL, committee_ids=NULL,
  related_bill_ids=NULL, enacted_as.congress=NULL,
  enacted_as.number=NULL, fields=NULL, page = 1, per_page = 20, order = NULL,
  key=getOption("SunlightLabsKey", stop("need an API key for Sunlight Labs")),
  return='table', ...)
{
  # A free-text query goes to the search endpoint; plain filtering uses /bills.
  if(is.null(query)){
    url <- paste0(cgurl(), '/bills')
  } else {
    url <- paste0(cgurl(), '/bills/search')
  }
  # suncompact() drops NULL entries so only the filters actually supplied reach
  # the API; ll() serializes logical flags as the lowercase strings it expects.
  args <- suncompact(list(apikey=key,query=query,bill_id=bill_id,bill_type=bill_type,
    number=number,congress=congress,chamber=chamber,introduced_on=introduced_on,
    last_action_at=last_action_at,last_vote_at=last_vote_at,last_version_on=last_version_on,
    highlight=highlight,history.active=ll(history.active), history.active_at=history.active_at,
    history.house_passage_result=history.house_passage_result,
    history.house_passage_result_at=history.house_passage_result_at,
    history.senate_cloture_result=history.senate_cloture_result,
    history.senate_cloture_result_at=history.senate_cloture_result_at,
    history.senate_passage_result=history.senate_passage_result,
    history.senate_passage_result_at=history.senate_passage_result_at,
    history.vetoed=ll(history.vetoed), history.vetoed_at=history.vetoed_at,
    history.house_override_result=history.house_override_result,
    history.house_override_result_at=history.house_override_result_at,
    history.senate_override_result=history.senate_override_result,
    history.senate_override_result_at=history.senate_override_result_at,
    history.awaiting_signature=ll(history.awaiting_signature),
    history.awaiting_signature_since=history.awaiting_signature_since,
    history.enacted=ll(history.enacted), history.enacted_at=history.enacted_at,
    sponsor.party=sponsor.party,order=order,enacted_as.law_type=enacted_as.law_type,
    bill_type__in=bill_type__in,
    history.house_passage_result__exists=history.house_passage_result__exists,
    history.senate_passage_result__exists=history.senate_passage_result__exists,
    page=page,per_page=per_page,fields=fields,
    nicknames=nicknames, keywords=keywords, sponsor_id=sponsor_id, cosponsor_ids=cosponsor_ids,
    cosponsors_count=cosponsors_count, withdrawn_cosponsors_count=withdrawn_cosponsors_count,
    # Fix: withdrawn_cosponsor_ids was accepted by the signature but never
    # forwarded to the API, so the filter was silently ignored.
    withdrawn_cosponsor_ids=withdrawn_cosponsor_ids,
    committee_ids=committee_ids, related_bill_ids=related_bill_ids, enacted_as.congress=enacted_as.congress,
    enacted_as.number=enacted_as.number))
  return_obj(return, query(url, args, ...))
}
# Serialize a logical flag for the query string; the Sunlight API expects the
# lowercase strings "true"/"false". NULL passes through (and is later dropped
# by suncompact()). Fix: the original only lowercased TRUE and returned FALSE
# unconverted, so the two values were serialized inconsistently.
ll <- function(x) if (!is.null(x)) tolower(x)
|
/R/cg_bills.R
|
permissive
|
sorensje/rsunlight
|
R
| false
| false
| 5,278
|
r
|
#' Gets details for bills.
#'
#' Data on bills in Congress goes back to 2009, and comes from a mix of sources:
#' \itemize{
#' \item Scrapers at github.com/unitedstates for most data, including core status and history
#' information.
#' \item Bulk data at GPO's FDSys for version information, and full text.
#' \item The House' MajorityLeader.gov and Senate Democrats' official site for notices of upcoming
#' debate.
#' }
#'
#' @export
#' @template bills
#' @template cg
#'
#' @details
#' History: The history field includes useful flags and dates/times in a bill's life. The above is
#' a real-life example of H.R. 3590 - not all fields will be present for every bill. Time fields
#' can hold either dates or times - Congress is inconsistent about providing specific timestamps.
#'
#' @return Committee details including subcommittees and all members.
#'
#' @examples \dontrun{
#' # Bill lookup (i.e., filter)
#' cg_bills(congress=113, history.enacted=TRUE)
#' cg_bills(history.active=TRUE, order='last_action_at')
#' cg_bills(sponsor.party='R', history.vetoed=TRUE)
#' cg_bills(enacted_as.law_type='private', order='history.enacted_at')
#' cg_bills(bill_type__in='hjres|sjres', history.house_passage_result__exists=TRUE,
#' history.senate_passage_result__exists=TRUE)
#'
#' # Bill search
#' cg_bills(query='health care')
#' cg_bills(query='health care', history.enacted=TRUE)
#' cg_bills(query='freedom of information')
#' cg_bills(query='"freedom of information" accountab*')
#' cg_bills(query='"transparency accountability"~5', highlight=TRUE)
#'
#' # Disable pagination
#' cg_bills(per_page='all')
#' }
cg_bills <- function(query = NULL, bill_id = NULL, bill_type = NULL, number = NULL,
congress = NULL, chamber = NULL, introduced_on = NULL, last_action_at = NULL,
last_vote_at = NULL, last_version_on = NULL, highlight = NULL, history.active = NULL,
history.active_at = NULL, history.house_passage_result = NULL,
history.house_passage_result_at = NULL, history.senate_cloture_result = NULL,
history.senate_cloture_result_at = NULL, history.senate_passage_result = NULL,
history.senate_passage_result_at = NULL, history.vetoed = NULL, history.vetoed_at = NULL,
history.house_override_result = NULL, history.house_override_result_at = NULL,
history.senate_override_result = NULL, history.senate_override_result_at = NULL,
history.awaiting_signature = NULL, history.awaiting_signature_since = NULL,
history.enacted = NULL, history.enacted_at = NULL,
sponsor.party = NULL, enacted_as.law_type = NULL, bill_type__in = NULL,
history.house_passage_result__exists = NULL, history.senate_passage_result__exists = NULL,
nicknames=NULL, keywords=NULL, sponsor_id=NULL, cosponsor_ids=NULL, cosponsors_count=NULL,
withdrawn_cosponsors_count=NULL, withdrawn_cosponsor_ids=NULL, committee_ids=NULL,
related_bill_ids=NULL, enacted_as.congress=NULL,
enacted_as.number=NULL, fields=NULL, page = 1, per_page = 20, order = NULL,
key=getOption("SunlightLabsKey", stop("need an API key for Sunlight Labs")),
return='table', ...)
{
if(is.null(query)){
url <- paste0(cgurl(), '/bills')
} else {
url <- paste0(cgurl(), '/bills/search')
}
args <- suncompact(list(apikey=key,query=query,bill_id=bill_id,bill_type=bill_type,
number=number,congress=congress,chamber=chamber,introduced_on=introduced_on,
last_action_at=last_action_at,last_vote_at=last_vote_at,last_version_on=last_version_on,
highlight=highlight,history.active=ll(history.active), history.active_at=history.active_at,
history.house_passage_result=history.house_passage_result,
history.house_passage_result_at=history.house_passage_result_at,
history.senate_cloture_result=history.senate_cloture_result,
history.senate_cloture_result_at=history.senate_cloture_result_at,
history.senate_passage_result=history.senate_passage_result,
history.senate_passage_result_at=history.senate_passage_result_at,
history.vetoed=ll(history.vetoed), history.vetoed_at=history.vetoed_at,
history.house_override_result=history.house_override_result,
history.house_override_result_at=history.house_override_result_at,
history.senate_override_result=history.senate_override_result,
history.senate_override_result_at=history.senate_override_result_at,
history.awaiting_signature=ll(history.awaiting_signature),
history.awaiting_signature_since=history.awaiting_signature_since,
history.enacted=ll(history.enacted), history.enacted_at=history.enacted_at,
sponsor.party=sponsor.party,order=order,enacted_as.law_type=enacted_as.law_type,
bill_type__in=bill_type__in,
history.house_passage_result__exists=history.house_passage_result__exists,
history.senate_passage_result__exists=history.senate_passage_result__exists,
page=page,per_page=per_page,fields=fields,
nicknames=nicknames, keywords=keywords, sponsor_id=sponsor_id, cosponsor_ids=cosponsor_ids,
cosponsors_count=cosponsors_count, withdrawn_cosponsors_count=withdrawn_cosponsors_count,
committee_ids=committee_ids, related_bill_ids=related_bill_ids, enacted_as.congress=enacted_as.congress,
enacted_as.number=enacted_as.number))
return_obj(return, query(url, args, ...))
}
# Serialize a logical flag for the query string; the Sunlight API expects the
# lowercase strings "true"/"false". NULL passes through (and is later dropped
# by suncompact()). Fix: the original only lowercased TRUE and returned FALSE
# unconverted, so the two values were serialized inconsistently.
ll <- function(x) if (!is.null(x)) tolower(x)
|
# Fuzzer-derived regression input for the internal truncated-normal sampler.
fuzz_args <- list(hi = 0, lo = 8.89318162514244e-323, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp, fuzz_args)
str(result)
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610047600-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 126
|
r
|
# Fuzzer-derived regression input for the internal truncated-normal sampler.
fuzz_args <- list(hi = 0, lo = 8.89318162514244e-323, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp, fuzz_args)
str(result)
|
##################
## categoryToEntrezBuilder
## Create a mapping from the categories (KEGG pathways) to the Entrez ids.
setMethod("categoryToEntrezBuilder",
          signature(p="KEGGListHyperGParams"),
          function(p) {
              ## keep.all: under-representation tests keep every probe,
              ## over-representation tests keep only annotated ones.
              keep.all <- switch(testDirection(p),
                                 over=FALSE,
                                 under=TRUE,
                                 stop("Bad testDirection slot"))
              geneIds <- unique(unlist(geneIds(p)))
              lib <- annotation(p)
              ## org.*.eg organism packages store the KEGG map as PATH2EG;
              ## chip annotation packages store it as PATH2PROBE.
              isORGEG = grep("org.*.eg", lib)
              if( length(isORGEG) > 0 )
                  kegg2allprobes <- Category:::getDataEnv("PATH2EG", lib)
              else
                  kegg2allprobes <- Category:::getDataEnv("PATH2PROBE", lib)
              probeAnnot <- Category:::getKeggToProbeMap(kegg2allprobes)
              ## Delegate the probe -> Entrez mapping (restricted to the
              ## supplied universe) to Category's internal helper.
              Category:::probeToEntrezMapHelper(probeAnnot, geneIds, p@datPkg, universeGeneIds(p),
                                                keep.all=keep.all)
          })
######################
## universeBuilder
## Returns the Entrez ids from the supplied universe that
## have at least one KEGG annotation.
## Fix: the signature was written as `signature=(p="KEGGListHyperGParams")`;
## via the `(p = ...)` evaluation quirk that also assigned a stray global `p`.
## Plain signature(p=...) matches the other setMethod calls in this file.
setMethod("universeBuilder", signature(p="KEGGListHyperGParams"),
          function(p) {
              entrezIds <- universeGeneIds(p)
              ## All genes carrying at least one KEGG pathway annotation.
              SQL <- "select distinct gene_id from genes, kegg where genes._id = kegg._id"
              db <- do.call(paste(p@annotation, sep="_", "dbconn"), list())
              univ <- dbGetQuery(db, SQL)[[1]]
              ## Restrict to the user-supplied universe, when one is given.
              if (!is.null(entrezIds) && length(entrezIds) > 0)
                  univ <- intersect(univ, unlist(entrezIds))
              if (length(univ) < 1)
                  stop("No Entrez Gene ids left in universe")
              univ
          })
#####################
## hyperGTest
## Run the generic list-based hypergeometric test, then promote the
## resulting pieces into the KEGG-specific result class.
setMethod("hyperGTest",
          signature(p="KEGGListHyperGParams"),
          function(p) {
              pieces <- isa.ListHyperGTest(p)
              do.call(new, c("KEGGListHyperGResult", pieces))
          })
## Render an HTML report: one table per module, with KEGG pathway ids
## translated to human-readable names via KEGG.db.
setMethod("htmlReport", signature(r="KEGGListHyperGResult"),
          function(r, file="", append=FALSE, label="", digits=3, summary.args=NULL) {
              library(xtable)
              library(KEGG.db)
              ## summary() can be customised through summary.args
              ## (passed as extra arguments to the summary method).
              summ <- do.call("summary", c(list(r), summary.args))
              for (i in seq_along(summ)) {
                  summ[[i]]$Pathway <- unlist(mget(rownames(summ[[i]]), KEGGPATHID2NAME))
              }
              res <- lapply(summ, html.df, label=label, digits=digits,
                            display=c("s", "g", "g", "g", "g", "g", "g", "s"))
              ## With a non-NULL file the tables are written out (optionally
              ## appending) and returned invisibly; otherwise they are returned.
              if (!is.null(file)) {
                  do.call("cat", c(res, list(file=file, sep="\n\n", append=append)))
                  invisible(res)
              } else {
                  res
              }
          })
## Compute KEGG pathway enrichment for a set of ISA modules.
##   modules: the ISA modules whose features are tested for enrichment.
##   ann: annotation package name (without the ".db" suffix).
##   features: probe ids defining the gene universe.
##   hgCutoff: p-value cutoff passed to the hypergeometric test.
##   correction: whether to adjust p-values with correction.method.
## Returns the (optionally p-adjusted) hyperGTest result object.
ISAKEGG <- function(modules,
                    ann=annotation(modules),
                    features=featureNames(modules),
                    hgCutoff=0.05,
                    correction=TRUE, correction.method="holm") {
    isa2:::isa.status("Calculating KEGG enrichment", "in")
    ## Load the chip annotation package and the KEGG mappings.
    library(paste(sep="", ann, ".db"), character.only=TRUE)
    library(KEGG.db)
    ENTREZ <- get(paste(sep="", ann, "ENTREZID"))
    ## Map each module's probe ids to unique Entrez ids.
    selectedEntrezIds <- getFeatureNames(modules)
    selectedEntrezIds <- lapply(selectedEntrezIds,
                                function(x) unlist(mget(x, ENTREZ)))
    selectedEntrezIds <- lapply(selectedEntrezIds, unique)
    entrezUniverse <- unique(unlist(mget(features, ENTREZ)))
    ## NOTE(review): the try() result is never checked; if construction
    ## fails, hyperGTest() below receives a try-error object -- confirm
    ## whether the try() wrapper is still needed.
    params <-
        try( new("KEGGListHyperGParams", geneIds = selectedEntrezIds,
                 universeGeneIds = entrezUniverse, annotation = ann,
                 pvalueCutoff = hgCutoff, testDirection = "over", drive=TRUE) )
    hgOver <- hyperGTest(params)
    ## Optionally adjust the raw p-values in place, module by module.
    if (correction) {
        for (i in seq_along(hgOver@reslist)) {
            hgOver@reslist[[i]]$Pvalue <- p.adjust(hgOver@reslist[[i]]$Pvalue,
                                                   method=correction.method)
        }
    }
    isa2:::isa.status("DONE", "out")
    hgOver
}
|
/eisa/R/KEGG.R
|
no_license
|
gaborcsardi/ISA
|
R
| false
| false
| 3,939
|
r
|
##################
## categoryToEntrezBuilder
## Create a mapping from the categories to the Entrez ids
setMethod("categoryToEntrezBuilder",
signature(p="KEGGListHyperGParams"),
function(p) {
keep.all <- switch(testDirection(p),
over=FALSE,
under=TRUE,
stop("Bad testDirection slot"))
geneIds <- unique(unlist(geneIds(p)))
lib <- annotation(p)
isORGEG = grep("org.*.eg", lib)
if( length(isORGEG) > 0 )
kegg2allprobes <- Category:::getDataEnv("PATH2EG", lib)
else
kegg2allprobes <- Category:::getDataEnv("PATH2PROBE", lib)
probeAnnot <- Category:::getKeggToProbeMap(kegg2allprobes)
Category:::probeToEntrezMapHelper(probeAnnot, geneIds, p@datPkg, universeGeneIds(p),
keep.all=keep.all)
})
######################
## universeBuilder
## It returns the Entrez ids from the supplied universe that
## have at least one KEGG annotation
setMethod("universeBuilder", signature=(p="KEGGListHyperGParams"),
function(p) {
entrezIds <- universeGeneIds(p)
SQL <- "select distinct gene_id from genes, kegg where genes._id = kegg._id"
db <- do.call(paste(p@annotation, sep="_", "dbconn"), list())
univ <- dbGetQuery(db, SQL)[[1]]
if (!is.null(entrezIds) && length(entrezIds) > 0)
univ <- intersect(univ, unlist(entrezIds))
if (length(univ) < 1)
stop("No Entrez Gene ids left in universe")
univ
})
#####################
## hyperGTest
setMethod("hyperGTest",
signature(p="KEGGListHyperGParams"), function(p) {
res <- isa.ListHyperGTest(p)
do.call(new, c("KEGGListHyperGResult", res))
})
setMethod("htmlReport", signature(r="KEGGListHyperGResult"),
function(r, file="", append=FALSE, label="", digits=3, summary.args=NULL) {
library(xtable)
library(KEGG.db)
summ <- do.call("summary", c(list(r), summary.args))
for (i in seq_along(summ)) {
summ[[i]]$Pathway <- unlist(mget(rownames(summ[[i]]), KEGGPATHID2NAME))
}
res <- lapply(summ, html.df, label=label, digits=digits,
display=c("s", "g", "g", "g", "g", "g", "g", "s"))
if (!is.null(file)) {
do.call("cat", c(res, list(file=file, sep="\n\n", append=append)))
invisible(res)
} else {
res
}
})
ISAKEGG <- function(modules,
ann=annotation(modules),
features=featureNames(modules),
hgCutoff=0.05,
correction=TRUE, correction.method="holm") {
isa2:::isa.status("Calculating KEGG enrichment", "in")
library(paste(sep="", ann, ".db"), character.only=TRUE)
library(KEGG.db)
ENTREZ <- get(paste(sep="", ann, "ENTREZID"))
selectedEntrezIds <- getFeatureNames(modules)
selectedEntrezIds <- lapply(selectedEntrezIds,
function(x) unlist(mget(x, ENTREZ)))
selectedEntrezIds <- lapply(selectedEntrezIds, unique)
entrezUniverse <- unique(unlist(mget(features, ENTREZ)))
params <-
try( new("KEGGListHyperGParams", geneIds = selectedEntrezIds,
universeGeneIds = entrezUniverse, annotation = ann,
pvalueCutoff = hgCutoff, testDirection = "over", drive=TRUE) )
hgOver <- hyperGTest(params)
if (correction) {
for (i in seq_along(hgOver@reslist)) {
hgOver@reslist[[i]]$Pvalue <- p.adjust(hgOver@reslist[[i]]$Pvalue,
method=correction.method)
}
}
isa2:::isa.status("DONE", "out")
hgOver
}
|
\name{plotOptimResultsPan}
\alias{plotOptimResultsPan}
\title{
Plots the data and simulated values from any CellNOptR formalism
}
\description{
This function plots the data and simulated values according to each experiment in CNOlist. The data is shown as black triangles and the simulation by a blue dashed line. The combination of cues is given by a panel where black denotes the presence and white the absence of the cue. The goodness-of-fit between model and data is color-coded on a continuous scale from white to red.
}
\usage{
plotOptimResultsPan(simResults, yInterpol=NULL, xCoords=NULL,
CNOlist=CNOlist, formalism=c("ss1","ss2","ssN","dt","ode"), pdf=FALSE,
pdfFileName="", tPt=NULL,
plotParams = list(margin = 0.1, width=15, height=12, cmap_scale=1, cex=1.6,
ymin=NULL, Fac=1, rotation=0))
}
\arguments{
\item{simResults}{
A list with a field for each time point, each containing a matrix of dimensions (number of conditions) * (number of signals), with the first field being t0. Typically produced by simulating a model and then extracting the columns that correspond to signals.
}
\item{yInterpol}{
If using CNORdt, these are the interpolated experimental results from getFitTimeScale() that are needed to compare against the Boolean simulation.
}
\item{xCoords}{
These are the x-coordinates obtained from the optimized scaling factor in CNORdt that allow for comparison between time course experimental data and a Boolean model.
}
\item{CNOlist}{
A CNOlist.
}
\item{formalism}{
An abbreviation of the CellNOptR formalism being used.
}
\item{pdf}{
A Boolean argument denoting whether to print the figure produced by this function to file.
}
\item{pdfFileName}{
If printing to file, the filename to be used.
}
\item{tPt}{
The number of time points in the data.
}
\item{plotParams}{
a list of options related to the PDF and plotting outputs. Currently, the
following attributes are used: (1) margin of the boxes, (2) width and height used while creating the
PDF, (3) cmap_scale, a value that scales the colors towards small errors (<1) or
large errors (>1); default is 1 (linear colormap), (4) cex, the font size used
in the header, (5) ymin, which sets the minimum y axis limit; by default it is the
minimum value found over all data points and therefore can be negative.
}
}
\details{
Depending on the logic formalism, this function is generally called from cutAndPlotResults*(). As shown in the example below however, it can plot the fit of any data and corresponding compatible model. The color denotes the goodness-of-fit, where white shows no difference between simulation and data and red is the maximum error from all conditions and readouts.
}
\value{
This function does not return a value.
}
\references{
J. Saez-Rodriguez, L. G. Alexopoulos, J. Epperlein, R. Samaga, D. A. Lauffenburger, S. Klamt and P. K. Sorger. Discrete logic modeling as a means to link protein signaling networks with functional analysis of
mammalian signal transduction, Molecular Systems Biology, 5:331, 2009.
}
\author{
A. MacNamara
}
\seealso{
cutAndPlotResultsT1
}
\examples{
data(CNOlistToy,package="CellNOptR")
data(ToyModel,package="CellNOptR")
indicesToy <- indexFinder(CNOlistToy, ToyModel, verbose=TRUE)
ToyFields4Sim <- prep4sim(ToyModel)
# simulate model
simRes <- simulatorT1(CNOlist=CNOlistToy, model=ToyModel, simList=ToyFields4Sim, indexList=indicesToy)
simRes = simRes[, indicesToy$signals]
# format data and results
simResults <- list(t0=matrix(data=0, nrow=dim(simRes)[1], ncol=dim(simRes)[2]), t1=simRes)
# plot
plotOptimResultsPan(simResults,
CNOlist=CNOlistToy,
formalism="ss1",
pdf=FALSE,
tPt=10
)
}
|
/man/plotOptimResultsPan.Rd
|
no_license
|
saezlab/CellNOptR
|
R
| false
| false
| 3,623
|
rd
|
\name{plotOptimResultsPan}
\alias{plotOptimResultsPan}
\title{
Plots the data and simulated values from any CellNOptR formalism
}
\description{
This function plots the data and simulated values according to each experiment in CNOlist. The data is shown as black triangles and the simulation by a blue dashed line. The combination of cues is given by a panel where black denotes the presence and white the absence of the cue. The goodness-of-fit between model and data is color-coded on a continuous scale from white to red.
}
\usage{
plotOptimResultsPan(simResults, yInterpol=NULL, xCoords=NULL,
CNOlist=CNOlist, formalism=c("ss1","ss2","ssN","dt","ode"), pdf=FALSE,
pdfFileName="", tPt=NULL,
plotParams = list(margin = 0.1, width=15, height=12, cmap_scale=1, cex=1.6,
ymin=NULL, Fac=1, rotation=0))
}
\arguments{
\item{simResults}{
A list with a field for each time point, each containing a matrix of dimensions (number of conditions) * (number of signals), with the first field being t0. Typically produced by simulating a model and then extracting the columns that correspond to signals.
}
\item{yInterpol}{
If using CNORdt, these are the interpolated experimental results from getFitTimeScale() that are needed to compare against the Boolean simulation.
}
\item{xCoords}{
These are the x-coordinates obtained from the optimized scaling factor in CNORdt that allow for comparison between time course experimental data and a Boolean model.
}
\item{CNOlist}{
A CNOlist.
}
\item{formalism}{
An abbreviation of the CellNOptR formalism being used.
}
\item{pdf}{
A Boolean argument denoting whether to print the figure produced by this function to file.
}
\item{pdfFileName}{
If printing to file, the filename to be used.
}
\item{tPt}{
The number of time points in the data.
}
\item{plotParams}{
a list of options related to the PDF and plotting outputs. Currently, the
following attributes are used: (1) margin of the boxes, (2) width and height used while creating the
PDF, (3) cmap_scale, a value that scales the colors towards small errors (<1) or
large errors (>1); default is 1 (linear colormap), (4) cex, the font size used
in the header, (5) ymin, which sets the minimum y axis limit; by default it is the
minimum value found over all data points and therefore can be negative.
}
}
\details{
Depending on the logic formalism, this function is generally called from cutAndPlotResults*(). As shown in the example below however, it can plot the fit of any data and corresponding compatible model. The color denotes the goodness-of-fit, where white shows no difference between simulation and data and red is the maximum error from all conditions and readouts.
}
\value{
This function does not return a value.
}
\references{
J. Saez-Rodriguez, L. G. Alexopoulos, J. Epperlein, R. Samaga, D. A. Lauffenburger, S. Klamt and P. K. Sorger. Discrete logic modeling as a means to link protein signaling networks with functional analysis of
mammalian signal transduction, Molecular Systems Biology, 5:331, 2009.
}
\author{
A. MacNamara
}
\seealso{
cutAndPlotResultsT1
}
\examples{
data(CNOlistToy,package="CellNOptR")
data(ToyModel,package="CellNOptR")
indicesToy <- indexFinder(CNOlistToy, ToyModel, verbose=TRUE)
ToyFields4Sim <- prep4sim(ToyModel)
# simulate model
simRes <- simulatorT1(CNOlist=CNOlistToy, model=ToyModel, simList=ToyFields4Sim, indexList=indicesToy)
simRes = simRes[, indicesToy$signals]
# format data and results
simResults <- list(t0=matrix(data=0, nrow=dim(simRes)[1], ncol=dim(simRes)[2]), t1=simRes)
# plot
plotOptimResultsPan(simResults,
CNOlist=CNOlistToy,
formalism="ss1",
pdf=FALSE,
tPt=10
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAreas.R
\name{getAreas}
\alias{getAreas}
\alias{getDistricts}
\title{Select and exclude areas}
\usage{
getAreas(
select = NULL,
exclude = NULL,
withClustersOnly = FALSE,
regexpSelect = TRUE,
regexpExclude = TRUE,
opts = simOptions(),
ignore.case = TRUE,
districts = NULL
)
getDistricts(
select = NULL,
exclude = NULL,
regexpSelect = TRUE,
regexpExclude = TRUE,
opts = simOptions(),
ignore.case = TRUE
)
}
\arguments{
\item{select}{Character vector. If \code{regexpSelect} is TRUE, this vector is
interpreted as a list of regular expressions. Else it is interpreted as a
list of area names. If \code{NULL}, all areas are selected}
\item{exclude}{Character vector. If \code{regexpExclude} is TRUE, this vector is
interpreted as a list of regular expressions and each area matching one
of them is excluded. Else it is interpreted as a list of area names to
exclude. If \code{NULL}, no area is excluded.}
\item{withClustersOnly}{Should the function return only nodes containing clusters ?}
\item{regexpSelect}{Is \code{select} a list of regular expressions ?}
\item{regexpExclude}{Is \code{exclude} a list of regular expressions ?}
\item{opts}{list of simulation parameters returned by the function
\code{\link{setSimulationPath}}}
\item{ignore.case}{Should the case be ignored when evaluating the regular
expressions ?}
\item{districts}{Names of districts. If this argument is not null, only areas belonging
to the specified districts are returned.}
}
\value{
A character vector containing the name of the areas/districts satisfying the
rules defined by the parameters.
}
\description{
\code{getAreas} and \code{getDistricts} are utility functions that build
lists of areas or districts by using regular expressions to select and/or
exclude areas/districts.
}
\seealso{
\code{\link{getLinks}}
}
|
/man/getAreas.Rd
|
no_license
|
cran/antaresRead
|
R
| false
| true
| 1,981
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAreas.R
\name{getAreas}
\alias{getAreas}
\alias{getDistricts}
\title{Select and exclude areas}
\usage{
getAreas(
select = NULL,
exclude = NULL,
withClustersOnly = FALSE,
regexpSelect = TRUE,
regexpExclude = TRUE,
opts = simOptions(),
ignore.case = TRUE,
districts = NULL
)
getDistricts(
select = NULL,
exclude = NULL,
regexpSelect = TRUE,
regexpExclude = TRUE,
opts = simOptions(),
ignore.case = TRUE
)
}
\arguments{
\item{select}{Character vector. If \code{regexpSelect} is TRUE, this vector is
interpreted as a list of regular expressions. Else it is interpreted as a
list of area names. If \code{NULL}, all areas are selected}
\item{exclude}{Character vector. If \code{regexpExclude} is TRUE, this vector is
interpreted as a list of regular expressions and each area matching one
of them is excluded. Else it is interpreted as a list of area names to
exclude. If \code{NULL}, no area is excluded.}
\item{withClustersOnly}{Should the function return only nodes containing clusters ?}
\item{regexpSelect}{Is \code{select} a list of regular expressions ?}
\item{regexpExclude}{Is \code{exclude} a list of regular expressions ?}
\item{opts}{list of simulation parameters returned by the function
\code{\link{setSimulationPath}}}
\item{ignore.case}{Should the case be ignored when evaluating the regular
expressions ?}
\item{districts}{Names of districts. If this argument is not null, only areas belonging
to the specified districts are returned.}
}
\value{
A character vector containing the name of the areas/districts satisfying the
rules defined by the parameters.
}
\description{
\code{getAreas} and \code{getDistricts} are utility functions that build
lists of areas or districts by using regular expressions to select and/or
exclude areas/districts.
}
\seealso{
\code{\link{getLinks}}
}
|
library(tidyverse)
# Clear the workspace *before* defining helpers: in the original order the
# rm(list = ls()) call deleted getDataPath() right after it was defined,
# so the getDataPath() call further down the script would fail.
rm(list = ls())
# MASS masks dplyr::select(); detach it only when it is actually attached,
# so the script does not error when MASS was never loaded in this session.
if ("package:MASS" %in% search()) detach("package:MASS")
# Build an absolute path under the Bowra fieldwork project folder.
getDataPath <- function (...) {
  return(file.path("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra", ...))
}
#Windy minutes selection - each recording's R3D spectral-index CSV is paired
#with its manually identified windy minute; read each file, keep that minute's
#row, and record the source file name in a File column. This replaces twelve
#copy-pasted read/filter/mutate blocks with one data-driven table.
results_dir <- "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/ResultsIndices_Channel1"
# Relative CSV path -> windy minute (the value of the Index column to keep).
windy_minutes <- c(
  "JZ001_WB28/BOW-JZ1-WB28_20191014_180000.wav/Towsey.Acoustic/BOW-JZ1-WB28_20191014_180000__Towsey.Acoustic.R3D.csv" = "2",
  "JZ002_WB11/BOW-JZ2-WB11_20191014_205938.wav/Towsey.Acoustic/BOW-JZ2-WB11_20191014_205938__Towsey.Acoustic.R3D.csv" = "20",
  "JZ003_WB25/BOW-JZ3-WB25_20191015_065852.wav/Towsey.Acoustic/BOW-JZ3-WB25_20191015_065852__Towsey.Acoustic.R3D.csv" = "18",
  "SM401_WB22/BOW-401-WB22_20191015_085838.wav/Towsey.Acoustic/BOW-401-WB22_20191015_085838__Towsey.Acoustic.R3D.csv" = "33",
  "SM402_WB15/BOW-402-WB15_20191015_105827.wav/Towsey.Acoustic/BOW-402-WB15_20191015_105827__Towsey.Acoustic.R3D.csv" = "14",
  "SM403_WB49/BOW-403-WB49_20191015_125827.wav/Towsey.Acoustic/BOW-403-WB49_20191015_125827__Towsey.Acoustic.R3D.csv" = "51",
  "SM404_WB56/BOW-404-WB56_20191015_145807.wav/Towsey.Acoustic/BOW-404-WB56_20191015_145807__Towsey.Acoustic.R3D.csv" = "56",
  "SM406_WB46/BOW-406-WB46_20191015_160002.wav/Towsey.Acoustic/BOW-406-WB46_20191015_160002__Towsey.Acoustic.R3D.csv" = "51",
  "SM410_WB43/BOW-410-WB43_20191015_165807.wav/Towsey.Acoustic/BOW-410-WB43_20191015_165807__Towsey.Acoustic.R3D.csv" = "45",
  "SM411_WB35/BOW-411-WB35_20191015_175952.wav/Towsey.Acoustic/BOW-411-WB35_20191015_175952__Towsey.Acoustic.R3D.csv" = "0",
  "SM412_WB34/BOW-412-WB34_20191016_065852.wav/Towsey.Acoustic/BOW-412-WB34_20191016_065852__Towsey.Acoustic.R3D.csv" = "14",
  "SM413_WB06/BOW-413-WB06_20191015_075847.wav/Towsey.Acoustic/BOW-413-WB06_20191015_075847__Towsey.Acoustic.R3D.csv" = "32"
)
# Read one index CSV and keep only the row for the selected (windy) minute.
# `target_minute` is deliberately named so it cannot collide with a column
# of the CSV under dplyr's data masking.
read_windy_minute <- function(path, target_minute) {
  read.csv(path) %>%
    filter(Index == target_minute) %>%
    mutate(File = basename(path))
}
#Rbind all the selected minutes in a csv file (this is done by index; this
#script handles the R3D index). As in the original, write.csv() returns NULL,
#so selected_minutes only terminates the pipeline.
selected_minutes <- do.call(rbind, Map(read_windy_minute,
                                       file.path(results_dir, names(windy_minutes)),
                                       windy_minutes)) %>%
  select(., Index, File, everything()) %>%
  write.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/WindRemoval_SpectralIndices_Channel1/R3D_SelectedMinutes.csv")
#Reading the full dataset with all spectral indices
df <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SpectralIndices_Channel1/AllSpectral_Channel1.csv")
#Separating the column FID into transect + point; date; time and index - which we will remove because there is already a column with this name. We are keeping the column FID because in there we can find all the info we need about the row
df1 <- separate(df, col = FID, into = c("TransectPoint", "Date", "Time", "Index2"), remove = F, sep = "_")
rm(df)
df2 <- select(df1, -c(X.2, X.1, X, Index2))
rm(df1)
norm_df <- df2 %>% mutate_at(vars(6:261), scale)
rm(df2)
t <- aggregate(norm_df, by = list(norm_df$TransectPoint, norm_df$Date, norm_df$Time, norm_df$Index), FUN = mean)
t1 <- select(t, -c(FID, TransectPoint, Date, Time))
rm(t)
t2 <- mutate(t1, FID = paste0(t1$Group.1, sep = "_", t1$Group.2, sep = "_", t1$Group.3, sep = "_", t1$Group.4, sep = "_", t1$Index))
t3 <- select(t2, -c(Group.1, Group.2, Group.3, Group.4, Index))
t4 <- select(t3, FID, everything())
rm(t1, t2, t3)
t5 <- data.frame(t4, row.names = 1)
t6 <- rowMeans(t5)
#write.csv(t5, "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/WindRemoval_SpectralIndices_Channel1/SpectralIndices_FCSAveragedPerMinute.csv")
write.csv(t5, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSAveragedPerMinute.csv"))
#Creating the matrix with the euclidean distances of the points - huge one
m <- dist(t5, method = "euclidean")
#Transforming it into a matrix
v <- data.matrix(m)
rm(t4)
#Using the reshape package to reshape the df
library(reshape2)
melted <- melt(v)
#selecting the windy minutes that were chosen to be the 0 point
melted1 <- filter(melted, Var1 == "BOW-JZ1-WB28_20191014_180000_2_2" | Var1 == "BOW-JZ2-WB11_20191014_205938_20_20" | Var1 =="BOW-JZ3-WB25_20191015_065852_18_18" | Var1 == "BOW-401-WB22_20191015_085838_33_33" | Var1 == "BOW-402-WB15_20191015_105827_14_14" | Var1 == "BOW-403-WB49_20191015_125827_51_51" | Var1 == "BOW-404-WB56_20191015_145807_56_56" | Var1 == "BOW-406-WB46_20191015_160002_51_51" | Var1 == "BOW-410-WB43_20191015_165807_45_45" | Var1 == "BOW-411-WB35_20191015_175952_0_0" | Var1 == "BOW-412-WB34_20191016_065852_14_14" | Var1 == "BOW-413-WB06_20191015_075847_32_32")
write.csv(melted1, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSPerSelectedMinute.csv"))
melted1 <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSPerSelectedMinute.csv"))
df <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSAveragedPerMinute.csv"))
head(melted1)
#Selecting the values of distance that were equal or less than .4
#FIX: the original used value <= "0.4", which coerces the numeric distances to
#character and compares lexicographically (e.g. "1e-05" sorts after "0.4");
#compare numerically instead. Also assign the data frame BEFORE writing it:
#write.csv() returns NULL, so `melted2 <- ... %>% write.csv(...)` left melted2
#NULL and the unique() below operated on nothing.
melted2 <- filter(melted1, value <= 0.4)
write.csv(melted2, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_WindMinAbove0.4.csv"))
x <- list(unique(melted2$Var2))
write.csv(x)
#Summary indices: read every ".Indices.csv" under the results directory, tag
#each row with a FileName_ResultMinute ID, keep the summary-index columns and
#scale them.
summary_all <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/indices_all.csv")
# FIX: setwd() returns the *previous* working directory, so the original
# `directory <- setwd(...)` stored the old path and list.files() scanned the
# wrong folder. Store the target path explicitly, then change into it.
directory <- "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/ResultsIndices_Channel1/"
setwd(directory)
output_dir <- ("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/")
files <- list.files(directory, pattern = ".Indices.csv", full.names = T, recursive = T)
name <- basename(files)
name <- gsub(pattern = "__Towsey.Acoustic.Indices.csv", replacement = "", x = name)
files <- as.list(files)
# FIX: the original piped the list of data frames into
# lapply(files, mutate(...)), which is not a valid lapply call and errored;
# and then applied select()/mutate_at() to a list rather than a data frame.
# Read each csv, add FID per file, then combine into one data frame.
df <- lapply(files, read.csv) %>%
  map(~ mutate(., FID = paste(FileName, ResultMinute, sep = "_"))) %>%
  bind_rows()
df <- select(df, BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, ResultMinute)
norm_df <- df %>% mutate_at(vars(1:15), scale)
library(stringr)
# Re-load the windy-minute distance table written above and derive a
# file-plus-minute ID from the first 31 characters of Var2.
# FIX: getDataPath() already prefixes ".../Fieldwork_Bowra", so passing
# "Fieldwork_Bowra" again produced a doubled, non-existent path that did not
# match the file written earlier; the extra component is dropped.
melted2 <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_WindMinAbove0.4.csv")) %>%
  mutate(FID = str_sub(Var2, start = 1, end = 31))
windy_minutes <- as.list(unique(melted2$FID))
#Getting the summary Indices and eliminating the windy minutes, pasting it all together to build the distance matrix#
files <- list.files(directory, pattern = ".Indices.csv", full.names = T, recursive = T)
name <- basename(files)
name <- gsub(pattern = "__Towsey.Acoustic.Indices.csv", replacement = "", x = name)
files <- as.list(files)
df <- lapply(files, read.csv) %>%
map(~ mutate(., FID = paste(.$FileName, .$ResultMinute, sep = "_"))) %>%
map(~ mutate(., wind = match(FID, windy_minutes, nomatch = "GoodMinutes", incomparables = "NA"))) %>%
#map(~ filter(., wind == 0)) %>%
map(~ select(., BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, ResultMinute, wind, FID)) %>%
map(~ mutate_at(., vars(1:15), scale)) %>%
map(~ separate(., col = FileName, into = c("Location", "Recorder", "PointData"), remove = F)) %>%
do.call(rbind, .) %>%
write.csv(., getDataPath("Fieldwork_Bowra", "Oct2019", "WindRemoval_SpectralIndices_Channel1", "07.02.2020_SummaryIndices_Channel1_WindRemoved_test.csv"))
library(tidyverse)
test1 <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/indices_all1.csv") %>%
mutate(., FID = paste(.$FileName, .$ResultMinute, sep = "_")) %>%
mutate(., wind = match(FID, windy_minutes, nomatch = 0, incomparables = "NA")) %>%
filter(., wind == 0) %>%
select(., BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, wind, ResultMinute, FID) %>%
mutate_at(., vars(1:15), scale) %>%
separate(., col = FileName, into = c("Location", "Recorder", "PointData"), remove = F) %>%
write.csv(., getDataPath("Fieldwork_Bowra", "Oct2019", "WindRemoval_SpectralIndices_Channel1", "SummaryIndices_Channel1_WindRemoved.csv"))
|
/Chapter1_FineScaleAcousticSurvey/SM_Preparation_SpectralIndices.R
|
permissive
|
QutEcoacoustics/spatial-acoustics
|
R
| false
| false
| 11,883
|
r
|
library(tidyverse)
# Detach MASS (if attached) so MASS::select() does not mask dplyr::select().
# FIX: an unconditional detach("package:MASS") errors when MASS is not on the
# search path, which aborts the whole script on a fresh session.
if ("package:MASS" %in% search()) {
  detach("package:MASS")
}
# Clear the workspace FIRST.
# FIX: the original called rm(list = ls()) *after* defining getDataPath(),
# which deleted the helper before any later code could call it.
rm(list = ls())

# Build an absolute path under the project's Fieldwork_Bowra data root.
# ...: path components appended below the root directory.
# Returns a character path.
getDataPath <- function(...) {
  file.path("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra", ...)
}
#Windy minutes selection - for each recording, read its R3D spectral-index CSV,
#keep the row for the manually chosen windy minute (Index) and tag it with the
#source file name. Replaces 12 copy-pasted read/filter/mutate chunks with one
#table-driven loop so new recordings only need a row in `windy_refs`.
indices_dir <- "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/ResultsIndices_Channel1"

# One row per recorder: results sub-folder, recording base name and the
# manually selected windy minute (kept as character, matching the original
# Index == "<n>" comparisons).
windy_refs <- tibble::tribble(
  ~folder,      ~recording,                     ~minute,
  "JZ001_WB28", "BOW-JZ1-WB28_20191014_180000", "2",
  "JZ002_WB11", "BOW-JZ2-WB11_20191014_205938", "20",
  "JZ003_WB25", "BOW-JZ3-WB25_20191015_065852", "18",
  "SM401_WB22", "BOW-401-WB22_20191015_085838", "33",
  "SM402_WB15", "BOW-402-WB15_20191015_105827", "14",
  "SM403_WB49", "BOW-403-WB49_20191015_125827", "51",
  "SM404_WB56", "BOW-404-WB56_20191015_145807", "56",
  "SM406_WB46", "BOW-406-WB46_20191015_160002", "51",
  "SM410_WB43", "BOW-410-WB43_20191015_165807", "45",
  "SM411_WB35", "BOW-411-WB35_20191015_175952", "0",
  "SM412_WB34", "BOW-412-WB34_20191016_065852", "14",
  "SM413_WB06", "BOW-413-WB06_20191015_075847", "32"
)
# Full path to each R3D csv:
# <indices_dir>/<folder>/<recording>.wav/Towsey.Acoustic/<recording>__Towsey.Acoustic.R3D.csv
windy_refs <- mutate(windy_refs,
                     path = file.path(indices_dir, folder,
                                      paste0(recording, ".wav"),
                                      "Towsey.Acoustic",
                                      paste0(recording, "__Towsey.Acoustic.R3D.csv")))

#Rbind all the selected minutes into one data frame and write it out.
#FIX: previously `selected_minutes` was assigned the result of write.csv()
#(always NULL); assign the data frame first, then write it.
selected_minutes <- purrr::map2(windy_refs$path, windy_refs$minute,
                                function(p, idx) {
                                  read.csv(p) %>%
                                    filter(Index == idx) %>%
                                    mutate(File = basename(p))
                                }) %>%
  bind_rows() %>%
  select(Index, File, everything())
write.csv(selected_minutes,
          "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/WindRemoval_SpectralIndices_Channel1/R3D_SelectedMinutes.csv")
#Reading the full dataset with all spectral indices
df <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SpectralIndices_Channel1/AllSpectral_Channel1.csv")
#Separating the column FID into transect + point; date; time and index - which we will remove because there is already a column with this name. We are keeping the column FID because in there we can find all the info we need about the row
df1 <- separate(df, col = FID, into = c("TransectPoint", "Date", "Time", "Index2"), remove = F, sep = "_")
rm(df)
# Drop leftover row-number columns from repeated csv round-trips and the
# duplicate index column created by separate() above
df2 <- select(df1, -c(X.2, X.1, X, Index2))
rm(df1)
# Scale the spectral-index columns. NOTE(review): positional selection —
# assumes columns 6:261 are exactly the index columns; confirm if the input
# csv schema changes.
norm_df <- df2 %>% mutate_at(vars(6:261), scale)
rm(df2)
# Average every column per (TransectPoint, Date, Time, Index) group.
# NOTE(review): aggregate() with FUN = mean over non-numeric columns
# (e.g. FID) produces NAs/warnings for those columns; they are dropped below.
t <- aggregate(norm_df, by = list(norm_df$TransectPoint, norm_df$Date, norm_df$Time, norm_df$Index), FUN = mean)
t1 <- select(t, -c(FID, TransectPoint, Date, Time))
rm(t)
# Rebuild FID from the grouping columns (Group.1..Group.4 come from aggregate).
# NOTE(review): paste0() has no `sep` argument, so each sep = "_" is pasted
# in as a literal "_" — this happens to produce the intended separator, but
# paste(..., sep = "_") would express it directly.
t2 <- mutate(t1, FID = paste0(t1$Group.1, sep = "_", t1$Group.2, sep = "_", t1$Group.3, sep = "_", t1$Group.4, sep = "_", t1$Index))
t3 <- select(t2, -c(Group.1, Group.2, Group.3, Group.4, Index))
t4 <- select(t3, FID, everything())
rm(t1, t2, t3)
# Use FID as row names so dist() labels rows by minute
t5 <- data.frame(t4, row.names = 1)
# NOTE(review): t6 is computed but never used afterwards
t6 <- rowMeans(t5)
#write.csv(t5, "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/WindRemoval_SpectralIndices_Channel1/SpectralIndices_FCSAveragedPerMinute.csv")
write.csv(t5, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSAveragedPerMinute.csv"))
#Creating the matrix with the euclidean distances of the points - huge one
m <- dist(t5, method = "euclidean")
#Transforming it into a matrix
v <- data.matrix(m)
rm(t4)
#Using the reshape package to reshape the df (wide matrix -> long Var1/Var2/value)
library(reshape2)
melted <- melt(v)
#selecting the windy minutes that were chosen to be the 0 point
# (one reference minute per recorder; value is then the distance of every
# other minute to that reference)
melted1 <- filter(melted, Var1 == "BOW-JZ1-WB28_20191014_180000_2_2" | Var1 == "BOW-JZ2-WB11_20191014_205938_20_20" | Var1 =="BOW-JZ3-WB25_20191015_065852_18_18" | Var1 == "BOW-401-WB22_20191015_085838_33_33" | Var1 == "BOW-402-WB15_20191015_105827_14_14" | Var1 == "BOW-403-WB49_20191015_125827_51_51" | Var1 == "BOW-404-WB56_20191015_145807_56_56" | Var1 == "BOW-406-WB46_20191015_160002_51_51" | Var1 == "BOW-410-WB43_20191015_165807_45_45" | Var1 == "BOW-411-WB35_20191015_175952_0_0" | Var1 == "BOW-412-WB34_20191016_065852_14_14" | Var1 == "BOW-413-WB06_20191015_075847_32_32")
write.csv(melted1, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSPerSelectedMinute.csv"))
# Re-read the file just written (restart point when resuming the analysis)
melted1 <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSPerSelectedMinute.csv"))
df <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_FCSAveragedPerMinute.csv"))
head(melted1)
#Selecting the values of distance that were equal or less than .4
#FIX: the original used value <= "0.4", which coerces the numeric distances to
#character and compares lexicographically (e.g. "1e-05" sorts after "0.4");
#compare numerically instead. Also assign the data frame BEFORE writing it:
#write.csv() returns NULL, so `melted2 <- ... %>% write.csv(...)` left melted2
#NULL and the unique() below operated on nothing.
melted2 <- filter(melted1, value <= 0.4)
write.csv(melted2, getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_WindMinAbove0.4.csv"))
x <- list(unique(melted2$Var2))
write.csv(x)
#Summary indices: read every ".Indices.csv" under the results directory, tag
#each row with a FileName_ResultMinute ID, keep the summary-index columns and
#scale them.
summary_all <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/indices_all.csv")
# FIX: setwd() returns the *previous* working directory, so the original
# `directory <- setwd(...)` stored the old path and list.files() scanned the
# wrong folder. Store the target path explicitly, then change into it.
directory <- "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/ResultsIndices_Channel1/"
setwd(directory)
output_dir <- ("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/")
files <- list.files(directory, pattern = ".Indices.csv", full.names = T, recursive = T)
name <- basename(files)
name <- gsub(pattern = "__Towsey.Acoustic.Indices.csv", replacement = "", x = name)
files <- as.list(files)
# FIX: the original piped the list of data frames into
# lapply(files, mutate(...)), which is not a valid lapply call and errored;
# and then applied select()/mutate_at() to a list rather than a data frame.
# Read each csv, add FID per file, then combine into one data frame.
df <- lapply(files, read.csv) %>%
  map(~ mutate(., FID = paste(FileName, ResultMinute, sep = "_"))) %>%
  bind_rows()
df <- select(df, BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, ResultMinute)
norm_df <- df %>% mutate_at(vars(1:15), scale)
library(stringr)
# Re-load the windy-minute distance table written above and derive a
# file-plus-minute ID from the first 31 characters of Var2.
# FIX: getDataPath() already prefixes ".../Fieldwork_Bowra", so passing
# "Fieldwork_Bowra" again produced a doubled, non-existent path that did not
# match the file written earlier; the extra component is dropped.
melted2 <- read.csv(getDataPath("Oct2019", "WindRemoval_SpectralIndices_Channel1", "SpectralIndices_WindMinAbove0.4.csv")) %>%
  mutate(FID = str_sub(Var2, start = 1, end = 31))
windy_minutes <- as.list(unique(melted2$FID))
#Getting the summary Indices and eliminating the windy minutes, pasting it all together to build the distance matrix#
files <- list.files(directory, pattern = ".Indices.csv", full.names = T, recursive = T)
name <- basename(files)
name <- gsub(pattern = "__Towsey.Acoustic.Indices.csv", replacement = "", x = name)
files <- as.list(files)
# Read each csv, flag windy minutes by matching FID against windy_minutes,
# keep/scale the summary-index columns, then combine and write.
# NOTE(review): match()'s nomatch must be an integer; "GoodMinutes" is coerced
# with as.integer() to NA (with a warning), so non-windy rows get wind = NA
# rather than a "GoodMinutes" label. incomparables = "NA" matches the literal
# string "NA", not missing values — confirm both are intended.
# NOTE(review): the chain ends in write.csv(), which returns NULL, so `df` is
# NULL after this pipeline; the csv on disk is the real output.
df <- lapply(files, read.csv) %>%
  map(~ mutate(., FID = paste(.$FileName, .$ResultMinute, sep = "_"))) %>%
  map(~ mutate(., wind = match(FID, windy_minutes, nomatch = "GoodMinutes", incomparables = "NA"))) %>%
  #map(~ filter(., wind == 0)) %>%
  map(~ select(., BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, ResultMinute, wind, FID)) %>%
  map(~ mutate_at(., vars(1:15), scale)) %>%
  map(~ separate(., col = FileName, into = c("Location", "Recorder", "PointData"), remove = F)) %>%
  do.call(rbind, .) %>%
  # NOTE(review): getDataPath() already includes ".../Fieldwork_Bowra", so this
  # writes under a doubled ".../Fieldwork_Bowra/Fieldwork_Bowra/..." path —
  # verify against the reads elsewhere in this script.
  write.csv(., getDataPath("Fieldwork_Bowra", "Oct2019", "WindRemoval_SpectralIndices_Channel1", "07.02.2020_SummaryIndices_Channel1_WindRemoved_test.csv"))
library(tidyverse)
# Same pipeline as above but on the prepared indices_all1.csv, with
# nomatch = 0 so filter(wind == 0) keeps only NON-windy minutes.
# NOTE(review): the chain ends in write.csv() (returns NULL), so `test1` is
# NULL after this runs; the written csv is the real output.
test1 <- read.csv("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Fieldwork_Bowra/Oct2019/SummaryIndices_Channel1_Prepared/indices_all1.csv") %>%
  mutate(., FID = paste(.$FileName, .$ResultMinute, sep = "_")) %>%
  mutate(., wind = match(FID, windy_minutes, nomatch = 0, incomparables = "NA")) %>%
  filter(., wind == 0) %>%
  select(., BackgroundNoise, Snr, Activity, EventsPerSecond, HighFreqCover, MidFreqCover, LowFreqCover, AcousticComplexity, TemporalEntropy, EntropyOfAverageSpectrum, EntropyOfPeaksSpectrum, EntropyOfVarianceSpectrum, ClusterCount, Ndsi, SptDensity, FileName, wind, ResultMinute, FID) %>%
  mutate_at(., vars(1:15), scale) %>%
  separate(., col = FileName, into = c("Location", "Recorder", "PointData"), remove = F) %>%
  # NOTE(review): getDataPath() already includes ".../Fieldwork_Bowra" — this
  # writes under a doubled path; verify against downstream readers.
  write.csv(., getDataPath("Fieldwork_Bowra", "Oct2019", "WindRemoval_SpectralIndices_Channel1", "SummaryIndices_Channel1_WindRemoved.csv"))
|
\name{KIRBY}
\alias{KIRBY}
\docType{data}
\title{
KIRBY Study Pancreatic Cancer Dataset
}
\description{
An expression set object containing pancreatic cancer data
}
\format{
eSet object
}
\details{
more details can be found in the experimentData section of the object
}
\source{
https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE79670
}
\references{
Kirby et al., Mol Oncol 2016
}
\examples{
KIRBYEset = loadPancreasEsets()$esets$KIRBY
experimentData(KIRBYEset)
}
\keyword{datasets}
|
/man/KIRBY.Rd
|
no_license
|
fiddlerOnDaRoof/MetaGxPancreas
|
R
| false
| false
| 496
|
rd
|
\name{KIRBY}
\alias{KIRBY}
\docType{data}
\title{
KIRBY Study Pancreatic Cancer Dataset
}
\description{
An expression set object containing pancreatic cancer data
}
\format{
eSet object
}
\details{
more details can be found in the experimentData section of the object
}
\source{
https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE79670
}
\references{
Kirby et al., Mol Oncol 2016
}
\examples{
KIRBYEset = loadPancreasEsets()$esets$KIRBY
experimentData(KIRBYEset)
}
\keyword{datasets}
|
# Load packages
library(shiny)
library(shinydashboard)
library(shinycssloaders)
# Source necessary files
source("global.R")
source("inputModule.R")
source("leaflet_module.R")
source("addressModule.R")
# Define UI for shiny app
ui <- dashboardPage(
dashboardHeader(title = "Guilford COVID-19 Resource Finder"),
dashboardSidebar(
sidebarMenu(
menuItem("Help Video", tabName = "helpVideo", icon = icon("play-circle")),
menuItem("Resources", tabName = "resources", icon = icon("th-list")),
filterDataInputsUI("selections",
categories = unique(resources$Category)),
addressInputUI("address_input")
)
),
dashboardBody(
tabItems(
tabItem(tabName = "helpVideo",
fluidRow(
column(width = 3),
column(width = 6,
title = "YT Video Here",
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/ScMzIvxBSi4" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
),
column(width = 3)
), # closing fluidRow
), # Closing helpVideo tabItem
tabItem(tabName = "resources",
fluidRow(
column(width = 2),
valueBoxOutput(width = 4, "programTotal"),
valueBoxOutput(width = 4, "programValue"),
column(width = 2)
), # closing fluidRow
fluidRow(
box(title = 'Map of Resources',
width = 12,
withSpinner(leafletMapUI("main_map"), type = 6)),
uiOutput("programinfo")
) # closing fluidRow
) # Closing resources tabItem
) # Closing tabItems
) # Closing dashboardBody
) # Closing dashboardPage
# Define server logic
server <- (function(input, output, session) {
  # Call the selection module once and keep its returned reactives for reuse
  selections <- callModule(filterDataServer, "selections", df = resources)

  # Filtered dataset driven by the user's selections. Each filter keeps every
  # row when its selector is empty (NULL), otherwise keeps only chosen values.
  final_df <- reactive({
    resources %>%
      filter(if (is.null(selections$categories())) !(Category %in% selections$categories()) else Category %in% selections$categories()) %>%
      filter(if (is.null(selections$cities())) !(City %in% selections$cities()) else City %in% selections$cities()) %>%
      filter(if (is.null(selections$programs())) !(Program %in% selections$programs()) else Program %in% selections$programs())
  })

  # Address entry module; returns the entered address data as a reactive
  res <- callModule(addressInputServer, "address_input")

  # Leaflet map module receives both the filtered data and the address
  callModule(leafletMapServer, "main_map", map_dat = final_df, add_dat = res)

  # Non-module UI components below

  ## Static total over the full resources dataset
  output$programTotal <- renderValueBox({
    valueBox(
      paste0(nrow(resources)), "Total Programs", icon = icon("list"),
      color = "blue"
    )
  })

  ## Reactive count of rows surviving the current filters
  output$programValue <- renderValueBox({
    valueBox(
      paste0(nrow(final_df())), "# of filtered Programs", icon = icon("list"),
      color = "green"
    )
  })

  ## One information box per filtered program, rebuilt whenever filters change.
  ## FIX: use seq_len(nrow(...)) instead of 1:nrow(...) so an empty filter
  ## result renders nothing instead of erroring on indices 1 and 0.
  output$programinfo <- renderUI({
    lapply(seq_len(nrow(final_df())), function(i){
      fluidRow(
        column(width = 12,
               box(
                 width = 12,
                 title = paste0("A Program of: ", final_df()[i, 'Organization']),
                 h2(final_df()[i, 'Program']),
                 p("Address: ", str_to_title(final_df()[i,'Geoaddress']),
                   br(),
                   "Phone: ", final_df()[i,'Phone'], br(),
                   "website: ", a(href= paste0(final_df()[i,'Website']), paste0(final_df()[i,'Website']) , target="_blank" )),
                 h3("Description"),
                 p(final_df()[i,'Description']),
                 h3("Changes to the Program Due to COVID-19"),
                 p(final_df()[i,'Notes']),
                 h4("Hours Open:"),
                 p("Monday: ", final_df()[i,'Mon'], br(),
                   "Tuesday: ", final_df()[i,'Tue'], br(),
                   "Wednesday: ", final_df()[i,'Wed'], br(),
                   "Thursday: ", final_df()[i,'Thu'], br(),
                   "Friday: ", final_df()[i,'Fri'], br(),
                   "Saturday: ", final_df()[i,'Sat'], br(),
                   "Sunday: ", final_df()[i,'Sun'], br()
                 )
               )
        )
      )
    })
  })
})
# Run the application
shinyApp(ui = ui, server = server)
|
/resourceFinderV5/app.R
|
no_license
|
jasonajones73/ResourceFinderV4
|
R
| false
| false
| 4,600
|
r
|
# Load packages
library(shiny)
library(shinydashboard)
library(shinycssloaders)
# Source necessary files: app data and shiny module definitions.
# NOTE(review): one of these (presumably global.R) must define the
# `resources` data frame used throughout the UI and server — verify.
source("global.R")
source("inputModule.R")
source("leaflet_module.R")
source("addressModule.R")
# Define UI for shiny app: dashboard with a sidebar (navigation + filter and
# address module inputs) and two tabs (help video, resource map/list).
ui <- dashboardPage(
  dashboardHeader(title = "Guilford COVID-19 Resource Finder"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Help Video", tabName = "helpVideo", icon = icon("play-circle")),
      menuItem("Resources", tabName = "resources", icon = icon("th-list")),
      # Module UI: category/city/program pickers (server half: filterDataServer)
      filterDataInputsUI("selections",
                         categories = unique(resources$Category)),
      # Module UI: address entry used to place the user on the map
      addressInputUI("address_input")
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(tabName = "helpVideo",
              fluidRow(
                column(width = 3),
                column(width = 6,
                       title = "YT Video Here",
                       HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/ScMzIvxBSi4" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
                ),
                column(width = 3)
              ), # closing fluidRow
              # NOTE(review): trailing comma above leaves an empty argument to
              # tabItem(); htmltools tags tolerate empty args, but it is tidier
              # to remove — confirm before editing.
      ), # Closing helpVideo tabItem
      tabItem(tabName = "resources",
              fluidRow(
                column(width = 2),
                valueBoxOutput(width = 4, "programTotal"),
                valueBoxOutput(width = 4, "programValue"),
                column(width = 2)
              ), # closing fluidRow
              fluidRow(
                box(title = 'Map of Resources',
                    width = 12,
                    withSpinner(leafletMapUI("main_map"), type = 6)),
                uiOutput("programinfo")
              ) # closing fluidRow
      ) # Closing resources tabItem
    ) # Closing tabItems
  ) # Closing dashboardBody
) # Closing dashboardPage
# Define server logic
server <- (function(input, output, session) {
  # Call the selection module once and keep its returned reactives for reuse
  selections <- callModule(filterDataServer, "selections", df = resources)

  # Filtered dataset driven by the user's selections. Each filter keeps every
  # row when its selector is empty (NULL), otherwise keeps only chosen values.
  final_df <- reactive({
    resources %>%
      filter(if (is.null(selections$categories())) !(Category %in% selections$categories()) else Category %in% selections$categories()) %>%
      filter(if (is.null(selections$cities())) !(City %in% selections$cities()) else City %in% selections$cities()) %>%
      filter(if (is.null(selections$programs())) !(Program %in% selections$programs()) else Program %in% selections$programs())
  })

  # Address entry module; returns the entered address data as a reactive
  res <- callModule(addressInputServer, "address_input")

  # Leaflet map module receives both the filtered data and the address
  callModule(leafletMapServer, "main_map", map_dat = final_df, add_dat = res)

  # Non-module UI components below

  ## Static total over the full resources dataset
  output$programTotal <- renderValueBox({
    valueBox(
      paste0(nrow(resources)), "Total Programs", icon = icon("list"),
      color = "blue"
    )
  })

  ## Reactive count of rows surviving the current filters
  output$programValue <- renderValueBox({
    valueBox(
      paste0(nrow(final_df())), "# of filtered Programs", icon = icon("list"),
      color = "green"
    )
  })

  ## One information box per filtered program, rebuilt whenever filters change.
  ## FIX: use seq_len(nrow(...)) instead of 1:nrow(...) so an empty filter
  ## result renders nothing instead of erroring on indices 1 and 0.
  output$programinfo <- renderUI({
    lapply(seq_len(nrow(final_df())), function(i){
      fluidRow(
        column(width = 12,
               box(
                 width = 12,
                 title = paste0("A Program of: ", final_df()[i, 'Organization']),
                 h2(final_df()[i, 'Program']),
                 p("Address: ", str_to_title(final_df()[i,'Geoaddress']),
                   br(),
                   "Phone: ", final_df()[i,'Phone'], br(),
                   "website: ", a(href= paste0(final_df()[i,'Website']), paste0(final_df()[i,'Website']) , target="_blank" )),
                 h3("Description"),
                 p(final_df()[i,'Description']),
                 h3("Changes to the Program Due to COVID-19"),
                 p(final_df()[i,'Notes']),
                 h4("Hours Open:"),
                 p("Monday: ", final_df()[i,'Mon'], br(),
                   "Tuesday: ", final_df()[i,'Tue'], br(),
                   "Wednesday: ", final_df()[i,'Wed'], br(),
                   "Thursday: ", final_df()[i,'Thu'], br(),
                   "Friday: ", final_df()[i,'Fri'], br(),
                   "Saturday: ", final_df()[i,'Sat'], br(),
                   "Sunday: ", final_df()[i,'Sun'], br()
                 )
               )
        )
      )
    })
  })
})
# Run the application
shinyApp(ui = ui, server = server)
|
#Download the file and put the file in the data folder
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
#Unzip the file
unzip(zipfile="./data/Dataset.zip",exdir="./data")
#Unzipped files are in the folder "UCI HAR Dataset"; get the list of the files
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
# Print the file listing (interactive sanity check)
files
#Read the Activity files (activity code per observation, test and train sets)
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
#Read the Subject files (subject identifier per observation)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
#Read Features files (the measured feature vectors per observation)
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#Look at the properties of the above variables
str(dataActivityTest)
#Merges the training and the test sets to create one data set
#1.Concatenate the data tables by rows
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
#2.Set names to variables
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
# NOTE(review): `head=` works only via partial matching of the `header`
# argument — spell out header=FALSE for clarity.
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
#3.Merge columns to get the data frame Data for all data
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
#Extracts only the measurements on the mean and standard deviation for each measurement
#Subset names of features by measurements on the mean and standard deviation,
#i.e. take names of features containing "mean()" or "std()"
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
#Subset the data frame Data by selected names of features
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
#Check the structure of the data frame Data
str(Data)
#Uses descriptive activity names to name the activities in the data set
#1.Read descriptive activity names from "activity_labels.txt"
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"), header = FALSE)
# 2.Factorize the activity variable using the descriptive names.
#   FIX: this step was described in the comments but never executed, so the
#   activity column kept its numeric codes.
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
# 3.Check the first values
head(Data$activity, 30)
#Make the variable names descriptive:
#prefix t is replaced by time
#Acc is replaced by Accelerometer
#Gyro is replaced by Gyroscope
#prefix f is replaced by frequency
#Mag is replaced by Magnitude
#BodyBody is replaced by Body
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
#Creates a second, independent tidy data set and output it
#In this part, a second, independent tidy data set will be created with the average of each variable for each activity and each subject based on the data set in step 4.
# NOTE(review): aggregate() and order() below are base R; plyr does not appear
# to be required by these lines — confirm before removing the library() call.
library(plyr);
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
write.table(Data2, file = "tidydata.txt",row.name=FALSE)
|
/run_analysis.R
|
no_license
|
saayanguchhait/Getting-and-Cleaning-Data-Project
|
R
| false
| false
| 3,725
|
r
|
# run_analysis.R -- Getting and Cleaning Data course project.
# Downloads the UCI HAR data set, merges the train/test splits, keeps only the
# mean()/std() features, applies descriptive activity and variable names, and
# writes a tidy per-(subject, activity) summary to "tidydata.txt".

# Download the archive into ./data (create the directory if missing).
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
# Unzip the file.
unzip(zipfile="./data/Dataset.zip",exdir="./data")
# Unzipped files live in "UCI HAR Dataset"; list them for reference.
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
files
# Activity codes (one integer per observation window).
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
# Subject identifiers.
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
# Feature matrices.
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
# Look at the structure of one of the tables.
str(dataActivityTest)
# Step 1: merge the training and the test sets into one data set.
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
# Give the columns meaningful names (feature names come from features.txt).
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
# FIX: spell out "header" -- the original "head=FALSE" only worked through
# partial argument matching.
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),header=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
# Combine subject, activity and features column-wise into the frame Data.
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
# Step 2: keep only measurements on the mean and standard deviation,
# i.e. feature names containing "mean()" or "std()".
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
# Check the structure of the data frame Data.
str(Data)
# Step 3: use descriptive activity names to name the activities.
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
# FIX: the original read activityLabels but never applied them; convert the
# integer activity codes into a labelled factor as the comments intended.
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
head(Data$activity,30)
# Step 4: expand abbreviated variable names:
#   t -> time, f -> frequency, Acc -> Accelerometer, Gyro -> Gyroscope,
#   Mag -> Magnitude, and collapse the doubled "BodyBody" token.
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
# Step 5: create a second, independent tidy data set with the average of each
# variable for each (subject, activity) pair, and write it out.
library(plyr)
# aggregate() is base R; plyr is kept loaded for downstream compatibility.
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
# FIX: use the real argument name "row.names" instead of relying on partial
# matching with "row.name".
write.table(Data2, file = "tidydata.txt",row.names=FALSE)
|
# paper_makeplots_boltzmannB.R -- figures comparing Boltzmann-machine ECI fits
# against the known truth for a CASM Ising model: optimized ECIs, correlation
# curves, convex hulls, and cooling runs.
library(MASS)
source("casm_helper.R")
source("ising_helpers.R")
require(Rcpp)
library(reshape2)
library(tidyverse)
library(ggplot2)
library(rstan)
library(stats)
library(parallel)
library(gtools)
library(GGally)
library(purrr)
path = "/home/bbales2/casm/invent2"
# True effective cluster interactions: zero everywhere except nonZero indices.
ecis = rep(0, length(getECIs(path)))
N = getSupercell(path)
keep = 2:13
nonZero = c(3, 4, 5, 6, 7, 14, 15, 16, 17, 18)
ecis[nonZero] = c(0.200, 0.058, 0.139, 0.222, 0.180, -0.031, 0.182, 0.203, -0.118, 0.050)
# One saved optimization run per number of temperature/chemical-potential pairs.
paths = c("/home/bbales2/ising/paper_outputs/boltzmann.test2.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test4.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test6.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test8.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test10.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test12.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test14.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test16.dat")
env = new.env()
load(paths[3], envir = env)
corr_names = getCorrs(path) %>%
  select(starts_with("corr")) %>% names
# Load the named variables `vars` from every .Rdata file in `data_paths` and
# row-bind each variable across files, tagging rows with the source file name
# (`which`) and, for list-valued variables, the optimization row (`opt`).
# FIX: the original ignored its first argument and closed over the global
# `paths`; use the argument so the function is self-contained.
getData = function(data_paths, vars) {
  per_file = map(data_paths, function(path) {
    env = new.env()
    load(path, envir = env)
    out = list()
    for(i in seq_along(vars)) {
      if(class(env[[vars[i]]])[[1]] == "list") {
        # List of per-optimization frames: bind and record the row index.
        out[[vars[i]]] = do.call(rbind, env[[vars[i]]]) %>%
          as_tibble %>%  # FIX: as.tibble is deprecated in favor of as_tibble
          mutate(which = basename(path),
                 opt = row_number())
      } else {
        out[[vars[i]]] = env[[vars[i]]] %>%
          mutate(which = basename(path))
      }
    }
    out
  })
  # Transpose: one combined table per requested variable.
  outT = list()
  for(i in seq_along(vars)) {
    outT[[vars[i]]] = map(per_file, ~ .[[vars[i]]]) %>%
      do.call(rbind, .)
  }
  outT
}
data = getData(paths, c('opts', 'opts2', 'clexes', 'tclex', 'crs', 'tcr'))
#data$opts2 %>%
#  filter(which == "test10.dat") %>%
#  select(nonZero) %>%
#  ggpairs
data$opts2 = data$opts2 %>%
  setNames(c(corr_names, 'which', 'opt'))
# Compare results of optimization to the true ecis (one dodge group per run).
levels = names(data$opts2)[nonZero]
data$opts2 %>%
  #filter(which %in% c("test1.dat", "test2.dat", "test3.dat")) %>%
  select(all_of(nonZero), which) %>%  # FIX: all_of() for an external position vector
  bind_rows(ecis[nonZero] %>%
              setNames(levels) %>%
              t %>%
              as_tibble %>%
              mutate(which = "TRUTH")) %>%
  gather(corr, eci, starts_with("corr")) %>%
  mutate(corr = factor(corr, levels = levels)) %>%
  ggplot(aes(corr, eci)) +
  geom_point(aes(color = which), shape = 4, size = 2, stroke = 2, position = position_dodge(width = 0.75)) +
  # FIX: the original piped theme_set() into each plot, which mutates the
  # global theme as a side effect and adds the *previous* theme to the plot;
  # apply the intended theme to this plot only.
  theme_gray(base_size = 18)
# Measured vs. true correlation curves across chemical potential/temperature.
env$pData %>%
  bind_rows %>%
  ggplot(aes(mu, value)) +
  geom_point(aes(colour = "red"), alpha = 0.25) +
  geom_line(data = env$tpData, alpha = 0.5) +
  facet_grid(temp ~ corr) +
  theme_gray(base_size = 18)
# Make convex hull plots: reference points are the minimum formation energy
# per (composition, run).
hull = data$tclex %>%
  group_by(comp, which) %>%
  filter(row_number() == which.min(formation_energy)) %>%
  ungroup() %>%
  mutate(reference = TRUE)
hullNames = hull %>%
  pull(configname)
data$clexes %>%
  filter(configname %in% hullNames) %>%
  mutate(reference = FALSE) %>%
  bind_rows(hull) %>%
  ggplot(aes(comp, formation_energy)) +
  geom_point(aes(color = reference), shape = 4, size = 2, stroke = 2, alpha = 0.5) +
  facet_wrap( ~ which) +
  theme_gray(base_size = 18)
## Make the cooling run plots (fitted runs in color, truth in black).
data$crs %>%
  mutate(chem = factor(param_chem_pota, levels = sample(unique(param_chem_pota)))) %>%
  ggplot(aes(corr1, Tfrac)) +
  geom_point(aes(group = chem, colour = chem), alpha = 0.5) +
  geom_point(data = data$tcr, aes(group = param_chem_pota), colour = "black", alpha = 0.5) +
  geom_hline(aes(yintercept = 1.0), color = "black") +
  geom_hline(aes(yintercept = 0.5), color = "black") +
  facet_grid(which ~ .) +
  theme_gray(base_size = 18)
|
/paper_makeplots_boltzmannB.R
|
no_license
|
bbbales2/ising
|
R
| false
| false
| 3,880
|
r
|
# paper_makeplots_boltzmannB.R -- figures comparing Boltzmann-machine ECI fits
# against the known truth for a CASM Ising model: optimized ECIs, correlation
# curves, convex hulls, and cooling runs.
library(MASS)
source("casm_helper.R")
source("ising_helpers.R")
require(Rcpp)
library(reshape2)
library(tidyverse)
library(ggplot2)
library(rstan)
library(stats)
library(parallel)
library(gtools)
library(GGally)
library(purrr)
path = "/home/bbales2/casm/invent2"
# True effective cluster interactions: zero everywhere except nonZero indices.
ecis = rep(0, length(getECIs(path)))
N = getSupercell(path)
keep = 2:13
nonZero = c(3, 4, 5, 6, 7, 14, 15, 16, 17, 18)
ecis[nonZero] = c(0.200, 0.058, 0.139, 0.222, 0.180, -0.031, 0.182, 0.203, -0.118, 0.050)
# One saved optimization run per .dat file.
paths = c("/home/bbales2/ising/paper_outputs/boltzmann.test2.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test4.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test6.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test8.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test10.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test12.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test14.dat",
          "/home/bbales2/ising/paper_outputs/boltzmann.test16.dat")
env = new.env()
load(paths[3], envir = env)
corr_names = getCorrs(path) %>%
  select(starts_with("corr")) %>% names
# Load the named variables `vars` from each saved .Rdata file and row-bind
# each variable across files, tagging rows with the source file (`which`).
# NOTE(review): the `data` parameter is ignored -- the function closes over
# the global `paths`; the call below happens to pass `paths`, so behavior is
# unaffected, but the signature is misleading.
getData = function(data, vars) {
  data = map(paths, function(path) {
    env = new.env()
    load(path, envir = env)
    out = list()
    for(i in 1:length(vars)) {
      if(class(env[[vars[i]]])[[1]] == "list") {
        # List of per-optimization frames: bind and record the row index.
        # NOTE(review): as.tibble is deprecated; as_tibble is the current name.
        out[[vars[i]]] = do.call(rbind, env[[vars[i]]]) %>%
          as.tibble %>%
          mutate(which = basename(path),
                 opt = row_number())
      } else {
        out[[vars[i]]] = env[[vars[i]]] %>%
          mutate(which = basename(path))
      }
    }
    out
  })
  # Transpose: one combined table per requested variable.
  outT = list()
  for(i in 1:length(vars)) {
    outT[[vars[i]]] = map(data, ~ .[[vars[i]]]) %>%
      do.call(rbind, .)
  }
  outT
}
data = getData(paths, c('opts', 'opts2', 'clexes', 'tclex', 'crs', 'tcr'))
#data$opts2 %>%
#  filter(which == "test10.dat") %>%
#  select(nonZero) %>%
#  ggpairs
data$opts2 = data$opts2 %>%
  setNames(c(corr_names, 'which', 'opt'))
# Compare results of optimization to true ecis (one dodge group per run).
# NOTE(review): piping theme_set() onto a plot both mutates the global theme
# and adds the *previous* theme to the plot; theme_gray(base_size = 18) added
# directly is probably what was intended (same note for the plots below).
levels = names(data$opts2)[nonZero]
data$opts2 %>%
  #filter(which %in% c("test1.dat", "test2.dat", "test3.dat")) %>%
  select(nonZero, which) %>%
  bind_rows(ecis[nonZero] %>%
              setNames(levels) %>%
              t %>%
              as.tibble %>%
              mutate(which = "TRUTH")) %>%
  gather(corr, eci, starts_with("corr")) %>%
  mutate(corr = factor(corr, levels = levels)) %>%
  ggplot(aes(corr, eci)) +
  geom_point(aes(color = which), shape = 4, size = 2, stroke = 2, position = position_dodge(width = 0.75)) +
  theme_set(theme_gray(base_size = 18))
# Measured vs. true correlation curves across chemical potential/temperature.
env$pData %>%
  bind_rows %>%
  ggplot(aes(mu, value)) +
  geom_point(aes(colour = "red"), alpha = 0.25) +
  geom_line(data = env$tpData, alpha = 0.5) +
  facet_grid(temp ~ corr) +
  theme_set(theme_gray(base_size = 18))
# Make convex hull plots: reference points are the minimum formation energy
# per (composition, run).
hull = data$tclex %>%
  group_by(comp, which) %>%
  filter(row_number() == which.min(formation_energy)) %>%
  ungroup() %>%
  mutate(reference = TRUE)
hullNames = hull %>%
  pull(configname)
data$clexes %>%
  filter(configname %in% hullNames) %>%
  mutate(reference = FALSE) %>%
  bind_rows(hull) %>%
  ggplot(aes(comp, formation_energy)) +
  geom_point(aes(color = reference), shape = 4, size = 2, stroke = 2, alpha = 0.5) +
  facet_wrap( ~ which) +
  theme_set(theme_gray(base_size = 18))
## Make the cooling run plots (fitted runs in color, truth in black).
data$crs %>%
  mutate(chem = factor(param_chem_pota, levels = sample(unique(param_chem_pota)))) %>%
  ggplot(aes(corr1, Tfrac)) +
  geom_point(aes(group = chem, colour = chem), alpha = 0.5) +
  geom_point(data = data$tcr, aes(group = param_chem_pota), colour = "black", alpha = 0.5) +
  geom_hline(aes(yintercept = 1.0), color = "black") +
  geom_hline(aes(yintercept = 0.5), color = "black") +
  facet_grid(which ~ .) +
  theme_set(theme_gray(base_size = 18))
|
# Jonckheere-Terpstra trend test across three ordered tea/coffee groups.
cup4<-c(447, 396, 382, 410)
cup2<-c(438, 521, 468, 391, 504, 472)
five_o_clock<-c(513, 543, 506, 489, 407)
library(clinfun)
# FIX: jonckheere.test() was called with no arguments, which errors. It needs
# the pooled observations `x` plus a numeric grouping vector `g` whose order
# encodes the hypothesized trend.
x <- c(cup4, cup2, five_o_clock)
g <- rep(1:3, times = c(length(cup4), length(cup2), length(five_o_clock)))
jonckheere.test(x, g)
|
/11.27.19.V2.R
|
no_license
|
RodionovF/R_scripts
|
R
| false
| false
| 142
|
r
|
# Measurements for three ordered groups, intended for a Jonckheere-Terpstra
# trend test.
cup4<-c(447, 396, 382, 410)
cup2<-c(438, 521, 468, 391, 504, 472)
five_o_clock<-c(513, 543, 506, 489, 407)
library(clinfun)
# NOTE(review): jonckheere.test() is called with no arguments, which errors;
# it needs the pooled observations and an ordered grouping vector, e.g.
# jonckheere.test(c(cup4, cup2, five_o_clock), rep(1:3, c(4, 6, 5))).
jonckheere.test()
|
#Main Analysis: summarise marker signal over Hi-C elemental domains.
# For every positive and negative domain, pull per-marker tiling signal from a
# SQLite database, sum the flanking windows, percentile-shrink the domain
# interior to a fixed number of bins, and save the resulting matrices.
library(RSQLite)
library(data.table)
#library(gplots)
#library(dtw)
#library(Rtsne) ##Rtsne
library(proxy) #dist
library(fastcluster)
# Provides get_MarkMatrix() and matrix_percentail_shrink() used below.
source("/home/ahe/Analysis/201608_HicChipRnaCor/codes/3Main_Processing/HicChipRNACor_functionPack.r")
#source("F:/DATA/R/Kees/1608_HicChipRNACor/3Main_Processing/HicChipRNACor_functionPack.r")
# NOTE(review): setwd() inside a script makes it machine-specific.
setwd("/home/ahe/Analysis/201608_HicChipRnaCor/data/")
target_cell_type="GM12"
# Positive (all) and negative elemental domains: first three BED columns only.
domain_pos=fread("/home/ahe/Analysis/201608_HicChipRnaCor/data/mydomain/GM_all_elemental_domains.bed",stringsAsFactors = F,sep="\t",select = 1:3,data.table = F)
domain_neg=fread("/home/ahe/Analysis/201608_HicChipRnaCor/data/mydomain/GM_neg_elemental_domains.bed",stringsAsFactors = F,sep="\t",select = 1:3,data.table = F) #GM12878_25K_negative_loops.txt
colnames(domain_neg)=colnames(domain_pos)
# Markers to extract: expression/accessibility tracks plus TF and histone ChIP.
target_markers=c("RNA","CAGE","DNase","FAIRE","RRBS","ATF3","BCL3","BCLAF1","BHLHE40","CEBPB","CHD1","CHD2","CREB1","CTCF","CUX1",
                 "E2F4","EGR1","ELF1","ELK1","EP300","ETS1","EZH2","FOS","GABPA","H2AZ","H3K27ac","H3K27me3","H3K36me3","H3K4me1",
                 "H3K4me2","H3K4me3","H3K79me2","H3K9ac","H3K9me3","H4K20me1","JUND","MAFK","MAX","MAZ","MEF2A","MYC","NFE2","NFYA",
                 "NFYB","NR2C2","NRF1","PML","POLR2A","POLR3G","RAD21","RCOR1","REST","RFX5","SIX5","SMC3","SPI1","SP1","SRF","STAT1",
                 "STAT5A","TAF1","TBL1XR1","TBP","USF1","USF2","YY1","ZBTB33","ZNF143","ZNF274","ZNF384","HCFC1")
dbhandle=dbConnect(SQLite(), dbname = '/home/ahe/Analysis/201608_HicChipRnaCor/data/ChIPnlike/database/tilingdata.sqlite')
#get marker distribution on domains
# Tiling-database resolution and output-window parameters.
db_unitsize=1000
db_overlap=500
extension_window_size=1000
extension_window_num=10
devide_into=20
learning_list_idx=rbind(domain_pos,domain_neg)
# One shrunk matrix per domain: rows = extension_window_num upstream windows +
# devide_into interior bins + extension_window_num downstream windows;
# columns = markers.
learning_list=lapply(1:nrow(learning_list_idx),function(x){
  #update idx to extended idx: pad the domain by the full flanking region
  grepping_idx=learning_list_idx[x,]
  grepping_idx[2]=grepping_idx[2]-extension_window_size*extension_window_num
  grepping_idx[3]=grepping_idx[3]+extension_window_size*extension_window_num
  #grep raw matrix from db (rows = tiling units, cols = target markers)
  raw_matrix=get_MarkMatrix(grepping_idx,dbhandle,target_cell_type,target_markers)
  #extension_window_merging: each output window sums 3 half-overlapping db
  # rows -- assumes db_unitsize/db_overlap of 1000/500; TODO confirm.
  rownum=nrow(raw_matrix)
  shrinkrownum=devide_into+extension_window_num*2
  shrinked_matrix=matrix(0,shrinkrownum,ncol(raw_matrix))
  #pre_domain matrix: upstream flank, one row per output window
  for(i in 1:extension_window_num){
    shrinked_matrix[i,]=colSums(raw_matrix[(i*2-1):(i*2+1),])
  }
  extension_window_num_in_raw=extension_window_num*2+1
  # Domain interior: percentile-shrink the variable-length middle rows down
  # to devide_into bins (helper from HicChipRNACor_functionPack.r).
  shrinked_matrix[(extension_window_num+1):(extension_window_num+devide_into),]=matrix_percentail_shrink(raw_matrix[extension_window_num_in_raw:(rownum-extension_window_num*2),],devide_into)
  # Downstream flank, mirroring the upstream construction at the matrix tail.
  for(i in (extension_window_num+devide_into+1):shrinkrownum){
    shrinked_matrix[i,]=colSums(raw_matrix[(rownum-extension_window_num*2+i*2-2):(rownum-extension_window_num*2+i*2),])
  }
  return(shrinked_matrix)
})
save(learning_list,file="markers_on_domain.Rdata")
|
/3Main_Processing/Main_forDomains.r
|
no_license
|
Arthurhe/ChIP-RNA-HiC-correlations
|
R
| false
| false
| 2,964
|
r
|
#Main Analysis: summarise marker signal over Hi-C elemental domains.
# For every positive and negative domain, pull per-marker tiling signal from a
# SQLite database, sum the flanking windows, percentile-shrink the domain
# interior to a fixed number of bins, and save the resulting matrices.
library(RSQLite)
library(data.table)
#library(gplots)
#library(dtw)
#library(Rtsne) ##Rtsne
library(proxy) #dist
library(fastcluster)
# Provides get_MarkMatrix() and matrix_percentail_shrink() used below.
source("/home/ahe/Analysis/201608_HicChipRnaCor/codes/3Main_Processing/HicChipRNACor_functionPack.r")
#source("F:/DATA/R/Kees/1608_HicChipRNACor/3Main_Processing/HicChipRNACor_functionPack.r")
# NOTE(review): setwd() inside a script makes it machine-specific.
setwd("/home/ahe/Analysis/201608_HicChipRnaCor/data/")
target_cell_type="GM12"
# Positive (all) and negative elemental domains: first three BED columns only.
domain_pos=fread("/home/ahe/Analysis/201608_HicChipRnaCor/data/mydomain/GM_all_elemental_domains.bed",stringsAsFactors = F,sep="\t",select = 1:3,data.table = F)
domain_neg=fread("/home/ahe/Analysis/201608_HicChipRnaCor/data/mydomain/GM_neg_elemental_domains.bed",stringsAsFactors = F,sep="\t",select = 1:3,data.table = F) #GM12878_25K_negative_loops.txt
colnames(domain_neg)=colnames(domain_pos)
# Markers to extract: expression/accessibility tracks plus TF and histone ChIP.
target_markers=c("RNA","CAGE","DNase","FAIRE","RRBS","ATF3","BCL3","BCLAF1","BHLHE40","CEBPB","CHD1","CHD2","CREB1","CTCF","CUX1",
                 "E2F4","EGR1","ELF1","ELK1","EP300","ETS1","EZH2","FOS","GABPA","H2AZ","H3K27ac","H3K27me3","H3K36me3","H3K4me1",
                 "H3K4me2","H3K4me3","H3K79me2","H3K9ac","H3K9me3","H4K20me1","JUND","MAFK","MAX","MAZ","MEF2A","MYC","NFE2","NFYA",
                 "NFYB","NR2C2","NRF1","PML","POLR2A","POLR3G","RAD21","RCOR1","REST","RFX5","SIX5","SMC3","SPI1","SP1","SRF","STAT1",
                 "STAT5A","TAF1","TBL1XR1","TBP","USF1","USF2","YY1","ZBTB33","ZNF143","ZNF274","ZNF384","HCFC1")
dbhandle=dbConnect(SQLite(), dbname = '/home/ahe/Analysis/201608_HicChipRnaCor/data/ChIPnlike/database/tilingdata.sqlite')
#get marker distribution on domains
# Tiling-database resolution and output-window parameters.
db_unitsize=1000
db_overlap=500
extension_window_size=1000
extension_window_num=10
devide_into=20
learning_list_idx=rbind(domain_pos,domain_neg)
# One shrunk matrix per domain: rows = extension_window_num upstream windows +
# devide_into interior bins + extension_window_num downstream windows;
# columns = markers.
learning_list=lapply(1:nrow(learning_list_idx),function(x){
  #update idx to extended idx: pad the domain by the full flanking region
  grepping_idx=learning_list_idx[x,]
  grepping_idx[2]=grepping_idx[2]-extension_window_size*extension_window_num
  grepping_idx[3]=grepping_idx[3]+extension_window_size*extension_window_num
  #grep raw matrix from db (rows = tiling units, cols = target markers)
  raw_matrix=get_MarkMatrix(grepping_idx,dbhandle,target_cell_type,target_markers)
  #extension_window_merging: each output window sums 3 half-overlapping db
  # rows -- assumes db_unitsize/db_overlap of 1000/500; TODO confirm.
  rownum=nrow(raw_matrix)
  shrinkrownum=devide_into+extension_window_num*2
  shrinked_matrix=matrix(0,shrinkrownum,ncol(raw_matrix))
  #pre_domain matrix: upstream flank, one row per output window
  for(i in 1:extension_window_num){
    shrinked_matrix[i,]=colSums(raw_matrix[(i*2-1):(i*2+1),])
  }
  extension_window_num_in_raw=extension_window_num*2+1
  # Domain interior: percentile-shrink the variable-length middle rows down
  # to devide_into bins (helper from HicChipRNACor_functionPack.r).
  shrinked_matrix[(extension_window_num+1):(extension_window_num+devide_into),]=matrix_percentail_shrink(raw_matrix[extension_window_num_in_raw:(rownum-extension_window_num*2),],devide_into)
  # Downstream flank, mirroring the upstream construction at the matrix tail.
  for(i in (extension_window_num+devide_into+1):shrinkrownum){
    shrinked_matrix[i,]=colSums(raw_matrix[(rownum-extension_window_num*2+i*2-2):(rownum-extension_window_num*2+i*2),])
  }
  return(shrinked_matrix)
})
save(learning_list,file="markers_on_domain.Rdata")
|
# Uses output of integrateSoundsscapeData.R to create plots and models for AK soundscape project
rm(list=ls())
#general
library(dplyr)
library(data.table)
library(ggplot2)
#for model
library(suncalc)
library(mgcv)
library(zoo)
library(MuMIn)
library(visreg)
library(corrplot)
#for map
library("sf")
library("rnaturalearth")
library("rnaturalearthdata")
library (geosphere)
library(ggsn)
#IMPORT data for Gambell site for entire year 2015-2016
#--------------------------------------------------------------------------------
#includes ambient levels, presence of sounds, AIS ships, and wind data
load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWeTiIce")
#load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWe")
#load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWeIce")
dataSpAShWeIce = dataSpAShWeTiIce
#PLOT MAP OF REGION-- Figure 1
#--------------------------------------------------------------------------------
#world map (map with ships see rdAIS_HarrisSat.r)
theme_set(theme_bw())
world = ne_countries(scale = "medium", returnclass = "sf")
class(world)
# ggplot(data = world ) +
# geom_sf() +
# geom_rect(xmin = -170, xmax = -166, ymin = 64, ymax = 66,
# fill = NA, colour = "black", size = 1.5)
AKprog = 3467 # c(-4596612.39 -2250856.49) , c( 2,024,122.30 4,364,571.46)
WGS84proj = 4326
sites <- st_as_sf(data.frame( latitude = c(65.69976,63.8178), longitude = c(-168.38855,-171.6915) ),
coords = c("longitude", "latitude"), crs = WGS84proj,
agr = "constant")# Bering/Gambell
# AK map for context with bounding box (add labels in illustrator)
ggplot(data = world) +
geom_sf(aes(fill = region_wb)) +
geom_sf(data = sites, size = 3, shape = 23, fill = "darkred") +
coord_sf(crs = st_crs(AKprog),
xlim = c(-1800000, 800000),
ylim = c(240000, 2500000), expand = FALSE, datum = NA) +
scale_fill_viridis_d(option = "E") +
theme(legend.position = "none", axis.title.x = element_blank(),
axis.title.y = element_blank(), panel.background = element_rect(fill = "azure"),
panel.border = element_rect(fill = NA))
##label in illustrator, scale bar not working
#annotate(geom = "text", x = 64, y = -151, label = "Gulf of Mexico",
#fontface = "italic", color = "grey22", size = 6) +
#scalebar(sites, dist = 100, dist_unit = "km", transform = TRUE, model = "WGS84", location="bottomright") +
#PLOT Sound sources-- Figure 2 (see integreateSoundscapeData. R to generate this figure)
#--------------------------------------------------------------------------------
cols <- sapply(dataSpAShWeIce, is.logical)
dataSpAShWeIce[,cols] <- lapply(dataSpAShWeIce[,cols], as.numeric)
dataSpAShWeIceem = reshape :: melt(dataSpAShWeIce, id.vars = "startDateTime",
measure.vars = c("Bmy","Dle","Oro","Eba","Hfa","Bal","Ice","Unk","Anth","Ubi",'nShips'))
dataSpAShWeIceem$Day = as.Date(dataSpAShWeIceem$startDateTime)
uSource = unique(dataSpAShWeIceem$variable)
daySum = NULL
for (ii in 1:length(uSource)){
dtmp = dataSpAShWeIceem[dataSpAShWeIceem$variable == uSource[ii],]
#all days with source of interest
uday = unique(dtmp$Day)
for (dd in 1:length(uday)){ #for each day total up the samples with source and total samples
dtmp2 = dtmp[dtmp$Day == uday[dd],]
daySum = rbind(daySum, c( (as.character( uday[dd])), as.character(uSource[ii]),
sum(as.numeric(dtmp2$value), na.rm = T),
nrow(dtmp2)) )
}
rm(dtmp,uday,dtmp2)
}
# find how many day with source present to add to y-label on graphic
uSource = unique(dataSpAShWeIceem$variable)
daySum = as.data.frame(daySum)
SourceCnt = NULL
for (ii in 1:length(uSource)) {
tmp = daySum[daySum$V2 == uSource[ii],]
tmp2 = sum(as.numeric( as.character(tmp$V3 ))> 0)
SourceCnt = rbind( SourceCnt, c(as.character(uSource[ii]), tmp2, tmp2/nrow(tmp)))
rm(tmp,tmp2)
}
#plot
colnames(daySum) = c("Day","variable","samples","total")
daySum$Day2 = as.Date(daySum$Day)
daySum$perSample = as.numeric(as.character(daySum$samples))/as.numeric(as.character(daySum$total))*100
uvar = unique(daySum$variable)
uorder = c("k","h","j","i","f","g","d","a","c","e","b" ) #c(1,4,2,3,6,5,8,11,9,7,10
ulabs = c("Unknown (78)", "AIS ships (73)", "Anthropogenic (4)", "Ice (100)",
"Unknown Biological (149)","Ribbon seal (1)", "Baleen whale (24)","Beluga (88)", "Bearded seal (136)", "Walrus (155)", "Bowhead (157)" )
for (i in 1:length(uvar)){
idxO = which(daySum$variable == uvar[i])
daySum$Order[idxO] = paste0(uorder[i],uvar[i])
}
ggplot(daySum, aes(Day2, Order, fill= as.numeric(perSample))) +
geom_tile() +
scale_fill_gradient(low="white", high="blue") +
scale_y_discrete(labels = ulabs) +
labs(fill = "% Daily Samples") +
xlab("") +
ylab("")
#PLOT Sound sources-- Figure 3 A)Bowhead B) Wind speed C)AIS ships <5 km, all with ambient
#--------------------------------------------------------------------------------
# codes: feed|unk|ic|bmy|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi
colFreq = c(58,90)
quants <- c(.10,0.50,.90)
quants1 = .5
#------change these for source of interest------#
src = "Biological" #only baleen whales
SOI = dataSpAShWeIce[ !grepl("feed|unk|ice|dle|oro|eba|erb|hfa",dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$Sounds > 0 ,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
#SOI$BioOnly
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mBio= apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
mBiological = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mBiological2 = cbind(as.data.frame(t(mBiological)),freq)
colnames(mBiological2) = c('p10th',"p50th","p90th","freq")
# ggplot( mBiological2, aes(y=p50th, ymax=p10th, ymin=p90th, x = as.numeric( as.character(freq))) ) +
# geom_point() + geom_line()+
# geom_ribbon(linetype=2, alpha=0.1) +
# scale_x_log10() +
# ylim( c(50,120)) +
# xlab("Frequency") +
# ylab(expression( paste("1/3 Octave Band Sound Pressure Level dB re: 1",mu,"Pa")) )+
# annotate("text", x=1000, y=120, label= paste(src, " samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep="") )
##ribbon was not working to set y limits on the graph....
pBio = ggplot( mBiological2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(C)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5 )
rm(src,SOI)
src = "Bowhead"
SOI = dataSpAShWeIce[!grepl("feed|unk|ice|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi",dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$Sounds > 0,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mBowhead = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mBowhead2 = cbind(as.data.frame(t(mBowhead)),freq)
colnames(mBowhead2) = c('p10th',"p50th","p90th","freq")
pBow = ggplot( mBowhead2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(D)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5 )
rm(src,SOI)
src = "AIS Ship" #(D), only at windspeeds less than 10 knots
SOI = dataSpAShWeIce[ !grepl("feed|unk|ice|bmy|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi", dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship > 0,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
#only ships within 5 km, traveling more than 5 knots
#need to deal with cells with multiple ship values!!!
SOI = SOI[as.numeric(as.character(SOI$mnDist)) >= 5000 ,]#only ships within 5 km
SOI = SOI[as.numeric(as.character(SOI$uSOG)) >= 5 ,]#only ships speed > 5 kts
SOI = SOI[as.numeric(as.character(SOI$HourlyWindSpeed)) <= 10 ,]#only ships in < 10 knot winds
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mSh = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
mShips = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mShips2 = cbind(as.data.frame(t(mShips)),freq)
colnames(mShips2) = c('p10th',"p50th","p90th","freq")
pShi = ggplot( mShips2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(D)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples within 5 km (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5)
rm(src,SOI)
src = "Ambient" # unique( dataSpAShWeIce$HourlyWindSpeed )
SOI = dataSpAShWeIce[dataSpAShWeIce$Sounds == 0 & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$HourlyWindSpeed < 10, ,] #no sounds present unique(tst$Sounds)
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
mAmbient = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
#WIND SPEED
dataSpAShWeIce$HourlyWindSpeed2 = as.numeric( gsub("s","", as.character(dataSpAShWeIce$HourlyWindSpeed) ) )
dataSpAShWeIce$HourlyWindSpeed2r = round(as.numeric( gsub("s","", as.character(dataSpAShWeIce$HourlyWindSpeed) )),digits =-1)
SOI = dataSpAShWeIce[dataSpAShWeIce$Sounds == 0 & dataSpAShWeIce$Ship == 0 ,] #no sounds present unique(tst$Sounds)
ambData = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
names( ambData )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(ambData[colFreq[1]:colFreq[2]]) )
WS0 = ambData[ambData$HourlyWindSpeed2r == 0,]
nrow(WS0)
WS0 = apply( WS0[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS10 = ambData[ambData$HourlyWindSpeed2r == 10,]
nrow(WS10)
WS10 = apply( WS10[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS20 = ambData[ambData$HourlyWindSpeed2r == 20,]
nrow(WS20)
WS20 = apply( WS20[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS30 = ambData[ambData$HourlyWindSpeed2r == 30,]
nrow(WS30)
WS30 = apply( WS30[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS40 = ambData[ambData$HourlyWindSpeed2r == 40,]
nrow(WS40)
WS40 = apply( WS40[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WSsum = rbind(WS0,WS10,WS20,WS30,WS40)
WSsum2 = reshape :: melt(WSsum)
library(scales)
# Median SPL wind speeds (A)
pWS = ggplot(WSsum2, aes(x=(X2), y=value, color = X1) )+
geom_line(size = 1) +
geom_point(size = 1)+
scale_x_log10() +
labs(color = "Wind speed [kts]")+
scale_color_manual(labels = c("0", "10", "20","30","40") , values = hue_pal()(5) ) +
xlab("Frequency") + ylab(expression( paste("Median SPL (Leq) dB re: 1",mu,"Pa"))) +
annotate("text", x=10, y=128, label= "(A)", size=5) +
ylim(c(70,130)) +
theme(legend.position = c(0.8, 0.2))
#theme_minimal()
#comparison of median spectra for source of interest (B)
mSOIs = reshape2::melt( rbind(mBio, mSh,mAmbient))
mSOIs$Var2 = as.numeric( as.character( mSOIs$Var2))
pMed = ggplot(mSOIs, aes(x=Var2, y = value, color = Var1))+
geom_line(size = 1)+
geom_point(size = 1)+
scale_x_log10() +
scale_color_manual(labels = c("Biological only", "AIS ships only", "Ambient") , values = c("#00BFC4","#F8766D","black") ) +
labs(color = "Sources present")+
annotate("text", x=10, y=128, label= "(B)", size=5) +
xlab("Frequency") + ylab(expression( paste("Median SPL (Leq) dB re: 1",mu,"Pa"))) +
theme(legend.position = c(0.8, 0.2))+
#theme_minimal()
ylim(c(70,130))
library(gridExtra)
grid.arrange(pWS, pMed, pBio, pShi, ncol=2, nrow = 2)
## RESPONSE variable-- ambient sound levels in octave bands
#--------------------------------------------------------------------------------
colnames(dataSpAShWeIce)[71] = "Fq_125Hz"
colnames(dataSpAShWeIce)[74] = "Fq_250Hz"
colnames(dataSpAShWeIce)[77] = "Fq_500Hz"
colnames(dataSpAShWeIce)[80] = "Fq_1000Hz"
colnames(dataSpAShWeIce)[83] = "Fq_2000Hz"
colnames(dataSpAShWeIce)[89] = "Fq_8000Hz"
##PREDICTOR variables-- reformat, distribution, values
#--------------------------------------------------------------------------------
par(mfrow=c( 3,2))
#1) MONTH- as indicator of season
dataSpAShWeIce$mthS = as.numeric(as.character(dataSpAShWeIce$mthS ))
hist(dataSpAShWeIce$mthS,main="Month" )
#2) NUMBER of sounds present
dataSpAShWeIce$Sounds = as.numeric(as.character(dataSpAShWeIce$Sounds ))
hist(dataSpAShWeIce$Sounds,main="#Sounds")
#3) NUMBER of ships present
dataSpAShWeIce$nShips = as.numeric(as.character(dataSpAShWeIce$nShips ))
#unique( dataSpAShWeIce$nShips )
dataSpAShWeIce$nShips[is.na(dataSpAShWeIce$nShips)] <- 0
hist(dataSpAShWeIce$nShips,main="#Ships" )
#4) PRESENCE OF SOURCES
#4a) Bowhead
dataSpAShWeIce$Bmy = as.numeric(as.character( dataSpAShWeIce$Bmy ))
hist(dataSpAShWeIce$Bmy,main="bowhead")
#unique(dataSpAShWeIce$Bmy )
#4b) Bearded seal
dataSpAShWeIce$Eba = as.numeric(as.character(dataSpAShWeIce$Eba ))
#hist(dataSpAShWeIce$Eba )
#unique(dataSpAShWeIce$Eba )
#4c Unknown biologiacl
dataSpAShWeIce$Ubi = as.numeric(as.character(dataSpAShWeIce$Ubi ))
#5d Baleen whale
dataSpAShWeIce$Bal = as.numeric(as.character(dataSpAShWeIce$Bal ))
#5) WINDSPEED
hist( dataSpAShWeIce$HourlyWindSpeed2 ,main = "wind")
#unique(dataSpAShWeIce$HourlyWindSpeed2)
#6) ICE coverage for the day
dataSpAShWeIce$ice_concentration_20km = as.numeric( as.character(dataSpAShWeIce$ice_concentration_20km) )
hist (dataSpAShWeIce$ice_concentration_20km,main="ice")
#7) SUN altitude- day light related to biological activity??
gambell = c(63.8227,171.6758)
tmp = getSunlightPosition(dataSpAShWeIce$dataTime,gambell[1],gambell[2])
dataSpAShWeIce$sunAlt = tmp$altitude
#8) JULIAN day
dataSpAShWeIce$Jul = as.numeric( as.character( ( yday(dataSpAShWeIce$dataTime) )))
#9) HOUR of the day
dataSpAShWeIce$hour2 = as.numeric( as.character( hour(dataSpAShWeIce$dataTime) ))
#hist(dataSpAShWeIce$hour2)
dCols = data.frame(colnames(dataSpAShWeIce))
dCols
##SOME CHECKS MODEL INPUTS
# Exploratory checks before modeling: (1) lag-1 autocorrelation of the 125 Hz
# band (used later as the starting value for corCAR1), (2) predictor
# correlation matrix, (3) extent of NA rows, and (4) gap-interpolation of wind.
# Side effects: mutates dataSpAShWeIce (adds Fq_125HzShift, HourlyWindSpeed2int).
#--------------------------------------------------------------------------------
par(mfrow=c( 1,1))
#1) autocorrlation term- how correlated is SPL to previous hour?- very!!!
# shift() is data.table's lag; corACI is the lag-1 Pearson correlation
dataSpAShWeIce$Fq_125HzShift = shift(dataSpAShWeIce$Fq_125Hz,fill = NA)
plot(dataSpAShWeIce$Fq_125HzShift, dataSpAShWeIce$Fq_125Hz)
corACI = cor(dataSpAShWeIce$Fq_125HzShift,dataSpAShWeIce$Fq_125Hz, method = "pearson",use="complete.obs")
#2) predictor variable correlation-- only use month, not Julian day
newdata <- dataSpAShWeIce[c("mthS" , "Jul", "sunAlt",
"ice_concentration_20km" ,"HourlyWindSpeed2",
"Eba", "Ubi", "Bal", "nShips")]
corr = cor(newdata)
#corrplot(corr)
#2) NA values-- models can't deal with
# idx marks rows with an NA in ANY of the listed predictors
idx = apply(is.na(dataSpAShWeIce[,c("Jul", "sunAlt", "ice_concentration_20km", "HourlyWindSpeed2", "Sounds", "Eba", "Bmy", "Bal","Ubi" )]),1,any)
remRows = nrow(dataSpAShWeIce) - nrow(dataSpAShWeIce[!idx,])
# how many removed rows are NOT explained by missing wind alone
remRows - length (which(is.na(dataSpAShWeIce[,("HourlyWindSpeed2")] ) ) )
nrow(dataSpAShWeIce[!idx,])/ nrow(dataSpAShWeIce) #54% of data NA so removed--yikes!!
#interoloate because all wind- missing wind
# NOTE(review): comment above mentioned "4 samples for an hour" but maxgap=6
# is used below -- confirm the intended maximum interpolation gap.
dataNA = dataSpAShWeIce[!idx,] # NOTE(review): this keeps the COMPLETE rows, not the missing-wind rows
intrpNA = na.approx(dataSpAShWeIce$HourlyWindSpeed2,maxgap=(6),na.rm = FALSE )
# NAs still left
length(which(is.na(intrpNA )))
length(which(is.na(intrpNA )))/ nrow(dataSpAShWeIce) #38% of data NA so removed
par(mfrow=c( 2,1))
plot(intrpNA, pch = 20, col="blue", cex=.5, main = "Wind speed with interpolation max gap = 6")
plot(dataSpAShWeIce$HourlyWindSpeed2,pch = 20,col="blue", cex=.5, main = "Wind speed")
dataSpAShWeIce$HourlyWindSpeed2int = intrpNA
##MODEL-- gam
# Builds the modeling data set (dataModel): rows where the interpolated wind
# is non-NA, i.e. NAs remaining after na.approx are dropped. Spot-checks the
# remaining predictors for NAs via unique()/subsetting.
#--------------------------------------------------------------------------------
#smooth term selection using select=TRUE, which penalizes wiggliness and removes terms with poor fit from the model
#We also fit all models with gamma=1.4, which further restricts wiggliness
#smooth terms: https://www.rdocumentation.org/packages/mgcv/versions/1.8-33/topics/smooth.terms
#remove rows with NAs-- mostly from wind (ugh!)
dataModel = dataSpAShWeIce[!is.na(intrpNA),]
#check other variables for NAs...
unique(dataModel$hour2)
ck = dataModel[is.na(dataModel$hour2),]
unique(dataModel$Jul)
unique(dataModel$sunAlt)
unique(dataModel$ice_concentration_20km)
unique(dataModel$HourlyWindSpeed2int)
unique(dataModel$Eba)
unique(dataModel$Bmy)
unique(dataModel$Bal)
unique(dataModel$Ubi)
#HYPOTHESIS: enviroment still driven/predicted by wind and biological, not human activity
# add back in when variables set: correlation=corCAR1(value=corACI,form=~dataTime)
# ctrl is defined for optional multi-threaded fitting (gam control) but is not
# passed to the gam() calls below
ctrl = list(nthreads=6)
rm(newdata,idx,remRows,tmp)
#-------------------------
##1) 125 Hz octave band model
# Fits the global GAM of Fq_125Hz on seasonal (Jul, hour), physical (ice,
# wind), diel (sunAlt), anthropogenic (nShips) and biological presence terms;
# plots conditional effects; then partitions deviance explained by refitting
# grouped-term submodels with smoothing parameters held at the global fit's
# values. Results are accumulated into ModComp (initialized here).
#-------------------------
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
# NOTE(review): mgcv::gam() does NOT accept a `correlation` argument -- it is
# silently swallowed by `...` (only gamm() supports corCAR1). Likewise
# `na.rm` is not a gam() argument. This is why pacf(residuals(...)) below
# does not change when the "correlation term" is added.
global.Gamm125 = gam(Fq_125Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm125)
anova(global.Gamm125)#anova provides whole term significance for parametric terms
par(mfrow=c( 3,4))
visreg(global.Gamm125) #all variables
#abiotic variables
p1 = visreg(global.Gamm125,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (125 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm125,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm125,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info (one ggplot panel per term)
pJulian = visreg(global.Gamm125,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (125 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
pWind = visreg(global.Gamm125,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pIce = visreg(global.Gamm125,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pHour = visreg(global.Gamm125,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=2, y=125, label="(D)") + theme_bw()
pSun = visreg(global.Gamm125,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.7, y=125, label="(E)") + theme_bw()
pBmy = visreg(global.Gamm125,"Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Bowhead") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pBal = visreg(global.Gamm125,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pEba = visreg(global.Gamm125,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
pOro = visreg(global.Gamm125,"Oro", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Oro") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(I)") + theme_bw()
pUbi = visreg(global.Gamm125,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(J)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBmy, pBal,pEba,pOro,pUbi, ncol=5, nrow =2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm125,"pearson")^2)/df.residual(global.Gamm125)
#autocorrelation
pacf(residuals(global.Gamm125)) #why does this not change when I add in corelation term to model?
#best model-- use DREDGE function to rank by ACI
# did not use b/c similar results to global model and want to evalute model for all variables
#options(na.action = "na.fail")
#global.Gamm125_dredge = dredge(global.Gamm125, rank=AIC)
#global.Gamm125_dredge
#subset(global.Gamm125_dredge, subset=delta<2 ) #2 models
#WOCR1<- get.models(global.Gamm125_dredge, subset = 1)[[508]] #best model
#summary(WOCR1)
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm125$sp, so we can hold the smoothing terms fixed.
# NOTE(review): the sp[] index slices below assume the ordering Jul(1:2),
# hour2(3), ice(4:5), wind(6:7), sunAlt(8:9) produced with select=TRUE --
# verify against names(global.Gamm125$sp) before trusting the partition.
global.Gamm125$sp
mPhysic = gam(Fq_125Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm125$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm125$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_125Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
# NOTE(review): sunAlt is grouped with the anthropogenic submodel here --
# confirm this grouping is intentional (it also appears in the other bands).
mAnthro = gam(Fq_125Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm125$sp[8:9]) + (nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_125Hz ~ s(Jul,bs = "cr",sp = global.Gamm125$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm125$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
# ModComp rows: band, global dev.expl, physical, seasonal, biological, anthro
ModComp = NULL
ModComp = rbind(ModComp, c("125Hz", 1-(global.Gamm125$deviance/global.Gamm125$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
# NOTE: used this https://stats.stackexchange.com/questions/325832/gam-mgcv-aic-vs-deviance-explained instead of this...
#ddevPhysic = deviance(global.Gamm125)- deviance(mPhysic)
#ddevBiolog = deviance(global.Gamm125)- deviance(mBiolog)
#ddevAnthro = deviance(global.Gamm125)- deviance(mAnthro)
#c. PLOTS to save out for each FQ band (written to Windows-specific paths)
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq125_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm125))
plot(dataModel$Fq_125Hz, predict(global.Gamm125), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq125_ConditionalPlots.png")
par(mfrow=c(3,4))
visreg(global.Gamm125,main="125Hz")
dev.off()
#-------------------------
##2) 250 Hz octave band
# Same workflow as the 125 Hz section: global GAM fit, conditional-effect
# plots, deviance partition into physical/biological/anthropogenic/seasonal
# submodels (smoothing parameters fixed at the global fit's values), and
# saved figures. Appends a row to ModComp.
#-------------------------
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
# NOTE(review): `correlation` and `na.rm` are not gam() arguments and are
# silently ignored by mgcv (corCAR1 requires gamm()).
global.Gamm250 = gam(Fq_250Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm250)
anova(global.Gamm250)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm250)
p1 = visreg(global.Gamm250,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (250 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm250,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm250,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
pJulian = visreg(global.Gamm250,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (250 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm250,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm250,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm250,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm250,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm250,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pUbi = visreg(global.Gamm250,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pEba = visreg(global.Gamm250,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pUbi, pEba, ncol=4, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm250,"pearson")^2)/df.residual(global.Gamm250)
#autocorrelation
pacf(residuals(global.Gamm250)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm250$sp, so we can hold the smoothing terms fixed.
# NOTE(review): sp[] slices assume ordering Jul(1:2), hour2(3), ice(4:5),
# wind(6:7), sunAlt(8:9) -- verify against names(global.Gamm250$sp).
global.Gamm250$sp
mPhysic = gam(Fq_250Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm250$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm250$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_250Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog= 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_250Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm250$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_250Hz ~ s(Jul,bs = "cr",sp = global.Gamm250$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm250$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("250Hz", 1-(global.Gamm250$deviance/global.Gamm250$null.deviance), devPhysic, devSeason, devBiolog, devAnthro))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq250_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm250))
plot(dataModel$Fq_250Hz, predict(global.Gamm250), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq250_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm250,main="250Hz")
dev.off()
#-------------------------
##3) 500 Hz octave band
# Same workflow as the previous bands, with Dle added to the biological terms.
# Appends a row to ModComp and writes figures to disk.
#-------------------------
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
# NOTE(review): `correlation` and `na.rm` are not gam() arguments and are
# silently ignored by mgcv (corCAR1 requires gamm()).
global.Gamm500 = gam(Fq_500Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm500)
anova(global.Gamm500)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm500)
p1 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- option for figure 4, can I overlay bio one one graph- not with points because
pJulian5 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pIce5 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind5 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pBmy5 = visreg(global.Gamm500,"Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Bmy") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(E)") + theme_bw()
pBal5 = visreg(global.Gamm500,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pUbi5 = visreg(global.Gamm500,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
# NOTE(review): visreg's positional args are (fit, xvar, by, ...); passing a
# third variable name ("Bmy") here is not a supported overlay -- confirm the
# intent of this experimental panel.
pBio5 = visreg(global.Gamm500,"Ubi","Bal","Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
grid.arrange(pJulian5, pWind5, pIce5, pBmy5, pBal5, pUbi5, ncol=3, nrow = 2)
grid.arrange(pJulian5, pWind5, pIce5,pBal5, ncol=2, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm500,"pearson")^2)/df.residual(global.Gamm500)
#autocorrelation
pacf(residuals(global.Gamm500)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm500$sp, so we can hold the smoothing terms fixed.
# NOTE(review): inconsistency -- this band uses ice=sp[3:4], wind=sp[5:6]
# while every other band uses ice=sp[4:5], wind=sp[6:7]; sp[3] also overlaps
# the hour2 slice used in mSeason below. Verify against names(global.Gamm500$sp).
global.Gamm500$sp
mPhysic = gam(Fq_500Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm500$sp[3:4] ) +
s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm500$sp[5:6]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_500Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_500Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm500$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_500Hz ~ s(Jul,bs = "cr",sp = global.Gamm500$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm500$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("500Hz", 1-(global.Gamm500$deviance/global.Gamm500$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq500_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm500))
plot(dataModel$Fq_500Hz, predict(global.Gamm500), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq500_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm500,main="500Hz")
dev.off()
#-------------------------
##4) 1000 Hz octave band
# Global GAM fit for the 1000 Hz band (Bmy dropped from the biological terms
# relative to lower bands) plus the abiotic three-panel figure.
#-------------------------
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
# NOTE(review): `correlation` and `na.rm` are not gam() arguments and are
# silently ignored by mgcv (corCAR1 requires gamm()).
global.Gamm1000 = gam(Fq_1000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm1000)
anova(global.Gamm1000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm1000)
p1 = visreg(global.Gamm1000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (1000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm1000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm1000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
# Per-term conditional-effect panels for the 1000 Hz model, arranged 4x2.
pJulian = visreg(global.Gamm1000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (1000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm1000,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm1000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm1000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm1000,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm1000,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
# FIX: this panel is named pOro and labeled "Oro" but previously plotted the
# "Ubi" term (copy-paste from the Ubi panel); plot the Oro term to match.
pOro = visreg(global.Gamm1000,"Oro", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Oro") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
pEba = visreg(global.Gamm1000,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pEba, pOro, ncol=4, nrow = 2)
# Diagnostics and deviance partition for the 1000 Hz model (same pattern as
# the other bands); appends a row to ModComp and writes the band's figures.
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm1000,"pearson")^2)/df.residual(global.Gamm1000)
#autocorrelation
pacf(residuals(global.Gamm1000)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm1000$sp, so we can hold the smoothing terms fixed.
# NOTE(review): sp[] slices assume ordering Jul(1:2), hour2(3), ice(4:5),
# wind(6:7), sunAlt(8:9) -- verify against names(global.Gamm1000$sp).
# `correlation` and `na.rm` below are silently ignored by gam() (see above).
global.Gamm1000$sp
mPhysic = gam(Fq_1000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm1000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm1000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_1000Hz ~ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_1000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm1000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_1000Hz ~ s(Jul,bs = "cr",sp = global.Gamm1000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm1000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("1000Hz", 1-(global.Gamm1000$deviance/global.Gamm1000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq1000_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm1000))
plot(dataModel$Fq_1000Hz, predict(global.Gamm1000), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq1000_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm1000,main="1000Hz")
dev.off()
#-------------------------
##5) 2000 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
global.Gamm2000 = gam(Fq_2000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Eba) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm2000)
anova(global.Gamm2000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm2000)
p1 = visreg(global.Gamm2000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (2000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm2000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm2000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
pJulian = visreg(global.Gamm2000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (2000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm2000,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm2000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm2000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm2000,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm2000,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pEba = visreg(global.Gamm2000,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pDle = visreg(global.Gamm2000,"Dle", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Dle") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pEba, pDle, ncol=4, nrow = 2)
# Model diagnostics for the 2000 Hz band, then refit the model term-by-term in
# source-category groups (physical / biological / anthropogenic / seasonal) to
# partition deviance explained. Smoothing parameters are held fixed at the
# global model's values so the component fits are comparable.
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm2000,"pearson")^2)/df.residual(global.Gamm2000)
#autocorrelation
# NOTE(review): mgcv::gam() has no 'correlation' argument (that is a gamm()
# feature); it appears to be silently absorbed by '...', which would explain
# why the pacf does not change -- confirm and consider gamm() if AR structure
# is needed.
pacf(residuals(global.Gamm2000)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm2000$sp, so we can hold the smoothing terms fixed.
global.Gamm2000$sp
# Physical (abiotic) terms only: ice concentration + wind speed.
mPhysic = gam(Fq_2000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm2000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm2000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
# Biological presence terms only (species presence/absence covariates).
mBiolog = gam(Fq_2000Hz ~(Bal)+ (Eba) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
# "Anthropogenic" group: ship count plus sun altitude.
# NOTE(review): sun altitude is grouped with ships here (and for every band) --
# confirm this grouping is intentional, since sunAlt is not anthropogenic.
mAnthro = gam(Fq_2000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm2000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
# Seasonal terms only: Julian day + hour of day (cyclic).
mSeason = gam(Fq_2000Hz ~ s(Jul,bs = "cr",sp = global.Gamm2000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm2000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
# Append this band's deviance partition to the running comparison table.
ModComp = rbind(ModComp, c("2000Hz", 1-(global.Gamm2000$deviance/global.Gamm2000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
# c. Save diagnostic figures for the 2000 Hz band to disk:
#    (1) predicted-vs-actual scatter, (2) conditional plots for every term.
png(file = "D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq2000_modelPerfo.png",
    width = 600, height = 350, pointsize = 12)
par(mfrow = c(1, 1))
pSPL = (predict(global.Gamm2000))
plot(x = dataModel$Fq_2000Hz, y = predict(global.Gamm2000),
     main = "Predicted vs Actual")
dev.off()

png(file = "D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq2000_ConditionalPlots.png")
par(mfrow = c(3, 4))
visreg(global.Gamm2000, main = "2000Hz")
dev.off()
#-------------------------
##6) 8000 Hz octave band
#-------------------------
# Fit the 8000 Hz global GAM (fewer covariates than lower bands: only Bal, Ubi,
# Dle biological terms), then build manuscript Figure 4 combining the 500 Hz
# and 8000 Hz partial-effect plots, and run the same diagnostics as above.
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
global.Gamm8000 = gam(Fq_8000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Ubi)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm8000)
anova(global.Gamm8000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm8000)
# FIGURE 4-- 500 and 8000 results
# Top row (p*5): 500 Hz model panels; bottom row (p1-p3, pDle): 8000 Hz panels.
p1 = visreg(global.Gamm8000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (8000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm8000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm8000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pDle = visreg(global.Gamm8000,"Dle", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Dle") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(D)") + theme_bw()
pJulian5 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
pIce5 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind5 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pBal5 = visreg(global.Gamm500,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(D)") + theme_bw()
grid.arrange(pJulian5, pWind5, pIce5, pBal5, p1,p2,p3,pDle, ncol=4, nrow = 2)
#grid.arrange(pJulian5, p1, pWind5, p2, pIce5, p3, pBal5,pDle, ncol=2, nrow = 4)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm8000,"pearson")^2)/df.residual(global.Gamm8000)
#autocorrelation
# NOTE(review): see earlier note -- 'correlation=' is likely ignored by gam().
pacf(residuals(global.Gamm8000)) #why does this not change when I add in corelation term to model?
# Deviance partitioning for the 8000 Hz band: refit by source category with
# smoothing parameters fixed at the global model's values (same scheme as the
# other bands), then append the results to ModComp.
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters:global.Gamm8000$sp, so we can hold the smoothing terms fixed.
global.Gamm8000$sp
mPhysic = gam(Fq_8000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm8000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm8000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_8000Hz ~ (Bal)+ (Ubi)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_8000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm8000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_8000Hz ~ s(Jul,bs = "cr",sp = global.Gamm8000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm8000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("8000Hz", 1-(global.Gamm8000$deviance/global.Gamm8000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
# c. Save diagnostic figures for the 8000 Hz band to disk:
#    (1) predicted-vs-actual scatter, (2) conditional plots for every term.
png(file = "D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq8000_modelPerfo.png",
    width = 600, height = 350, pointsize = 12)
par(mfrow = c(1, 1))
pSPL = (predict(global.Gamm8000))
plot(x = dataModel$Fq_8000Hz, y = predict(global.Gamm8000),
     main = "Predicted vs Actual")
dev.off()

png(file = "D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq8000_ConditionalPlots.png")
par(mfrow = c(3, 4))
visreg(global.Gamm8000, main = "8000Hz")
dev.off()
#-------------------------
## PLOT model results as a function of frequency band to show which bands are
## dominated by which source category.
#-------------------------
# ModComp rows were built with rbind() on mixed vectors, so every cell is
# character; convert the deviance columns back to numeric.
# BUG FIX: the second deviance column is named "Abiotic" here, but the old code
# converted a non-existent "Physical" column -- that assignment errors
# ("replacement has 0 rows") and would have left "Abiotic" as character.
colnames(ModComp) = c("Frequency","Global","Abiotic","Seasonal","Biological","Anthropogenic")
ModComp = as.data.frame(ModComp)
for (cc in c("Global","Abiotic","Seasonal","Biological","Anthropogenic")) {
  ModComp[[cc]] = as.numeric(as.character(ModComp[[cc]]))
}
# Melt the deviance table to long format and plot deviance explained per
# frequency band and source-category model (stacked version kept for
# reference; the dodged version is the one used in the manuscript).
means.long<-reshape2::melt(ModComp,id.vars="Frequency")
# Desired x-axis ordering of the octave bands (low to high frequency).
positions <- c("125Hz", "250Hz", "500Hz", "1000Hz", "2000Hz", "8000Hz")
#stacked-- not what I want because does a total, unless I remove global model from it
dev.off()
ggplot(means.long,aes(x=Frequency,y=value,fill=factor(variable)))+
#geom_bar(position = "fill", stat = "identity") +
geom_bar( stat = "identity", colour="black") +
scale_x_discrete(limits = positions)+
scale_fill_discrete(name="Source Category")+
xlab("")+ ylab("Deviance explained")
#side-by-side-- use this!
ggplot(means.long,aes(x=Frequency,y=value,fill=factor(variable)))+
geom_bar(stat="identity",position="dodge",colour="black")+
scale_x_discrete(limits = positions) +
xlab("")+ ylab("Deviance explained")+
scale_fill_manual(name="Soundscape category models", values = c("black", "dark gray","#A3A500","#00BFC4","#F8766D"))+
theme(legend.position = c(0.14, 0.8))
# Palette scratch work (hex codes used elsewhere in the figures).
#pink, green, blue, red
c("#00BFC4","#F8766D","black")
hue_pal()(5)
# Print the global-model summaries for copying into the manuscript tables.
#Copy to spreadsheet for mabuscript tables
#Global model results
summary(global.Gamm125)
summary(global.Gamm250)
summary(global.Gamm500)
summary(global.Gamm1000)
summary(global.Gamm2000)
summary(global.Gamm8000)
#-------------------------
# exploring other model strutures with temporal variables as "by" variable
#-------------------------
# Exploratory refit of the 125 Hz model with ice and wind smooths varying by
# month ("by" smooths) instead of separate seasonal terms. Overwrites
# global.Gamm125 -- NOTE(review): rerunning this section clobbers the model
# used for the figures above; consider a different object name.
dataModel$month
global.Gamm125 = gam(Fq_125Hz ~
s(ice_concentration_20km, bs = "cr", by = (mthS)) + s(HourlyWindSpeed2int, bs = "cr", by = mthS) +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(global.Gamm125)
visreg(global.Gamm125,main="8000Hz")
plot(global.Gamm125)
#REMOVED: s(Jul,bs = "cr") + s(hour2, bs="cc")+
|
/modelPlot_SoundscapeData.R
|
no_license
|
mfmckenna/MM_AKSoundscape
|
R
| false
| false
| 59,648
|
r
|
# Uses output of integrateSoundsscapeData.R to create plots and models for AK soundscape project
# Loads required packages and the pre-integrated Gambell dataset (one year,
# 2015-2016) of SPL, sound-source presence, AIS ships, weather, and ice data.
rm(list=ls())
#general
library(dplyr)
library(data.table)
library(ggplot2)
#for model
library(suncalc)
library(mgcv)
library(zoo)
library(MuMIn)
library(visreg)
library(corrplot)
#for map
library("sf")
library("rnaturalearth")
library("rnaturalearthdata")
library (geosphere)
library(ggsn)
#IMPORT data for Gambell site for entire year 2015-2016
#--------------------------------------------------------------------------------
#includes ambient levels, presence of sounds, AIS ships, and wind data
# load() restores the object named inside the .RData file (dataSpAShWeTiIce);
# it is aliased to the name the rest of the script uses.
load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWeTiIce")
#load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWe")
#load("D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\data\\dataSpAShWeIce")
dataSpAShWeIce = dataSpAShWeTiIce
#PLOT MAP OF REGION-- Figure 1
#--------------------------------------------------------------------------------
# Alaska-region context map with the two recording sites (Bering/Gambell)
# marked; final labels and scale bar are added in Illustrator.
#world map (map with ships see rdAIS_HarrisSat.r)
theme_set(theme_bw())
world = ne_countries(scale = "medium", returnclass = "sf")
class(world)
# ggplot(data = world ) +
# geom_sf() +
# geom_rect(xmin = -170, xmax = -166, ymin = 64, ymax = 66,
# fill = NA, colour = "black", size = 1.5)
# Projections: Alaska Albers (EPSG:3467) for display, WGS84 for site coords.
AKprog = 3467 # c(-4596612.39 -2250856.49) , c( 2,024,122.30 4,364,571.46)
WGS84proj = 4326
sites <- st_as_sf(data.frame( latitude = c(65.69976,63.8178), longitude = c(-168.38855,-171.6915) ),
coords = c("longitude", "latitude"), crs = WGS84proj,
agr = "constant")# Bering/Gambell
# AK map for context with bounding box (add labels in illustrator)
ggplot(data = world) +
geom_sf(aes(fill = region_wb)) +
geom_sf(data = sites, size = 3, shape = 23, fill = "darkred") +
coord_sf(crs = st_crs(AKprog),
xlim = c(-1800000, 800000),
ylim = c(240000, 2500000), expand = FALSE, datum = NA) +
scale_fill_viridis_d(option = "E") +
theme(legend.position = "none", axis.title.x = element_blank(),
axis.title.y = element_blank(), panel.background = element_rect(fill = "azure"),
panel.border = element_rect(fill = NA))
##label in illustrator, scale bar not working
#annotate(geom = "text", x = 64, y = -151, label = "Gulf of Mexico",
#fontface = "italic", color = "grey22", size = 6) +
#scalebar(sites, dist = 100, dist_unit = "km", transform = TRUE, model = "WGS84", location="bottomright") +
#PLOT Sound sources-- Figure 2 (see integreateSoundscapeData. R to generate this figure)
#--------------------------------------------------------------------------------
# Build a per-day, per-source summary of detection presence (% of daily
# samples with each source), then draw the Figure 2 tile plot.
cols <- sapply(dataSpAShWeIce, is.logical)
dataSpAShWeIce[,cols] <- lapply(dataSpAShWeIce[,cols], as.numeric)
dataSpAShWeIceem = reshape :: melt(dataSpAShWeIce, id.vars = "startDateTime",
measure.vars = c("Bmy","Dle","Oro","Eba","Hfa","Bal","Ice","Unk","Anth","Ubi",'nShips'))
dataSpAShWeIceem$Day = as.Date(dataSpAShWeIceem$startDateTime)
uSource = unique(dataSpAShWeIceem$variable)
# For each source x day: count samples with the source present and total
# samples. NOTE(review): growing daySum with rbind() inside nested loops is
# O(n^2); preallocating or aggregate()/dplyr::count() would be faster, and the
# rbind of mixed values coerces everything to character (re-converted below).
daySum = NULL
for (ii in 1:length(uSource)){
dtmp = dataSpAShWeIceem[dataSpAShWeIceem$variable == uSource[ii],]
#all days with source of interest
uday = unique(dtmp$Day)
for (dd in 1:length(uday)){ #for each day total up the samples with source and total samples
dtmp2 = dtmp[dtmp$Day == uday[dd],]
daySum = rbind(daySum, c( (as.character( uday[dd])), as.character(uSource[ii]),
sum(as.numeric(dtmp2$value), na.rm = T),
nrow(dtmp2)) )
}
rm(dtmp,uday,dtmp2)
}
# find how many day with source present to add to y-label on graphic
uSource = unique(dataSpAShWeIceem$variable)
daySum = as.data.frame(daySum)
SourceCnt = NULL
for (ii in 1:length(uSource)) {
tmp = daySum[daySum$V2 == uSource[ii],]
tmp2 = sum(as.numeric( as.character(tmp$V3 ))> 0)
SourceCnt = rbind( SourceCnt, c(as.character(uSource[ii]), tmp2, tmp2/nrow(tmp)))
rm(tmp,tmp2)
}
#plot
colnames(daySum) = c("Day","variable","samples","total")
daySum$Day2 = as.Date(daySum$Day)
daySum$perSample = as.numeric(as.character(daySum$samples))/as.numeric(as.character(daySum$total))*100
uvar = unique(daySum$variable)
# uorder prefixes force the desired top-to-bottom row order on the y-axis;
# ulabs carries the day counts computed above (hard-coded -- TODO confirm they
# still match SourceCnt after any data update).
uorder = c("k","h","j","i","f","g","d","a","c","e","b" ) #c(1,4,2,3,6,5,8,11,9,7,10
ulabs = c("Unknown (78)", "AIS ships (73)", "Anthropogenic (4)", "Ice (100)",
"Unknown Biological (149)","Ribbon seal (1)", "Baleen whale (24)","Beluga (88)", "Bearded seal (136)", "Walrus (155)", "Bowhead (157)" )
for (i in 1:length(uvar)){
idxO = which(daySum$variable == uvar[i])
daySum$Order[idxO] = paste0(uorder[i],uvar[i])
}
ggplot(daySum, aes(Day2, Order, fill= as.numeric(perSample))) +
geom_tile() +
scale_fill_gradient(low="white", high="blue") +
scale_y_discrete(labels = ulabs) +
labs(fill = "% Daily Samples") +
xlab("") +
ylab("")
#PLOT Sound sources-- Figure 3 A)Bowhead B) Wind speed C)AIS ships <5 km, all with ambient
#--------------------------------------------------------------------------------
# Median (and 10th/90th percentile) 1/3-octave-band spectra for samples with
# only biological sources present (baleen whales; no ships, no other sources).
# codes: feed|unk|ic|bmy|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi
# colFreq: column range holding the 33 third-octave-band SPL columns.
colFreq = c(58,90)
quants <- c(.10,0.50,.90)
quants1 = .5
#------change these for source of interest------#
src = "Biological" #only baleen whales
SOI = dataSpAShWeIce[ !grepl("feed|unk|ice|dle|oro|eba|erb|hfa",dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$Sounds > 0 ,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
#SOI$BioOnly
# Rename the SPL columns to their band center frequencies (Hz).
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mBio= apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
mBiological = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mBiological2 = cbind(as.data.frame(t(mBiological)),freq)
colnames(mBiological2) = c('p10th',"p50th","p90th","freq")
# ggplot( mBiological2, aes(y=p50th, ymax=p10th, ymin=p90th, x = as.numeric( as.character(freq))) ) +
# geom_point() + geom_line()+
# geom_ribbon(linetype=2, alpha=0.1) +
# scale_x_log10() +
# ylim( c(50,120)) +
# xlab("Frequency") +
# ylab(expression( paste("1/3 Octave Band Sound Pressure Level dB re: 1",mu,"Pa")) )+
# annotate("text", x=1000, y=120, label= paste(src, " samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep="") )
##ribbon was not working to set y limits on the graph....
# Panel (C) of Figure 3: median line bracketed by the 10th/90th percentiles.
pBio = ggplot( mBiological2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(C)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5 )
rm(src,SOI)
# Same percentile-spectrum computation, restricted to bowhead-only samples
# (all other source codes excluded). NOTE(review): pBow is annotated "(D)"
# like the ship panel but is not included in the grid.arrange() below --
# confirm whether it is used elsewhere.
src = "Bowhead"
SOI = dataSpAShWeIce[!grepl("feed|unk|ice|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi",dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$Sounds > 0,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mBowhead = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mBowhead2 = cbind(as.data.frame(t(mBowhead)),freq)
colnames(mBowhead2) = c('p10th',"p50th","p90th","freq")
pBow = ggplot( mBowhead2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(D)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5 )
rm(src,SOI)
# Percentile spectra for AIS-ship-only samples (no biological sources),
# further filtered by ship distance, speed, and low wind.
src = "AIS Ship" #(D), only at windspeeds less than 10 knots
SOI = dataSpAShWeIce[ !grepl("feed|unk|ice|bmy|dle|oro|eba|erb|hfa|mbo|bac|uba|ubi", dataSpAShWeIce$sps2) & dataSpAShWeIce$Ship > 0,]
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
#only ships within 5 km, traveling more than 5 knots
#need to deal with cells with multiple ship values!!!
# NOTE(review): the comments and the figure label say "within 5 km", but the
# filter keeps mnDist >= 5000 m, i.e. ships 5 km or FARTHER away. Confirm
# whether this should be <= 5000 (assuming mnDist is in meters).
SOI = SOI[as.numeric(as.character(SOI$mnDist)) >= 5000 ,]#only ships within 5 km
SOI = SOI[as.numeric(as.character(SOI$uSOG)) >= 5 ,]#only ships speed > 5 kts
SOI = SOI[as.numeric(as.character(SOI$HourlyWindSpeed)) <= 10 ,]#only ships in < 10 knot winds
names( SOI )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(SOI[colFreq[1]:colFreq[2]]) )
mSh = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
mShips = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants , na.rm = TRUE )
mShips2 = cbind(as.data.frame(t(mShips)),freq)
colnames(mShips2) = c('p10th',"p50th","p90th","freq")
pShi = ggplot( mShips2) +
geom_point(aes(y=p50th, x = as.numeric( as.character(freq))) ) +
geom_line(aes(y=p50th, x = as.numeric( as.character(freq))),size=1.5 )+
geom_line(aes(y=p10th, x = as.numeric( as.character(freq))),color = "gray")+
geom_line(aes(y=p90th, x = as.numeric( as.character(freq))),color = "gray")+
scale_x_log10() +
ylim( c(70,130)) +
xlab("Frequency") +
ylab(expression( paste("1/3 Octave Band SPL dB re: 1",mu,"Pa")) )+
annotate("text", x=10, y=128, label= "(D)", size=5) +
annotate("text", x=150, y=75, label= paste(src, " only samples within 5 km (N = ", nrow(SOI), " on ", length(unique(SOI$dateStart) ), " days)", sep=""), size=5)
rm(src,SOI)
# Ambient reference spectrum (no sources, no ships, wind < 10 kts), then
# median spectra binned by wind speed rounded to the nearest 10 kts.
src = "Ambient" # unique( dataSpAShWeIce$HourlyWindSpeed )
SOI = dataSpAShWeIce[dataSpAShWeIce$Sounds == 0 & dataSpAShWeIce$Ship == 0 & dataSpAShWeIce$HourlyWindSpeed < 10, ,] #no sounds present unique(tst$Sounds)
SOI = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
mAmbient = apply( SOI[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
#WIND SPEED
# Wind speed arrives as character with a trailing "s" suffix on some values;
# strip it, convert, and round to the nearest 10 kts for binning.
dataSpAShWeIce$HourlyWindSpeed2 = as.numeric( gsub("s","", as.character(dataSpAShWeIce$HourlyWindSpeed) ) )
dataSpAShWeIce$HourlyWindSpeed2r = round(as.numeric( gsub("s","", as.character(dataSpAShWeIce$HourlyWindSpeed) )),digits =-1)
SOI = dataSpAShWeIce[dataSpAShWeIce$Sounds == 0 & dataSpAShWeIce$Ship == 0 ,] #no sounds present unique(tst$Sounds)
ambData = SOI[!is.na(SOI$`8`),] #remove rows with NA for acoustic values
names( ambData )[colFreq[1]:colFreq[2]] = c(6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000)
freq = (names(ambData[colFreq[1]:colFreq[2]]) )
# Median spectrum per wind-speed bin (0/10/20/30/40 kts).
WS0 = ambData[ambData$HourlyWindSpeed2r == 0,]
nrow(WS0)
WS0 = apply( WS0[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS10 = ambData[ambData$HourlyWindSpeed2r == 10,]
nrow(WS10)
WS10 = apply( WS10[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS20 = ambData[ambData$HourlyWindSpeed2r == 20,]
nrow(WS20)
WS20 = apply( WS20[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS30 = ambData[ambData$HourlyWindSpeed2r == 30,]
nrow(WS30)
WS30 = apply( WS30[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WS40 = ambData[ambData$HourlyWindSpeed2r == 40,]
nrow(WS40)
WS40 = apply( WS40[colFreq[1]:colFreq[2]] , 2 , quantile , probs = quants1 , na.rm = TRUE )
WSsum = rbind(WS0,WS10,WS20,WS30,WS40)
WSsum2 = reshape :: melt(WSsum)
library(scales)
# Median SPL wind speeds (A)
# Panel (A): median ambient spectrum per wind-speed bin.
pWS = ggplot(WSsum2, aes(x=(X2), y=value, color = X1) )+
geom_line(size = 1) +
geom_point(size = 1)+
scale_x_log10() +
labs(color = "Wind speed [kts]")+
scale_color_manual(labels = c("0", "10", "20","30","40") , values = hue_pal()(5) ) +
xlab("Frequency") + ylab(expression( paste("Median SPL (Leq) dB re: 1",mu,"Pa"))) +
annotate("text", x=10, y=128, label= "(A)", size=5) +
ylim(c(70,130)) +
theme(legend.position = c(0.8, 0.2))
#theme_minimal()
#comparison of median spectra for source of interest (B)
# Panel (B): median spectra for biological-only, ship-only, and ambient.
mSOIs = reshape2::melt( rbind(mBio, mSh,mAmbient))
mSOIs$Var2 = as.numeric( as.character( mSOIs$Var2))
pMed = ggplot(mSOIs, aes(x=Var2, y = value, color = Var1))+
geom_line(size = 1)+
geom_point(size = 1)+
scale_x_log10() +
scale_color_manual(labels = c("Biological only", "AIS ships only", "Ambient") , values = c("#00BFC4","#F8766D","black") ) +
labs(color = "Sources present")+
annotate("text", x=10, y=128, label= "(B)", size=5) +
xlab("Frequency") + ylab(expression( paste("Median SPL (Leq) dB re: 1",mu,"Pa"))) +
theme(legend.position = c(0.8, 0.2))+
#theme_minimal()
ylim(c(70,130))
library(gridExtra)
# Assemble Figure 3 (2x2): wind bins, source comparison, biological, ships.
grid.arrange(pWS, pMed, pBio, pShi, ncol=2, nrow = 2)
## RESPONSE variable-- ambient sound levels in octave bands
#--------------------------------------------------------------------------------
# Rename the octave-band SPL columns used as model responses.
# NOTE(review): hard-coded column indices -- confirm they still point at the
# intended bands if the upstream data layout changes.
colnames(dataSpAShWeIce)[71] = "Fq_125Hz"
colnames(dataSpAShWeIce)[74] = "Fq_250Hz"
colnames(dataSpAShWeIce)[77] = "Fq_500Hz"
colnames(dataSpAShWeIce)[80] = "Fq_1000Hz"
colnames(dataSpAShWeIce)[83] = "Fq_2000Hz"
colnames(dataSpAShWeIce)[89] = "Fq_8000Hz"
##PREDICTOR variables-- reformat, distribution, values
#--------------------------------------------------------------------------------
# Coerce predictors to numeric and inspect their distributions.
par(mfrow=c( 3,2))
#1) MONTH- as indicator of season
dataSpAShWeIce$mthS = as.numeric(as.character(dataSpAShWeIce$mthS ))
hist(dataSpAShWeIce$mthS,main="Month" )
#2) NUMBER of sounds present
dataSpAShWeIce$Sounds = as.numeric(as.character(dataSpAShWeIce$Sounds ))
hist(dataSpAShWeIce$Sounds,main="#Sounds")
#3) NUMBER of ships present
dataSpAShWeIce$nShips = as.numeric(as.character(dataSpAShWeIce$nShips ))
#unique( dataSpAShWeIce$nShips )
# Missing ship counts mean no AIS ships observed -> treat as zero.
dataSpAShWeIce$nShips[is.na(dataSpAShWeIce$nShips)] <- 0
hist(dataSpAShWeIce$nShips,main="#Ships" )
#4) PRESENCE OF SOURCES
#4a) Bowhead
dataSpAShWeIce$Bmy = as.numeric(as.character( dataSpAShWeIce$Bmy ))
hist(dataSpAShWeIce$Bmy,main="bowhead")
#unique(dataSpAShWeIce$Bmy )
#4b) Bearded seal
dataSpAShWeIce$Eba = as.numeric(as.character(dataSpAShWeIce$Eba ))
#hist(dataSpAShWeIce$Eba )
#unique(dataSpAShWeIce$Eba )
#4c Unknown biologiacl
dataSpAShWeIce$Ubi = as.numeric(as.character(dataSpAShWeIce$Ubi ))
#5d Baleen whale
dataSpAShWeIce$Bal = as.numeric(as.character(dataSpAShWeIce$Bal ))
#5) WINDSPEED
hist( dataSpAShWeIce$HourlyWindSpeed2 ,main = "wind")
#unique(dataSpAShWeIce$HourlyWindSpeed2)
#6) ICE coverage for the day
dataSpAShWeIce$ice_concentration_20km = as.numeric( as.character(dataSpAShWeIce$ice_concentration_20km) )
hist (dataSpAShWeIce$ice_concentration_20km,main="ice")
#7) SUN altitude- day light related to biological activity??
# Sun altitude at Gambell for each sample time (suncalc).
gambell = c(63.8227,171.6758)
tmp = getSunlightPosition(dataSpAShWeIce$dataTime,gambell[1],gambell[2])
dataSpAShWeIce$sunAlt = tmp$altitude
#8) JULIAN day
dataSpAShWeIce$Jul = as.numeric( as.character( ( yday(dataSpAShWeIce$dataTime) )))
#9) HOUR of the day
dataSpAShWeIce$hour2 = as.numeric( as.character( hour(dataSpAShWeIce$dataTime) ))
#hist(dataSpAShWeIce$hour2)
dCols = data.frame(colnames(dataSpAShWeIce))
dCols
##SOME CHECKS MODEL INPUTS
#--------------------------------------------------------------------------------
# Pre-model checks: lag-1 autocorrelation of the response, predictor
# collinearity, NA accounting, and interpolation of wind-speed gaps.
par(mfrow=c( 1,1))
#1) autocorrlation term- how correlated is SPL to previous hour?- very!!!
dataSpAShWeIce$Fq_125HzShift = shift(dataSpAShWeIce$Fq_125Hz,fill = NA)
plot(dataSpAShWeIce$Fq_125HzShift, dataSpAShWeIce$Fq_125Hz)
# corACI is later passed as the starting value for the (intended) CAR1
# correlation structure.
corACI = cor(dataSpAShWeIce$Fq_125HzShift,dataSpAShWeIce$Fq_125Hz, method = "pearson",use="complete.obs")
#2) predictor variable correlation-- only use month, not Julian day
newdata <- dataSpAShWeIce[c("mthS" , "Jul", "sunAlt",
"ice_concentration_20km" ,"HourlyWindSpeed2",
"Eba", "Ubi", "Bal", "nShips")]
corr = cor(newdata)
#corrplot(corr)
#2) NA values-- models can't deal with
idx = apply(is.na(dataSpAShWeIce[,c("Jul", "sunAlt", "ice_concentration_20km", "HourlyWindSpeed2", "Sounds", "Eba", "Bmy", "Bal","Ubi" )]),1,any)
remRows = nrow(dataSpAShWeIce) - nrow(dataSpAShWeIce[!idx,])
remRows - length (which(is.na(dataSpAShWeIce[,("HourlyWindSpeed2")] ) ) )
nrow(dataSpAShWeIce[!idx,])/ nrow(dataSpAShWeIce) #54% of data NA so removed--yikes!!
#interoloate because all wind- missing wind (what is the optimal gap-4 samples for an hour)
# Linear interpolation of wind speed across gaps of up to 6 samples
# (zoo::na.approx); longer gaps stay NA and are dropped before modeling.
dataNA = dataSpAShWeIce[!idx,] #matrix of the missing wind data...
intrpNA = na.approx(dataSpAShWeIce$HourlyWindSpeed2,maxgap=(6),na.rm = FALSE )
# NAs still left
length(which(is.na(intrpNA )))
length(which(is.na(intrpNA )))/ nrow(dataSpAShWeIce) #38% of data NA so removed
par(mfrow=c( 2,1))
plot(intrpNA, pch = 20, col="blue", cex=.5, main = "Wind speed with interpolation max gap = 6")
plot(dataSpAShWeIce$HourlyWindSpeed2,pch = 20,col="blue", cex=.5, main = "Wind speed")
dataSpAShWeIce$HourlyWindSpeed2int = intrpNA
##MODEL-- gam
#--------------------------------------------------------------------------------
#smooth term selection using select=TRUE, which penalizes wiggliness and removes terms with poor fit from the model
#We also fit all models with gamma=1.4, which further restricts wiggliness
#smooth terms: https://www.rdocumentation.org/packages/mgcv/versions/1.8-33/topics/smooth.terms
# Build the modeling dataset: drop rows where even interpolated wind is NA,
# then sanity-check the remaining predictors for NAs.
#remove rows with NAs-- mostly from wind (ugh!)
dataModel = dataSpAShWeIce[!is.na(intrpNA),]
#check other variables for NAs...
unique(dataModel$hour2)
ck = dataModel[is.na(dataModel$hour2),]
unique(dataModel$Jul)
unique(dataModel$sunAlt)
unique(dataModel$ice_concentration_20km)
unique(dataModel$HourlyWindSpeed2int)
unique(dataModel$Eba)
unique(dataModel$Bmy)
unique(dataModel$Bal)
unique(dataModel$Ubi)
#HYPOTHESIS: enviroment still driven/predicted by wind and biological, not human activity
# add back in when variables set: correlation=corCAR1(value=corACI,form=~dataTime)
# ctrl is defined for gam's control/nthreads but does not appear to be passed
# to the model fits below.
ctrl = list(nthreads=6)
rm(newdata,idx,remRows,tmp)
#-------------------------
##1) 125 Hz octave band model
#-------------------------
# Fit the 125 Hz global GAM (full covariate set) and build its partial-effect
# figures; this is the template repeated for each higher octave band.
#a. GLOBAL model-- all possible variables
options(na.action = "na.omit")
global.Gamm125 = gam(Fq_125Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm125)
anova(global.Gamm125)#anova provides whole term significance for parametric terms
par(mfrow=c( 3,4))
visreg(global.Gamm125) #all variables
#abiotic variables
p1 = visreg(global.Gamm125,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (125 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm125,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm125,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
pJulian = visreg(global.Gamm125,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (125 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
pWind = visreg(global.Gamm125,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pIce = visreg(global.Gamm125,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pHour = visreg(global.Gamm125,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=2, y=125, label="(D)") + theme_bw()
pSun = visreg(global.Gamm125,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.7, y=125, label="(E)") + theme_bw()
pBmy = visreg(global.Gamm125,"Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Bowhead") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pBal = visreg(global.Gamm125,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pEba = visreg(global.Gamm125,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
pOro = visreg(global.Gamm125,"Oro", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Oro") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(I)") + theme_bw()
pUbi = visreg(global.Gamm125,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(J)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBmy, pBal,pEba,pOro,pUbi, ncol=5, nrow =2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm125,"pearson")^2)/df.residual(global.Gamm125)
#autocorrelation
pacf(residuals(global.Gamm125)) #why does this not change when I add in corelation term to model?
#best model-- use DREDGE function to rank by ACI
# did not use b/c similar results to global model and want to evalute model for all variables
#options(na.action = "na.fail")
#global.Gamm125_dredge = dredge(global.Gamm125, rank=AIC)
#global.Gamm125_dredge
#subset(global.Gamm125_dredge, subset=delta<2 ) #2 models
#WOCR1<- get.models(global.Gamm125_dredge, subset = 1)[[508]] #best model
#summary(WOCR1)
#b. DETERMINE how much of the variability in the dependent variable, using the global model
# explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm125$sp, so we can hold the smoothing terms fixed.
global.Gamm125$sp
# --- 125 Hz: partition deviance explained by source category ---
# Refit sub-models containing only one category of terms (physical, biological,
# anthropogenic, seasonal), holding smoothing parameters fixed at the global
# model's values, and record each sub-model's deviance explained in ModComp.
# NOTE(review): the sp index slices ([4:5], [6:7], [8:9], [1:2], [3]) assume a
# specific ordering/length of global.Gamm125$sp; with select=TRUE each smooth
# typically contributes two smoothing parameters -- verify the slices against
# names(global.Gamm125$sp) before trusting these fits.
# NOTE(review): `correlation=` and `na.rm=` are not arguments of mgcv::gam() and
# are presumably ignored -- confirm.
mPhysic = gam(Fq_125Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm125$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm125$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
# Deviance explained = 1 - residual deviance / null deviance.
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_125Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_125Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm125$sp[8:9]) + (nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_125Hz ~ s(Jul,bs = "cr",sp = global.Gamm125$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm125$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
# Accumulate one row per frequency band: global + per-category deviance explained.
# (Columns are named later, when ModComp is converted to a data frame.)
ModComp = NULL
ModComp = rbind(ModComp, c("125Hz", 1-(global.Gamm125$deviance/global.Gamm125$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
# NOTE: used this https://stats.stackexchange.com/questions/325832/gam-mgcv-aic-vs-deviance-explained instead of this...
#ddevPhysic = deviance(global.Gamm125)- deviance(mPhysic)
#ddevBiolog = deviance(global.Gamm125)- deviance(mBiolog)
#ddevAnthro = deviance(global.Gamm125)- deviance(mAnthro)
#c. PLOTS to save out for each FQ band
# NOTE(review): hard-coded Windows absolute paths make this non-portable;
# consider file.path() with a configurable output directory.
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq125_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm125))
plot(dataModel$Fq_125Hz, predict(global.Gamm125), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq125_ConditionalPlots.png")
par(mfrow=c(3,4))
visreg(global.Gamm125,main="125Hz")
dev.off()
#-------------------------
##2) 250 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
# Fit the global GAM for the 250 Hz band: cyclic-cubic smooth on hour, cubic
# regression smooths on Julian day, ice, wind and sun altitude, plus linear
# (parametric) terms for ship count and call-presence covariates.
# NOTE(review): `correlation=` and `na.rm=` are not mgcv::gam() arguments -- confirm
# they are being ignored (see pacf note below).
options(na.action = "na.omit")
global.Gamm250 = gam(Fq_250Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm250)
anova(global.Gamm250)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm250)
# Main-text 3-panel figure: Julian day, wind, ice effects.
p1 = visreg(global.Gamm250,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (250 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm250,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm250,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
# --- 250 Hz: conditional plots for all significant terms (supplementary figure),
# same visreg/ggplot construction as the 125 Hz block above.
pJulian = visreg(global.Gamm250,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (250 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm250,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm250,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm250,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm250,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm250,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pUbi = visreg(global.Gamm250,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pEba = visreg(global.Gamm250,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pUbi, pEba, ncol=4, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
# Pearson chi-square / residual df, as in the 125 Hz block.
sum(residuals(global.Gamm250,"pearson")^2)/df.residual(global.Gamm250)
#autocorrelation
pacf(residuals(global.Gamm250)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
#   explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm250$sp, so we can hold the smoothing terms fixed.
global.Gamm250$sp
# --- 250 Hz: deviance partition by source category (same scheme as 125 Hz).
# Sub-models reuse the global model's smoothing parameters (see sp-index caveat
# in the 125 Hz block: verify slices against names(global.Gamm250$sp)).
mPhysic = gam(Fq_250Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm250$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm250$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_250Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog= 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_250Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm250$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_250Hz ~ s(Jul,bs = "cr",sp = global.Gamm250$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm250$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
# Append this band's row to the cross-band comparison table.
ModComp = rbind(ModComp, c("250Hz", 1-(global.Gamm250$deviance/global.Gamm250$null.deviance), devPhysic, devSeason, devBiolog, devAnthro))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq250_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm250))
plot(dataModel$Fq_250Hz, predict(global.Gamm250), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq250_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm250,main="250Hz")
dev.off()
#-------------------------
##3) 500 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
# 500 Hz global GAM; the covariate set adds Dle (absent in the lower bands).
options(na.action = "na.omit")
global.Gamm500 = gam(Fq_500Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm500)
anova(global.Gamm500)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm500)
# Main-text 3-panel figure: Julian day, wind, ice effects.
p1 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- option for figure 4, can I overlay bio one one graph- not with points because
# --- 500 Hz: conditional plots for significant terms; the *5-suffixed objects
# are reused later when building Figure 4 alongside the 8000 Hz panels.
pJulian5 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pIce5 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind5 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pBmy5 = visreg(global.Gamm500,"Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Bmy") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(E)") + theme_bw()
pBal5 = visreg(global.Gamm500,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pUbi5 = visreg(global.Gamm500,"Ubi", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
# NOTE(review): in visreg() the 3rd and 4th positional arguments are `by` and
# `breaks`, so the call below does NOT overlay the three biologic terms -- "Bal"
# is taken as `by` and "Bmy" as `breaks`. Confirm intent; overlaying likely needs
# separate visreg fits combined manually.
pBio5 = visreg(global.Gamm500,"Ubi","Bal","Bmy", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Unidentified Biologic") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
grid.arrange(pJulian5, pWind5, pIce5, pBmy5, pBal5, pUbi5, ncol=3, nrow = 2)
grid.arrange(pJulian5, pWind5, pIce5,pBal5, ncol=2, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm500,"pearson")^2)/df.residual(global.Gamm500)
#autocorrelation
pacf(residuals(global.Gamm500)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
#   explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm500$sp, so we can hold the smoothing terms fixed.
global.Gamm500$sp
# --- 500 Hz: deviance partition by source category.
# NOTE(review): this band indexes sp differently from the others -- mPhysic uses
# sp[3:4]/sp[5:6] here (vs. [4:5]/[6:7] in the 125/250/1000/2000/8000 blocks),
# and mSeason below reuses sp[3], overlapping mPhysic's slice. One of these
# conventions must be wrong; check against names(global.Gamm500$sp).
mPhysic = gam(Fq_500Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm500$sp[3:4] ) +
s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm500$sp[5:6]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_500Hz ~ (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_500Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm500$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_500Hz ~ s(Jul,bs = "cr",sp = global.Gamm500$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm500$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("500Hz", 1-(global.Gamm500$deviance/global.Gamm500$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq500_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm500))
plot(dataModel$Fq_500Hz, predict(global.Gamm500), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq500_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm500,main="500Hz")
dev.off()
#-------------------------
##4) 1000 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
# 1000 Hz global GAM; note Bmy is dropped from the covariate set at this band.
options(na.action = "na.omit")
global.Gamm1000 = gam(Fq_1000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm1000)
anova(global.Gamm1000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm1000)
# Main-text 3-panel figure: Julian day, wind, ice effects.
p1 = visreg(global.Gamm1000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (1000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm1000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm1000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
# --- 1000 Hz: conditional plots for all significant terms (supplementary figure).
pJulian = visreg(global.Gamm1000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (1000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm1000,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm1000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm1000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm1000,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm1000,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
# BUG FIX: this panel is assigned to pOro, labelled "Oro" and lettered (H), but
# the original call plotted the "Ubi" term. Plot the Oro term so the object
# name, axis label and panel letter agree (cf. the 125 Hz block, where Oro and
# Ubi each get their own panel).
pOro = visreg(global.Gamm1000,"Oro", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Oro") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
pEba = visreg(global.Gamm1000,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
# Assemble the 8-panel 1000 Hz supplementary figure.
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pEba, pOro, ncol=4, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm1000,"pearson")^2)/df.residual(global.Gamm1000)
#autocorrelation
pacf(residuals(global.Gamm1000)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
#   explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm1000$sp, so we can hold the smoothing terms fixed.
global.Gamm1000$sp
# --- 1000 Hz: deviance partition by source category (see sp-index caveat in the
# 125 Hz block: verify slices against names(global.Gamm1000$sp)).
mPhysic = gam(Fq_1000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm1000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm1000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_1000Hz ~ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_1000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm1000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_1000Hz ~ s(Jul,bs = "cr",sp = global.Gamm1000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm1000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("1000Hz", 1-(global.Gamm1000$deviance/global.Gamm1000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq1000_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm1000))
plot(dataModel$Fq_1000Hz, predict(global.Gamm1000), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq1000_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm1000,main="1000Hz")
dev.off()
#-------------------------
##5) 2000 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
# 2000 Hz global GAM; Bmy and Oro are dropped from the covariate set at this band.
options(na.action = "na.omit")
global.Gamm2000 = gam(Fq_2000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Eba) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm2000)
anova(global.Gamm2000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm2000)
# Main-text 3-panel figure: Julian day, wind, ice effects.
p1 = visreg(global.Gamm2000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (2000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=10, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm2000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm2000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
grid.arrange(p1, p2, p3, ncol=3, nrow = 1)
#all significant variables- supplementary info
# --- 2000 Hz: conditional plots for all significant terms (supplementary figure).
pJulian = visreg(global.Gamm2000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (2000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=18, y=125, label="(A)") + theme_bw()
pHour = visreg(global.Gamm2000,"hour2", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Hour") + ylim(c(70,125))+
annotate("text", x=1, y=125, label="(D)") + theme_bw()
pIce = visreg(global.Gamm2000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind = visreg(global.Gamm2000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pSun = visreg(global.Gamm2000,"sunAlt", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Sun") + ylim(c(70,125))+
annotate("text", x=-0.8, y=125, label="(E)") + theme_bw()
pBal = visreg(global.Gamm2000,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(F)") + theme_bw()
pEba = visreg(global.Gamm2000,"Eba", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Eba") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(G)") + theme_bw()
pDle = visreg(global.Gamm2000,"Dle", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Dle") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(H)") + theme_bw()
grid.arrange(pJulian, pWind, pIce, pHour, pSun, pBal, pEba, pDle, ncol=4, nrow = 2)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm2000,"pearson")^2)/df.residual(global.Gamm2000)
#autocorrelation
pacf(residuals(global.Gamm2000)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
#   explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters: global.Gamm2000$sp, so we can hold the smoothing terms fixed.
global.Gamm2000$sp
# --- 2000 Hz: deviance partition by source category (see sp-index caveat in the
# 125 Hz block: verify slices against names(global.Gamm2000$sp)).
mPhysic = gam(Fq_2000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm2000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm2000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_2000Hz ~(Bal)+ (Eba) + (Ubi)+ (Hfa)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_2000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm2000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_2000Hz ~ s(Jul,bs = "cr",sp = global.Gamm2000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm2000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("2000Hz", 1-(global.Gamm2000$deviance/global.Gamm2000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq2000_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm2000))
plot(dataModel$Fq_2000Hz, predict(global.Gamm2000), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq2000_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm2000,main="2000Hz")
dev.off()
#-------------------------
##6) 8000 Hz octave band
#-------------------------
#a. GLOBAL model-- all possible variables
# 8000 Hz global GAM; only Bal, Ubi and Dle remain as biologic covariates.
options(na.action = "na.omit")
global.Gamm8000 = gam(Fq_8000Hz ~ s(Jul,bs = "cr") + s(hour2, bs="cc")+
s(ice_concentration_20km,bs = "cr") + s(HourlyWindSpeed2int,bs = "cr") +
s(sunAlt,bs = "cr") + (nShips) +
(Bal)+ (Ubi)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
#model evaluation
summary(global.Gamm8000)
anova(global.Gamm8000)#anova provides whole term significance for parametric terms
#par(mfrow=c( 3,3))
#visreg(global.Gamm8000)
# FIGURE 4-- 500 and 8000 results
# 8000 Hz panels for Figure 4 (combined below with the *5 objects from the
# 500 Hz section).
p1 = visreg(global.Gamm8000,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (8000 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
p2 = visreg(global.Gamm8000,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
p3 = visreg(global.Gamm8000,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
# Remaining Figure 4 panels: Dle effect at 8000 Hz, and rebuilt 500 Hz panels
# (these *5 objects overwrite the versions built in the 500 Hz section, with
# different annotation x positions).
pDle = visreg(global.Gamm8000,"Dle", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Dle") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(D)") + theme_bw()
pJulian5 = visreg(global.Gamm500,"Jul", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "Predicted SPL (500 Hz model)") + xlab( "Julian day") + ylim(c(70,125)) +
annotate("text", x=20, y=125, label="(A)") + theme_bw()
pIce5 = visreg(global.Gamm500,"ice_concentration_20km", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Ice concentration") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(C)") + theme_bw()
pWind5 = visreg(global.Gamm500,"HourlyWindSpeed2int" , gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Wind Speed [kts]") + ylim(c(70,125)) +
annotate("text", x=2, y=125, label="(B)") + theme_bw()
pBal5 = visreg(global.Gamm500,"Bal", gg=TRUE, points=list(size=.2,alpha = .2),line=list(col="black")) +
ylab( "") + xlab( "Baleen") + ylim(c(70,125))+
annotate("text", x=.1, y=125, label="(D)") + theme_bw()
# Figure 4: top row = 500 Hz, bottom row = 8000 Hz.
grid.arrange(pJulian5, pWind5, pIce5, pBal5, p1,p2,p3,pDle, ncol=4, nrow = 2)
#grid.arrange(pJulian5, p1, pWind5, p2, pIce5, p3, pBal5,pDle, ncol=2, nrow = 4)
#dispersion parameter a la Fox and Weisberg 2019 Companion to Applied Regression, dispersion < 1, model not overdispersed (no random variables, so not relevant??)
#https://stackoverflow.com/questions/59342595/how-to-check-for-overdispersion-in-a-gam-with-negative-binomial-distribution
sum(residuals(global.Gamm8000,"pearson")^2)/df.residual(global.Gamm8000)
#autocorrelation
pacf(residuals(global.Gamm8000)) #why does this not change when I add in corelation term to model?
#b. DETERMINE how much of the variability in the dependent variable, using the global model
#   explained by each term in the model https://stat.ethz.ch/pipermail/r-help/2007-October/142811.html
# first version, all models include seasonal component... decided to separate out these terms
#smoothing parameters:global.Gamm8000$sp, so we can hold the smoothing terms fixed.
global.Gamm8000$sp
# --- 8000 Hz: deviance partition by source category (see sp-index caveat in the
# 125 Hz block: verify slices against names(global.Gamm8000$sp)).
mPhysic = gam(Fq_8000Hz ~ s(ice_concentration_20km,bs = "cr",sp = global.Gamm8000$sp[4:5] ) + s(HourlyWindSpeed2int,bs = "cr",sp = global.Gamm8000$sp[6:7]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mPhysic)
devPhysic = 1-(mPhysic$deviance/mPhysic$null.deviance)
mBiolog = gam(Fq_8000Hz ~ (Bal)+ (Ubi)+ (Dle),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mBiolog)
devBiolog = 1-(mBiolog$deviance/mBiolog$null.deviance)
mAnthro = gam(Fq_8000Hz ~ s(sunAlt,bs = "cr",sp = global.Gamm8000$sp[8:9]) +(nShips),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mAnthro)
devAnthro = 1-(mAnthro$deviance/mAnthro$null.deviance)
mSeason = gam(Fq_8000Hz ~ s(Jul,bs = "cr",sp = global.Gamm8000$sp[1:2]) + s(hour2, bs="cc", sp = global.Gamm8000$sp[3]),
correlation=corCAR1(value=corACI,form=~dataTime),
data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(mSeason)
devSeason = 1-(mSeason$deviance/mSeason$null.deviance)
ModComp = rbind(ModComp, c("8000Hz", 1-(global.Gamm8000$deviance/global.Gamm8000$null.deviance), devPhysic, devSeason, devBiolog, devAnthro ))
#c. PLOTS to save out for each FQ band
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq8000_modelPerfo.png",
width=600, height=350,pointsize = 12)
par(mfrow=c( 1,1))
pSPL = (predict(global.Gamm8000))
plot(dataModel$Fq_8000Hz, predict(global.Gamm8000), main = "Predicted vs Actual")
dev.off()
png(file="D:\\RESEARCH\\COA_Projects\\2020_COA_WCS\\figures\\Fq8000_ConditionalPlots.png")
par(mfrow=c( 3,4))
visreg(global.Gamm8000,main="8000Hz")
dev.off()
#-------------------------
## PLOT model results as a function of frequency band to show which bands
## are dominated by which source category
#-------------------------
colnames(ModComp) = c("Frequency","Global","Abiotic","Seasonal","Biological","Anthropogenic")
ModComp = as.data.frame(ModComp)
# The rbind()-built rows are all character; coerce every deviance column back
# to numeric in one pass.
# BUG FIX: the original coerced a non-existent "Physical" column (the column
# is named "Abiotic" above), which errored and left Abiotic as character.
dev.cols = c("Global","Abiotic","Seasonal","Biological","Anthropogenic")
ModComp[dev.cols] = lapply(ModComp[dev.cols], function(v) as.numeric(as.character(v)))
# Long format for ggplot: one row per (Frequency, source-category) pair.
means.long<-reshape2::melt(ModComp,id.vars="Frequency")
# Keep frequency bands in ascending order rather than alphabetical.
positions <- c("125Hz", "250Hz", "500Hz", "1000Hz", "2000Hz", "8000Hz")
#stacked-- not what I want because does a total, unless I remove global model from it
dev.off()
ggplot(means.long,aes(x=Frequency,y=value,fill=factor(variable)))+
  #geom_bar(position = "fill", stat = "identity") +
  geom_bar( stat = "identity", colour="black") +
  scale_x_discrete(limits = positions)+
  scale_fill_discrete(name="Source Category")+
  xlab("")+ ylab("Deviance explained")
#side-by-side-- use this!
ggplot(means.long,aes(x=Frequency,y=value,fill=factor(variable)))+
  geom_bar(stat="identity",position="dodge",colour="black")+
  scale_x_discrete(limits = positions) +
  xlab("")+ ylab("Deviance explained")+
  scale_fill_manual(name="Soundscape category models",
                    values = c("black", "dark gray","#A3A500","#00BFC4","#F8766D"))+
  theme(legend.position = c(0.14, 0.8))
# Colour scratch notes: pink, green, blue, red
c("#00BFC4","#F8766D","black")
hue_pal()(5)
# Copy to spreadsheet for manuscript tables
# Global model results
summary(global.Gamm125)
summary(global.Gamm250)
summary(global.Gamm500)
summary(global.Gamm1000)
summary(global.Gamm2000)
summary(global.Gamm8000)
#-------------------------
# exploring other model structures with temporal variables as "by" variable
#-------------------------
dataModel$month
# Refit the 125 Hz model letting the ice-concentration and wind-speed smooths
# vary by month (mthS) via 'by=' factor-smooth interactions; the explicit
# seasonal smooths are dropped (see REMOVED note below).
global.Gamm125 = gam(Fq_125Hz ~
                       s(ice_concentration_20km, bs = "cr", by = (mthS)) + s(HourlyWindSpeed2int, bs = "cr", by = mthS) +
                       s(sunAlt,bs = "cr") + (nShips) +
                       (Bmy)+ (Bal)+ (Eba) + (Oro) + (Ubi)+ (Hfa),
                     correlation=corCAR1(value=corACI,form=~dataTime),
                     data=dataModel, method="REML", select=TRUE, gamma=1.4,na.rm = TRUE)
summary(global.Gamm125)
# NOTE(review): the plot title says "8000Hz" but this is the 125 Hz model --
# likely a copy-paste slip; confirm before publishing figures.
visreg(global.Gamm125,main="8000Hz")
plot(global.Gamm125)
#REMOVED: s(Jul,bs = "cr") + s(hour2, bs="cc")+
|
context("Confirmatory Factor Analysis")
# --- 3-factor run --------------------------------------------------------
# Classic Holzinger & Swineford setup: three latent factors with three
# indicators each, lavaan mimicry, standard SEs, factor-variance
# identification, FIML for missing data, no grouping variable.
options <- jaspTools::analysisOptions("ConfirmatoryFactorAnalysis")
options$groupvar <- ""
options$invariance <- "configural"
options$mimic <- "lavaan"
options$se <- "standard"
options$estimator <- "default"
options$std <- "none"
options$factors <- list(
  list(indicators = list("x1", "x2", "x3"), name = "Factor1", title = "Factor 1"),
  list(indicators = list("x4", "x5", "x6"), name = "Factor2", title = "Factor 2"),
  list(indicators = list("x7", "x8", "x9"), name = "Factor3", title = "Factor 3")
)
options$identify <- "factor"
options$missing <- "FIML"
# Fixed seed so any stochastic steps in the analysis are reproducible.
set.seed(1)
results <- jaspTools::runAnalysis("ConfirmatoryFactorAnalysis", "holzingerswineford.csv", options)
# Regression test: factor covariance table must match stored reference
# values; "<unicode>" placeholders stand for the Greek symbols JASP prints.
test_that("[CFA 3-Factor] Factor Covariances table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fc"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(0.333504675491998, 0.583514923484581, 0.45850979948829, "Factor 1",
                                      "<unicode><unicode><unicode>", 6.52589093874667e-13, "Factor 2",
                                      0.063779296447443, 7.18900685689003, 0.327797106505315, 0.613272259288624,
                                      0.47053468289697, "Factor 1", "<unicode><unicode><unicode>",
                                      1.03996145028873e-10, "Factor 3", 0.0728266322838328, 6.46102487704112,
                                      0.148278758433621, 0.417692356258714, 0.282985557346168, "Factor 2",
                                      "<unicode><unicode><unicode>", 3.83174073319559e-05, "Factor 3",
                                      0.0687292215444246, 4.11739797115633))
})
# Regression test: first-order factor loadings (one row per indicator x1-x9)
# must match stored reference values.
test_that("[CFA 3-Factor] Factor loadings table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl1"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(0.741164020131046, 1.05807660760393, 0.899620313867488, "<unicode><unicode>11",
                                      "Factor 1", 0, "x1", 0.0808465333987386, 11.1275063512064, 0.346131928251774,
                                      0.649749093967046, 0.49794051110941, "<unicode><unicode>12",
                                      "Factor 1", 1.28623778294923e-10, "x2", 0.0774547818506273,
                                      6.42878979466621, 0.510293170576109, 0.802019014680793, 0.656156092628451,
                                      "<unicode><unicode>13", "Factor 1", 0, "x3", 0.0744212256974568,
                                      8.81678696472846, 0.878687521771487, 1.1006993764173, 0.989693449094392,
                                      "<unicode><unicode>21", "Factor 2", 0, "x4", 0.0566367179185465,
                                      17.4744138690689, 0.978762425338572, 1.22444687472433, 1.10160465003145,
                                      "<unicode><unicode>22", "Factor 2", 0, "x5", 0.0626757561168699,
                                      17.5762482701815, 0.811432261987188, 1.02176969353156, 0.916600977759373,
                                      "<unicode><unicode>23", "Factor 2", 0, "x6", 0.0536584940344529,
                                      17.0821226769958, 0.483096088879332, 0.75585477823652, 0.619475433557926,
                                      "<unicode><unicode>31", "Factor 3", 0, "x7", 0.0695825769015842,
                                      8.90273774186456, 0.601768916407813, 0.860128689422337, 0.730948802915075,
                                      "<unicode><unicode>32", "Factor 3", 0, "x8", 0.0659093164600047,
                                      11.0902197469857, 0.54254918241612, 0.797411035146398, 0.669980108781259,
                                      "<unicode><unicode>33", "Factor 3", 0, "x9", 0.0650169734598685,
                                      10.3046954222623))
})
# With identify = "factor" every latent variance is fixed to 1, so this
# table is deterministic (no SEs/p-values to compare).
test_that("[CFA 3-Factor] Factor variances table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fv"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(1, 1, 1, "Factor 1", "", 0, "", 1, 1, 1, "Factor 2", "", 0, "",
                                      1, 1, 1, "Factor 3", "", 0, ""))
})
# Regression test: indicator residual variances must match stored
# reference values.
test_that("[CFA 3-Factor] Residual variances table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_rv"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(0.326399894319287, 0.771706936936134, 0.549053415627711, "x1",
                                      1.34368055770828e-06, 0.113600822803218, 4.83318168019605, 0.934462587447057,
                                      1.33321007058646, 1.13383632901676, "x2", 0, 0.10172316590628,
                                      11.1462941495686, 0.666706476085054, 1.02194282083783, 0.844324648461441,
                                      "x3", 0, 0.0906231817407956, 9.31687270566615, 0.277647747273574,
                                      0.464697982595281, 0.371172864934427, "x4", 7.32747196252603e-15,
                                      0.047717773591029, 7.77850341710428, 0.331807290171594, 0.560702710555418,
                                      0.446255000363506, "x5", 2.1316282072803e-14, 0.0583927618541264,
                                      7.64229993913142, 0.271855645519897, 0.44054959934542, 0.356202622432658,
                                      "x6", 2.22044604925031e-16, 0.0430349626718039, 8.27705196700539,
                                      0.639885213665674, 0.958894859526553, 0.799390036596114, "x7",
                                      0, 0.0813815071034943, 9.82274800563124, 0.342279857209668,
                                      0.63311496350216, 0.487697410355914, "x8", 4.92208496183366e-11,
                                      0.0741939924882706, 6.57327357646934, 0.427489578257306, 0.704773115754749,
                                      0.566131347006028, "x9", 1.11022302462516e-15, 0.0707368961074339,
                                      8.00333882541577))
})
# Regression test: baseline vs factor-model chi-square statistics, degrees
# of freedom, and p-value must match stored reference values.
test_that("[CFA 3-Factor] Chi-square test table results match", {
  table <- results[["results"]][["maincontainer"]][["collection"]][["maincontainer_cfatab"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(918.851589292384, 36, "Baseline model", "", 85.305521772505, 24,
                                      "Factor model", 8.50255310602677e-09))
})
# --- Second-order factor run ---------------------------------------------
# Same three first-order factors as above, now additionally loading on a
# single second-order factor; every other analysis option is unchanged.
options <- jaspTools::analysisOptions("ConfirmatoryFactorAnalysis")
options$secondOrder <- list("Factor 1", "Factor 2", "Factor 3")
options$groupvar <- ""
options$invariance <- "configural"
options$mimic <- "lavaan"
options$se <- "standard"
options$estimator <- "default"
options$std <- "none"
options$factors <- list(
  list(indicators = list("x1", "x2", "x3"), name = "Factor1", title = "Factor 1"),
  list(indicators = list("x4", "x5", "x6"), name = "Factor2", title = "Factor 2"),
  list(indicators = list("x7", "x8", "x9"), name = "Factor3", title = "Factor 3")
)
options$identify <- "factor"
options$missing <- "FIML"
# Same seed as the first run for reproducibility.
set.seed(1)
results <- jaspTools::runAnalysis("ConfirmatoryFactorAnalysis", "holzingerswineford.csv", options)
# Regression test: first-order loadings under the second-order model must
# match stored reference values.
test_that("[CFA Second order] Factor loadings table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl1"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(0.0577039904414908, 0.819337251278242, 0.438520620859867, "<unicode><unicode>11",
                                      "Factor 1", 0.0240111123688183, "x1", 0.194297769460158, 2.25695139001474,
                                      0.0315641080012427, 0.453878987089273, 0.242721547545258, "<unicode><unicode>12",
                                      "Factor 1", 0.0242627980684818, "x2", 0.107735367185111, 2.25294212928438,
                                      0.0503061064249717, 0.589382650102084, 0.319844378263528, "<unicode><unicode>13",
                                      "Factor 1", 0.0200309356495567, "x3", 0.137522053448247, 2.32576790590095,
                                      0.71768085442071, 0.966830831063184, 0.842255842741947, "<unicode><unicode>21",
                                      "Factor 2", 0, "x4", 0.0635598354377267, 13.2513848870354, 0.799268757944165,
                                      1.07572151104007, 0.937495134492118, "<unicode><unicode>22",
                                      "Factor 2", 0, "x5", 0.0705249574167, 13.2930974910468, 0.663204523777401,
                                      0.896899759208491, 0.780052141492946, "<unicode><unicode>23",
                                      "Factor 2", 0, "x6", 0.0596172269680586, 13.0843412410121, 0.392491072303089,
                                      0.651164920657023, 0.521827996480056, "<unicode><unicode>31",
                                      "Factor 3", 2.66453525910038e-15, "x7", 0.0659894391923322,
                                      7.90775013194371, 0.483529756039699, 0.747933394180798, 0.615731575110249,
                                      "<unicode><unicode>32", "Factor 3", 0, "x8", 0.0674511471197128,
                                      9.12855602021777, 0.438789772679437, 0.689958088778821, 0.564373930729129,
                                      "<unicode><unicode>33", "Factor 3", 0, "x9", 0.0640747274135055,
                                      8.80805823935775))
})
# Regression test: loadings of the three first-order factors on the
# second-order factor must match stored reference values.
test_that("[CFA Second order] Second-order factor loadings table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl2"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(-0.149104054075191, 3.73161619230762, 1.79125606911621, "<unicode><unicode>11",
                                      "SecondOrder", 0.0703961021446211, "Factor 1", 0.989997846132234,
                                      1.80935350123676, 0.364985992598308, 0.869100374746346, 0.617043183672327,
                                      "<unicode><unicode>12", "SecondOrder", 1.6021965671964e-06,
                                      "Factor 2", 0.128602970800593, 4.79804766430388, 0.360410276598517,
                                      0.919053360468363, 0.63973181853344, "<unicode><unicode>13",
                                      "SecondOrder", 7.15860527811252e-06, "Factor 3", 0.142513609504142,
                                      4.48891737960541))
})
# With identify = "factor" all latent variances (including the second-order
# factor's) are fixed to 1, so this table is deterministic.
test_that("[CFA Second order] Factor variances table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_fv"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(1, 1, 1, "Factor 1", "", 0, "", 1, 1, 1, "Factor 2", "", 0, "",
                                      1, 1, 1, "Factor 3", "", 0, "", 1, 1, 1, "Second-Order", "",
                                      0, ""))
})
# Regression test: indicator residual variances under the second-order
# model must match stored reference values.
test_that("[CFA Second order] Residual variances table results match", {
  table <- results[["results"]][["estimates"]][["collection"]][["estimates_rv"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(0.326401450955574, 0.771708269797423, 0.549054860376498, "x1",
                                      1.34357828596165e-06, 0.11360076571671, 4.83319682673349, 0.934464040890642,
                                      1.33321206655867, 1.13383805372466, "x2", 0, 0.101723304308982,
                                      11.1462959390372, 0.666705522978852, 1.02194196604298, 0.844323744510915,
                                      "x3", 0, 0.0906232068206831, 9.3168601524065, 0.2776477911548,
                                      0.464698082724097, 0.371172936939448, "x4", 7.32747196252603e-15,
                                      0.0477177879401676, 7.77850258701964, 0.331807404885689, 0.560702857278213,
                                      0.446255131081951, "x5", 2.1316282072803e-14, 0.0583927700197611,
                                      7.64230110904024, 0.271855604188449, 0.44054957527582, 0.356202589732134,
                                      "x6", 2.22044604925031e-16, 0.0430349670754176, 8.27705036018498,
                                      0.63988809304374, 0.958898264282657, 0.799393178663198, "x7",
                                      0, 0.0813816411309667, 9.82277043758238, 0.342280111000572,
                                      0.633115637435664, 0.487697874218118, "x8", 4.92219598413612e-11,
                                      0.0741940996694749, 6.57327033269154, 0.427488355941731, 0.704772286642682,
                                      0.566130321292206, "x9", 1.11022302462516e-15, 0.0707369964162943,
                                      8.00331297586447))
})
# Regression test: chi-square fit statistics for the second-order model
# must match stored reference values.
test_that("[CFA Second order] Chi-square test table results match", {
  table <- results[["results"]][["maincontainer"]][["collection"]][["maincontainer_cfatab"]][["data"]]
  jaspTools::expect_equal_tables(table,
                                 list(918.851589292384, 36, "Baseline model", "", 85.3055217707089,
                                      24, "Factor model", 8.50255321704907e-09))
})
|
/tests/testthat/test-confirmatoryfactoranalysis.R
|
no_license
|
TimKDJ/jaspFactor
|
R
| false
| false
| 12,266
|
r
|
context("Confirmatory Factor Analysis")
# 3-factor run
options <- jaspTools::analysisOptions("ConfirmatoryFactorAnalysis")
options$groupvar <- ""
options$invariance <- "configural"
options$mimic <- "lavaan"
options$se <- "standard"
options$estimator <- "default"
options$std <- "none"
options$factors <- list(
list(indicators = list("x1", "x2", "x3"), name = "Factor1", title = "Factor 1"),
list(indicators = list("x4", "x5", "x6"), name = "Factor2", title = "Factor 2"),
list(indicators = list("x7", "x8", "x9"), name = "Factor3", title = "Factor 3")
)
options$identify <- "factor"
options$missing <- "FIML"
set.seed(1)
results <- jaspTools::runAnalysis("ConfirmatoryFactorAnalysis", "holzingerswineford.csv", options)
test_that("[CFA 3-Factor] Factor Covariances table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fc"]][["data"]]
jaspTools::expect_equal_tables(table,
list(0.333504675491998, 0.583514923484581, 0.45850979948829, "Factor 1",
"<unicode><unicode><unicode>", 6.52589093874667e-13, "Factor 2",
0.063779296447443, 7.18900685689003, 0.327797106505315, 0.613272259288624,
0.47053468289697, "Factor 1", "<unicode><unicode><unicode>",
1.03996145028873e-10, "Factor 3", 0.0728266322838328, 6.46102487704112,
0.148278758433621, 0.417692356258714, 0.282985557346168, "Factor 2",
"<unicode><unicode><unicode>", 3.83174073319559e-05, "Factor 3",
0.0687292215444246, 4.11739797115633))
})
test_that("[CFA 3-Factor] Factor loadings table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl1"]][["data"]]
jaspTools::expect_equal_tables(table,
list(0.741164020131046, 1.05807660760393, 0.899620313867488, "<unicode><unicode>11",
"Factor 1", 0, "x1", 0.0808465333987386, 11.1275063512064, 0.346131928251774,
0.649749093967046, 0.49794051110941, "<unicode><unicode>12",
"Factor 1", 1.28623778294923e-10, "x2", 0.0774547818506273,
6.42878979466621, 0.510293170576109, 0.802019014680793, 0.656156092628451,
"<unicode><unicode>13", "Factor 1", 0, "x3", 0.0744212256974568,
8.81678696472846, 0.878687521771487, 1.1006993764173, 0.989693449094392,
"<unicode><unicode>21", "Factor 2", 0, "x4", 0.0566367179185465,
17.4744138690689, 0.978762425338572, 1.22444687472433, 1.10160465003145,
"<unicode><unicode>22", "Factor 2", 0, "x5", 0.0626757561168699,
17.5762482701815, 0.811432261987188, 1.02176969353156, 0.916600977759373,
"<unicode><unicode>23", "Factor 2", 0, "x6", 0.0536584940344529,
17.0821226769958, 0.483096088879332, 0.75585477823652, 0.619475433557926,
"<unicode><unicode>31", "Factor 3", 0, "x7", 0.0695825769015842,
8.90273774186456, 0.601768916407813, 0.860128689422337, 0.730948802915075,
"<unicode><unicode>32", "Factor 3", 0, "x8", 0.0659093164600047,
11.0902197469857, 0.54254918241612, 0.797411035146398, 0.669980108781259,
"<unicode><unicode>33", "Factor 3", 0, "x9", 0.0650169734598685,
10.3046954222623))
})
test_that("[CFA 3-Factor] Factor variances table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fv"]][["data"]]
jaspTools::expect_equal_tables(table,
list(1, 1, 1, "Factor 1", "", 0, "", 1, 1, 1, "Factor 2", "", 0, "",
1, 1, 1, "Factor 3", "", 0, ""))
})
test_that("[CFA 3-Factor] Residual variances table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_rv"]][["data"]]
jaspTools::expect_equal_tables(table,
list(0.326399894319287, 0.771706936936134, 0.549053415627711, "x1",
1.34368055770828e-06, 0.113600822803218, 4.83318168019605, 0.934462587447057,
1.33321007058646, 1.13383632901676, "x2", 0, 0.10172316590628,
11.1462941495686, 0.666706476085054, 1.02194282083783, 0.844324648461441,
"x3", 0, 0.0906231817407956, 9.31687270566615, 0.277647747273574,
0.464697982595281, 0.371172864934427, "x4", 7.32747196252603e-15,
0.047717773591029, 7.77850341710428, 0.331807290171594, 0.560702710555418,
0.446255000363506, "x5", 2.1316282072803e-14, 0.0583927618541264,
7.64229993913142, 0.271855645519897, 0.44054959934542, 0.356202622432658,
"x6", 2.22044604925031e-16, 0.0430349626718039, 8.27705196700539,
0.639885213665674, 0.958894859526553, 0.799390036596114, "x7",
0, 0.0813815071034943, 9.82274800563124, 0.342279857209668,
0.63311496350216, 0.487697410355914, "x8", 4.92208496183366e-11,
0.0741939924882706, 6.57327357646934, 0.427489578257306, 0.704773115754749,
0.566131347006028, "x9", 1.11022302462516e-15, 0.0707368961074339,
8.00333882541577))
})
test_that("[CFA 3-Factor] Chi-square test table results match", {
table <- results[["results"]][["maincontainer"]][["collection"]][["maincontainer_cfatab"]][["data"]]
jaspTools::expect_equal_tables(table,
list(918.851589292384, 36, "Baseline model", "", 85.305521772505, 24,
"Factor model", 8.50255310602677e-09))
})
# Second-order factor
options <- jaspTools::analysisOptions("ConfirmatoryFactorAnalysis")
options$secondOrder <- list("Factor 1", "Factor 2", "Factor 3")
options$groupvar <- ""
options$invariance <- "configural"
options$mimic <- "lavaan"
options$se <- "standard"
options$estimator <- "default"
options$std <- "none"
options$factors <- list(
list(indicators = list("x1", "x2", "x3"), name = "Factor1", title = "Factor 1"),
list(indicators = list("x4", "x5", "x6"), name = "Factor2", title = "Factor 2"),
list(indicators = list("x7", "x8", "x9"), name = "Factor3", title = "Factor 3")
)
options$identify <- "factor"
options$missing <- "FIML"
set.seed(1)
results <- jaspTools::runAnalysis("ConfirmatoryFactorAnalysis", "holzingerswineford.csv", options)
test_that("[CFA Second order] Factor loadings table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl1"]][["data"]]
jaspTools::expect_equal_tables(table,
list(0.0577039904414908, 0.819337251278242, 0.438520620859867, "<unicode><unicode>11",
"Factor 1", 0.0240111123688183, "x1", 0.194297769460158, 2.25695139001474,
0.0315641080012427, 0.453878987089273, 0.242721547545258, "<unicode><unicode>12",
"Factor 1", 0.0242627980684818, "x2", 0.107735367185111, 2.25294212928438,
0.0503061064249717, 0.589382650102084, 0.319844378263528, "<unicode><unicode>13",
"Factor 1", 0.0200309356495567, "x3", 0.137522053448247, 2.32576790590095,
0.71768085442071, 0.966830831063184, 0.842255842741947, "<unicode><unicode>21",
"Factor 2", 0, "x4", 0.0635598354377267, 13.2513848870354, 0.799268757944165,
1.07572151104007, 0.937495134492118, "<unicode><unicode>22",
"Factor 2", 0, "x5", 0.0705249574167, 13.2930974910468, 0.663204523777401,
0.896899759208491, 0.780052141492946, "<unicode><unicode>23",
"Factor 2", 0, "x6", 0.0596172269680586, 13.0843412410121, 0.392491072303089,
0.651164920657023, 0.521827996480056, "<unicode><unicode>31",
"Factor 3", 2.66453525910038e-15, "x7", 0.0659894391923322,
7.90775013194371, 0.483529756039699, 0.747933394180798, 0.615731575110249,
"<unicode><unicode>32", "Factor 3", 0, "x8", 0.0674511471197128,
9.12855602021777, 0.438789772679437, 0.689958088778821, 0.564373930729129,
"<unicode><unicode>33", "Factor 3", 0, "x9", 0.0640747274135055,
8.80805823935775))
})
test_that("[CFA Second order] Second-order factor loadings table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fl2"]][["data"]]
jaspTools::expect_equal_tables(table,
list(-0.149104054075191, 3.73161619230762, 1.79125606911621, "<unicode><unicode>11",
"SecondOrder", 0.0703961021446211, "Factor 1", 0.989997846132234,
1.80935350123676, 0.364985992598308, 0.869100374746346, 0.617043183672327,
"<unicode><unicode>12", "SecondOrder", 1.6021965671964e-06,
"Factor 2", 0.128602970800593, 4.79804766430388, 0.360410276598517,
0.919053360468363, 0.63973181853344, "<unicode><unicode>13",
"SecondOrder", 7.15860527811252e-06, "Factor 3", 0.142513609504142,
4.48891737960541))
})
test_that("[CFA Second order] Factor variances table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_fv"]][["data"]]
jaspTools::expect_equal_tables(table,
list(1, 1, 1, "Factor 1", "", 0, "", 1, 1, 1, "Factor 2", "", 0, "",
1, 1, 1, "Factor 3", "", 0, "", 1, 1, 1, "Second-Order", "",
0, ""))
})
test_that("[CFA Second order] Residual variances table results match", {
table <- results[["results"]][["estimates"]][["collection"]][["estimates_rv"]][["data"]]
jaspTools::expect_equal_tables(table,
list(0.326401450955574, 0.771708269797423, 0.549054860376498, "x1",
1.34357828596165e-06, 0.11360076571671, 4.83319682673349, 0.934464040890642,
1.33321206655867, 1.13383805372466, "x2", 0, 0.101723304308982,
11.1462959390372, 0.666705522978852, 1.02194196604298, 0.844323744510915,
"x3", 0, 0.0906232068206831, 9.3168601524065, 0.2776477911548,
0.464698082724097, 0.371172936939448, "x4", 7.32747196252603e-15,
0.0477177879401676, 7.77850258701964, 0.331807404885689, 0.560702857278213,
0.446255131081951, "x5", 2.1316282072803e-14, 0.0583927700197611,
7.64230110904024, 0.271855604188449, 0.44054957527582, 0.356202589732134,
"x6", 2.22044604925031e-16, 0.0430349670754176, 8.27705036018498,
0.63988809304374, 0.958898264282657, 0.799393178663198, "x7",
0, 0.0813816411309667, 9.82277043758238, 0.342280111000572,
0.633115637435664, 0.487697874218118, "x8", 4.92219598413612e-11,
0.0741940996694749, 6.57327033269154, 0.427488355941731, 0.704772286642682,
0.566130321292206, "x9", 1.11022302462516e-15, 0.0707369964162943,
8.00331297586447))
})
test_that("[CFA Second order] Chi-square test table results match", {
table <- results[["results"]][["maincontainer"]][["collection"]][["maincontainer_cfatab"]][["data"]]
jaspTools::expect_equal_tables(table,
list(918.851589292384, 36, "Baseline model", "", 85.3055217707089,
24, "Factor model", 8.50255321704907e-09))
})
|
# UI for a simple BMI calculator: the user picks standard or metric units,
# enters weight and height, and the server renders the BMI value plus its
# weight category on the matching tab.

# Shared help block describing the four BMI weight categories; rendered on
# both the Standard and Metric tabs (tagList children are flattened into
# the enclosing panel, so the output HTML is unchanged).
bmiCategoryHelp <- function() {
  tagList(
    h5('BMI Weight categories:'),
    h6('1.underweight (BMI less than 18.5)'),
    h6('2.normal weight (BMI between 18.5 & 24.9)'),
    h6('3.overweight (BMI between 25.0 & 29.9)'),
    h6('4.obese (BMI 30.0 and above)')
  )
}

shinyUI(
  pageWithSidebar(
    headerPanel("Calculate Your Body Mass Index"),

    # Inputs: unit system, weight, height; submitButton defers updates
    # until the user clicks "Compute BMI".
    sidebarPanel(
      helpText('Enter your weight and height using standard (pounds & inches ) or
             metric (kilograms & centimeters) measures.'),
      radioButtons("measure", "Measure Type",
                   c("Standard" = "std",
                     "Metric" = "met"), inline = TRUE, width = NULL),
      numericInput('weight', 'Your Weight (pounds or kilograms)', 120, min = 0, max = 1000, step = 1),
      numericInput('height', 'Your Height (inches or centimeters)', 65, min = 12, max = 108, step = 1),
      submitButton('Compute BMI')
    ),

    # Outputs: one tab per unit system, each showing the BMI value and
    # its category, followed by the shared category legend.
    mainPanel(
      tabsetPanel(
        tabPanel('Standard',
                 h3('Your BMI'),
                 verbatimTextOutput("bmi"),
                 h3('BMI Category'),
                 verbatimTextOutput("label"),
                 bmiCategoryHelp()),
        tabPanel('Metric',
                 h3('Your BMI'),
                 verbatimTextOutput("bmimetric"),
                 h3('BMI Category'),
                 verbatimTextOutput("label2"),
                 bmiCategoryHelp())
      )
    )
  )
)
|
/ui.R
|
no_license
|
trelo/BMIapp
|
R
| false
| false
| 1,516
|
r
|
shinyUI(
pageWithSidebar(
headerPanel("Calculate Your Body Mass Index"),
sidebarPanel(
helpText('Enter your weight and height using standard (pounds & inches ) or
metric (kilograms & centimeters) measures.'),
radioButtons("measure", "Measure Type",
c("Standard" = "std",
"Metric" = "met"), inline = TRUE, width = NULL),
numericInput('weight', 'Your Weight (pounds or kilograms)', 120, min = 0, max = 1000, step = 1),
numericInput('height', 'Your Height (inches or centimeters)', 65, min = 12, max = 108, step = 1),
submitButton('Compute BMI')
),
mainPanel(
tabsetPanel(
tabPanel('Standard',
h3('Your BMI'),
verbatimTextOutput("bmi"),
h3('BMI Category'),
verbatimTextOutput("label"),
h5('BMI Weight categories:'),
h6('1.underweight (BMI less than 18.5)'),
h6('2.normal weight (BMI between 18.5 & 24.9)'),
h6('3.overweight (BMI between 25.0 & 29.9)'),
h6('4.obese (BMI 30.0 and above)')),
tabPanel('Metric',
h3('Your BMI'),
verbatimTextOutput("bmimetric"),
h3('BMI Category'),
verbatimTextOutput("label2"),
h5('BMI Weight categories:'),
h6('1.underweight (BMI less than 18.5)'),
h6('2.normal weight (BMI between 18.5 & 24.9)'),
h6('3.overweight (BMI between 25.0 & 29.9)'),
h6('4.obese (BMI 30.0 and above)'))
)
)
))
|
#!/usr/bin/env Rscript
# Plot a numeric series read from a file (or stdin) with a configurable
# graphics driver, plot type, axis labels and colour.

# Ensure 'optparse' is available, installing it on first use.
# BUG FIX: the original re-checked with `if (!library("optparse"))`, but
# library() returns the character vector of attached packages (not a
# logical), so that expression itself errored. Re-check with require().
if (!require("optparse", quietly = TRUE)) {
  install.packages("optparse", repos = "http://cran.rstudio.com/")
  if (!require("optparse", quietly = TRUE)) {
    stop("Couldn't load 'optparse'.")
  }
}

# Command-line interface definition.
parser <- OptionParser()
parser <- add_option(parser, c("-v", "--verbose"), action = "store_true",
                     default = FALSE, help = "Print extra output [default]")
parser <- add_option(parser, c("-d", "--driver"),
                     default = "x11", help = "Output in a chosen driver instead")
parser <- add_option(parser, c("-i", "--ifile"),
                     default = "stdin", help = "File to plot")
parser <- add_option(parser, c("-o", "--ofile"),
                     default = "Rplots", help = "Plot to file")
parser <- add_option(parser, c("-t", "--type"),
                     default = "l", help = "Type of plot (p -> points, l -> lines, o -> overplotted points and lines, b-c -> points (empty if \"c\") joined by lines), s-S -> stair steps, h -> histogram-like vertical lines, n -> does not produce any points or lines")
parser <- add_option(parser, c("--xlab"),
                     default = "x", help = "Define label for abscissa")
parser <- add_option(parser, c("--ylab"),
                     default = "y", help = "Define label for ordinate")
parser <- add_option(parser, c("-c", "--col"),
                     default = "black", help = "Define color")

# 'opts' instead of 'options' so base::options() is not shadowed.
opts <- parse_args(parser)

# Treat an explicitly empty driver name as a request for PDF output.
if (opts$driver == "") {
  opts$driver <- "pdf"
}

if (opts$driver == "x11") {
  x11()
} else {
  # Look up the device function by name (pdf, png, svg, ...) and open it
  # on the requested output file.
  match.fun(opts$driver)(opts$ofile)
}

if (opts$verbose) {
  write(str(opts), stdout())
}

# Read the whole numeric series; scan() accepts "stdin" for piped input.
y <- scan(file = opts$ifile, quiet = TRUE)
if (opts$verbose) {
  write(paste("Length: ", length(y)), stdout())
  write("Array:", stdout())
  write(y, stdout())
}

# Plot against a 0-based index; empty input produces no plot.
if (length(y) > 0) {
  plot(0:(length(y) - 1), y, type = opts$type, xlab = opts$xlab,
       ylab = opts$ylab, col = opts$col)
}

if (opts$driver == "x11") {
  # Keep the interactive window open until the user closes it.
  while (!is.null(dev.list())) Sys.sleep(1)
} else {
  dev.off()
}
|
/Templates/R-lang.R
|
no_license
|
Hellfar/baseConfig
|
R
| false
| false
| 2,064
|
r
|
#!/usr/bin/env Rscript
if (!require("optparse", quietly = TRUE)) {
install.packages("optparse", repos="http://cran.rstudio.com/")
if (!library("optparse")) {
stop("Couldn't load 'optparse'.")
}
}
parser <- OptionParser()
parser <- add_option(parser, c("-v", "--verbose"), action="store_true",
default=FALSE, help="Print extra output [default]")
parser <- add_option(parser, c("-d", "--driver"),
default="x11", help="Output in a choosen driver instead")
parser <- add_option(parser, c("-i", "--ifile"),
default="stdin", help="File to plot")
parser <- add_option(parser, c("-o", "--ofile"),
default="Rplots", help="Plot to file")
parser <- add_option(parser, c("-t", "--type"),
default="l", help="Type of plot (p -> points, l -> lines, o -> overplotted points and lines, b-c -> points (empty if \"c\") joined by lines), s-S -> stair steps, h -> histogram-like vertical lines, n -> does not produce any points or lines")
parser <- add_option(parser, c("--xlab"),
default="x", help="Define label for absciss")
parser <- add_option(parser, c("--ylab"),
default="y", help="Define label for ordinate")
parser <- add_option(parser, c("-c", "--col"),
default="black", help="Define color")
options <- parse_args(parser)
if (options$driver == "")
options$driver = "pdf"
if (options$driver == "x11") {
x11()
} else {
# if (options$verbose)
# write(str(get(options$driver)), stdout())
# dev.copy(match.fun(options$driver), options$ofile)
match.fun(options$driver)(options$ofile)
}
if (options$verbose)
write(str(options), stdout())
y <- scan(file=options$ifile, quiet=TRUE)
if (options$verbose) {
write(paste("Length: ", length(y)), stdout())
write("Array:", stdout())
write(y, stdout())
}
if (length(y) > 0)
plot (0:(length(y) - 1), y, type=options$type, xlab=options$xlab, ylab=options$ylab, col=options$col)
if (options$driver == "x11") {
while (!is.null(dev.list())) Sys.sleep(1)
} else {
dev.off()
}
|
#library(FirebrowseR)
# Tests for Metadata.ClinicalNames: the endpoint should parse to a
# one-column, 64-row data frame regardless of the requested wire format.
context("Metadata.ClinicalNames")
test_that("CDEs are retrieved correctly", {
  # CSV response
  format = "csv"
  obj = Metadata.ClinicalNames(format = format)
  expect_equal(nrow(obj), 64)
  expect_equal(ncol(obj), 1)
  # TSV response should yield the same shape
  format = "tsv"
  obj = Metadata.ClinicalNames(format = format)
  expect_equal(nrow(obj), 64)
  expect_equal(ncol(obj), 1)
})
|
/tests/testthat/test.Metadata.ClinicalNames.R
|
no_license
|
ivan-krukov/FirebrowseR
|
R
| false
| false
| 357
|
r
|
#library(FirebrowseR)
context("Metadata.ClinicalNames")
test_that("CDEs are retrieved correctly", {
format = "csv"
obj = Metadata.ClinicalNames(format = format)
expect_equal(nrow(obj), 64)
expect_equal(ncol(obj), 1)
format = "tsv"
obj = Metadata.ClinicalNames(format = format)
expect_equal(nrow(obj), 64)
expect_equal(ncol(obj), 1)
})
|
#' @include PredictResult.R
# PerformanceResult: extends PredictResult with the evaluated performance
# measures and their values. The prediction's data table is reused and the
# measure/value columns are added in place (data.table `:=`).
PerformanceResult = R6Class("PerformanceResult",
  inherit = PredictResult,
  cloneable = FALSE,
  public = list(
    # pred.result: PredictResult whose data is carried over.
    # measures:    list of Measure objects that were evaluated.
    # perf.vals:   numeric vector of performance values (at least one).
    initialize = function(pred.result, measures, perf.vals) {
      assertR6(pred.result, "PredictResult")
      assertList(measures, "Measure")
      self$data = pred.result$data
      # Stored as list columns so a single row can hold all measures/values.
      self$data[, "measures" := list(measures)]
      self$data[, "perf.vals" := list(list(assertNumeric(perf.vals, min.len = 1L)))]
    },
    # One-line summary; the full object is appended only in debug mode.
    print = function(...) {
      gcat("[Performance]: task={self$task$id} | learner={self$learner$id} | {stri_peek(stri_pasteNames(self$perf.vals, sep = '='))}")
      if (getOption("mlrng.debug", FALSE))
        cat("\n", format(self), "\n")
    }
  ),
  active = list(
    # Convenience accessor for the stored numeric performance values.
    perf.vals = function() self$data$perf.vals[[1L]]
  )
)
|
/R/PerformanceResult.R
|
no_license
|
mlr-archive/mlrng
|
R
| false
| false
| 794
|
r
|
#' @include PredictResult.R
# R6 container that augments a PredictResult with the performance
# measures that were evaluated and the scores they produced. Both are
# stored as list columns in the data.table inherited from PredictResult.
PerformanceResult = R6Class("PerformanceResult",
  inherit = PredictResult,
  cloneable = FALSE,
  public = list(
    # Construct from an existing PredictResult.
    #
    # pred.result: PredictResult whose data table is reused here.
    # measures:    list of Measure objects that were evaluated.
    # perf.vals:   numeric vector of performance values (>= 1 value).
    initialize = function(pred.result, measures, perf.vals) {
      assertR6(pred.result, "PredictResult")
      assertList(measures, "Measure")
      self$data = pred.result$data
      # data.table list columns: wrap so each row carries the whole list
      self$data[, "measures" := list(measures)]
      self$data[, "perf.vals" := list(list(assertNumeric(perf.vals, min.len = 1L)))]
    },
    # One-line summary; the full format() dump only in mlrng debug mode.
    print = function(...) {
      gcat("[Performance]: task={self$task$id} | learner={self$learner$id} | {stri_peek(stri_pasteNames(self$perf.vals, sep = '='))}")
      if (getOption("mlrng.debug", FALSE))
        cat("\n", format(self), "\n")
    }
  ),
  active = list(
    # Read-only accessor for the stored numeric performance values.
    perf.vals = function() self$data$perf.vals[[1L]]
  )
)
|
# Text mining of journal abstracts (IEEE TPAMI vs Journal of Banking and
# Finance): corpus preprocessing, term-document matrices, feature-selection
# metrics, and dimensionality reduction (PCA, MDS, LSI).
install.packages("tm")
install.packages("FSelector")
install.packages("stringi")
install.packages("rJava")
# Fixed: CRAN package names are case-sensitive; "rweka" does not exist.
install.packages("RWeka")
library(tm)
library(FSelector)
library(stringi)
# Load the data
TPAMI <- read.csv("IEEE_TPAMI_2015_2017.csv", encoding = "UTF-8", stringsAsFactors = FALSE)
JoF <- read.csv("Journal of Banking and Finance_2015_2017.csv", encoding = "UTF-8", stringsAsFactors = FALSE)
# Column 4 = journal name, column 12 = abstract text
Journal.Data <- rbind(TPAMI[,c(4,12)], JoF[,c(4,12)])
names(Journal.Data) <- c("Journal", "Abstract")
# Construct the corpus for each journal with the abstracts
Journal.Corpus <- Corpus(VectorSource(Journal.Data$Abstract))
# Preprocessing
# 1: to lower case
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(stri_trans_tolower))
# 2: remove punctuation
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(removePunctuation))
# 3. remove numbers
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(removeNumbers))
# 4. remove stopwords (SMART stopwords list)
myStopwords <- c(stopwords("SMART"))
Journal.Corpus <- tm_map(Journal.Corpus, removeWords, myStopwords)
# 5. Stemming
Journal.Corpus <- tm_map(Journal.Corpus, stemDocument)
# Term-Document Matrix
Journal.TDM <- TermDocumentMatrix(Journal.Corpus, control = list(minWordLength = 1))
Journal.Frequency.TDM <- as.matrix(Journal.TDM)
# Binary (presence/absence) version of the frequency TDM
pos_idx <- which(Journal.Frequency.TDM >= 1)
Journal.Binary.TDM <- Journal.Frequency.TDM
Journal.Binary.TDM[pos_idx] <- 1
Terms <- rownames(Journal.Binary.TDM)
# Remove multibyte strings
Terms <- iconv(Terms, "latin1", "ASCII", sub="")
rownames(Journal.Frequency.TDM) <- Terms
rownames(Journal.Binary.TDM) <- Terms
# Dimensionality Reduction 1: Feature Selection ---------------------------
Journal.Binary.DTM <- data.frame(t(as.matrix(Journal.Binary.TDM)))
Journal.Binary.DTM <- data.frame(Journal = Journal.Data$Journal, Journal.Binary.DTM)
# Metric 1: Acc (document frequency in positive class minus negative class)
pos_idx <- which(Journal.Binary.DTM$Journal == "IEEE Transactions on Pattern Analysis and Machine Intelligence")
Acc <- sapply(Journal.Binary.DTM[,-1], function(x) sum(x[pos_idx])-sum(x[-pos_idx]))
FS_Acc <- data.frame(Terms, Acc)
# Sort the terms w.r.t. Acc
FS_Acc <- FS_Acc[order(FS_Acc$Acc, decreasing = TRUE),]
FS_Acc[1:30,]
FS_Acc[(length(Terms)-30):length(Terms),]
# Metric 2: F1-Measure
F1 <- sapply(Journal.Binary.DTM[,-1], function(x) 2*sum(x[pos_idx])/(length(pos_idx)+sum(x)))
FS_F1 <- data.frame(Terms, F1)
# Sort the terms w.r.t. F1
FS_F1 <- FS_F1[order(FS_F1$F1, decreasing = TRUE),]
FS_F1[1:30,]
FS_F1[(length(Terms)-30):length(Terms),]
# Metric 3: Chi-squared
CS <- chi.squared(Journal ~ ., Journal.Binary.DTM)
FS_CS <- data.frame(Terms, CS)
FS_CS <- FS_CS[order(FS_CS$attr_importance, decreasing = TRUE),]
FS_CS[1:30,]
FS_CS[(length(Terms)-30):length(Terms),]
# Metric 4: Information Gain
IG <- information.gain(Journal ~ ., Journal.Binary.DTM)
FS_IG <- data.frame(Terms, IG)
FS_IG <- FS_IG[order(FS_IG$attr_importance, decreasing = TRUE),]
FS_IG[1:30,]
FS_IG[(length(Terms)-30):length(Terms),]
# Combine Top 30 significant terms for the positive class (TPAMI)
Top30.Terms <- data.frame(FS_Acc[1:30,], FS_F1[1:30,], FS_CS[1:30,], FS_IG[1:30,])
names(Top30.Terms) <- c("Terms_Acc", "Acc_Score", "Terms_F1", "F1_Score",
                        "Terms_Chi-Squared", "Chi-Squared Score", "Terms_Information Gain", "Information Gain_Score")
write.csv(Top30.Terms, file = "Top30 Terms for TPAMI.csv")
# Dimensionality Reduction 2-1: PCA ---------------------------------------
Journal.Frequency.DTM <- data.frame(t(as.matrix(Journal.Frequency.TDM)))
PCs <- prcomp(Journal.Frequency.DTM, scale = TRUE)
summary(PCs)
screeplot(PCs, npcs = 100, type = "lines", main = "Variance explained by each principal component")
Journal.PCs <- predict(PCs)
# Plot the articles in 2-dim (rows 1:30 = TPAMI, rows 434:463 = JoF)
par(mfrow = c(1,2))
plot(Journal.PCs[,1:2], type = "n", xlim = c(-10,10), ylim = c(-5,5))
text(Journal.PCs[1:30,1], Journal.PCs[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.PCs[434:463,1], Journal.PCs[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
plot(Journal.PCs[,3:4], type = "n", xlim = c(-5,5), ylim = c(-2,5))
text(Journal.PCs[1:30,3], Journal.PCs[1:30,4], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.PCs[434:463,3], Journal.PCs[434:463,4], label = paste("JoF_", 1:30, sep = ""), col=10)
# Dimensionality Reduction 2-2: MDS ---------------------------------------
# Distance = 1 - correlation between documents
Journal.Cor <- cor(Journal.Frequency.TDM)
Journal.Dist <- 1-Journal.Cor
Journal.Dist <- as.data.frame(Journal.Dist)
MDS <- cmdscale(Journal.Dist, eig = TRUE, k = 10)
Journal.MDS <- MDS$points
par(mfrow = c(1,1))
plot(Journal.MDS[,1], Journal.MDS[,2], xlab = "Coordinate 1", ylab = "Coordinate 2",
     main = "MDS plot", type = "n")
text(Journal.MDS[1:30,1], Journal.MDS[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.MDS[434:463,1], Journal.MDS[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
# Dimensionality Reduction 2-3: LSI ---------------------------------------
SVD.Mat <- svd(Journal.Frequency.TDM)
LSI.D <- SVD.Mat$d
LSI.U <- SVD.Mat$u
LSI.V <- SVD.Mat$v
# Plot the singular values
plot(1:length(LSI.D), LSI.D)
# Select 2 features for documents
Document.Mat <- t((diag(2)*LSI.D[1:2]) %*% t(LSI.V[,1:2]))
plot(Document.Mat[,1], Document.Mat[,2], xlab = "SVD 1", ylab = "SVD 2",
     main = "LSI plot for Documents", type = "n")
text(Document.Mat[1:30,1], Document.Mat[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Document.Mat[434:463,1], Document.Mat[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
# Select 2 features for Terms
Term.Mat <- LSI.U[,1:2] %*% (diag(2)*LSI.D[1:2])
plot(Term.Mat[,1], Term.Mat[,2], xlab = "SVD 1", ylab = "SVD 2", type = "n",
     main = "LSI plot for Terms")
text(Term.Mat[,1], Term.Mat[,2], label = Terms)
plot(Term.Mat[,1], Term.Mat[,2], xlab = "SVD 1", ylab = "SVD 2", type = "n",
     xlim = c(0.04,0.06), ylim = c(0.04,0.06), main = "LSI plot for Terms")
text(Term.Mat[,1], Term.Mat[,2], label = Terms)
|
/NLP_NEW_Codes_Datasets/R05_Dimensionality Reduction.R
|
no_license
|
MvssHarshavardhan/NLP-material
|
R
| false
| false
| 6,196
|
r
|
# Text mining of journal abstracts (IEEE TPAMI vs Journal of Banking and
# Finance): corpus preprocessing, term-document matrices, feature-selection
# metrics, and dimensionality reduction (PCA, MDS, LSI).
install.packages("tm")
install.packages("FSelector")
install.packages("stringi")
install.packages("rJava")
# Fixed: CRAN package names are case-sensitive; "rweka" does not exist.
install.packages("RWeka")
library(tm)
library(FSelector)
library(stringi)
# Load the data
TPAMI <- read.csv("IEEE_TPAMI_2015_2017.csv", encoding = "UTF-8", stringsAsFactors = FALSE)
JoF <- read.csv("Journal of Banking and Finance_2015_2017.csv", encoding = "UTF-8", stringsAsFactors = FALSE)
# Column 4 = journal name, column 12 = abstract text
Journal.Data <- rbind(TPAMI[,c(4,12)], JoF[,c(4,12)])
names(Journal.Data) <- c("Journal", "Abstract")
# Construct the corpus for each journal with the abstracts
Journal.Corpus <- Corpus(VectorSource(Journal.Data$Abstract))
# Preprocessing
# 1: to lower case
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(stri_trans_tolower))
# 2: remove punctuation
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(removePunctuation))
# 3. remove numbers
Journal.Corpus <- tm_map(Journal.Corpus, content_transformer(removeNumbers))
# 4. remove stopwords (SMART stopwords list)
myStopwords <- c(stopwords("SMART"))
Journal.Corpus <- tm_map(Journal.Corpus, removeWords, myStopwords)
# 5. Stemming
Journal.Corpus <- tm_map(Journal.Corpus, stemDocument)
# Term-Document Matrix
Journal.TDM <- TermDocumentMatrix(Journal.Corpus, control = list(minWordLength = 1))
Journal.Frequency.TDM <- as.matrix(Journal.TDM)
# Binary (presence/absence) version of the frequency TDM
pos_idx <- which(Journal.Frequency.TDM >= 1)
Journal.Binary.TDM <- Journal.Frequency.TDM
Journal.Binary.TDM[pos_idx] <- 1
Terms <- rownames(Journal.Binary.TDM)
# Remove multibyte strings
Terms <- iconv(Terms, "latin1", "ASCII", sub="")
rownames(Journal.Frequency.TDM) <- Terms
rownames(Journal.Binary.TDM) <- Terms
# Dimensionality Reduction 1: Feature Selection ---------------------------
Journal.Binary.DTM <- data.frame(t(as.matrix(Journal.Binary.TDM)))
Journal.Binary.DTM <- data.frame(Journal = Journal.Data$Journal, Journal.Binary.DTM)
# Metric 1: Acc (document frequency in positive class minus negative class)
pos_idx <- which(Journal.Binary.DTM$Journal == "IEEE Transactions on Pattern Analysis and Machine Intelligence")
Acc <- sapply(Journal.Binary.DTM[,-1], function(x) sum(x[pos_idx])-sum(x[-pos_idx]))
FS_Acc <- data.frame(Terms, Acc)
# Sort the terms w.r.t. Acc
FS_Acc <- FS_Acc[order(FS_Acc$Acc, decreasing = TRUE),]
FS_Acc[1:30,]
FS_Acc[(length(Terms)-30):length(Terms),]
# Metric 2: F1-Measure
F1 <- sapply(Journal.Binary.DTM[,-1], function(x) 2*sum(x[pos_idx])/(length(pos_idx)+sum(x)))
FS_F1 <- data.frame(Terms, F1)
# Sort the terms w.r.t. F1
FS_F1 <- FS_F1[order(FS_F1$F1, decreasing = TRUE),]
FS_F1[1:30,]
FS_F1[(length(Terms)-30):length(Terms),]
# Metric 3: Chi-squared
CS <- chi.squared(Journal ~ ., Journal.Binary.DTM)
FS_CS <- data.frame(Terms, CS)
FS_CS <- FS_CS[order(FS_CS$attr_importance, decreasing = TRUE),]
FS_CS[1:30,]
FS_CS[(length(Terms)-30):length(Terms),]
# Metric 4: Information Gain
IG <- information.gain(Journal ~ ., Journal.Binary.DTM)
FS_IG <- data.frame(Terms, IG)
FS_IG <- FS_IG[order(FS_IG$attr_importance, decreasing = TRUE),]
FS_IG[1:30,]
FS_IG[(length(Terms)-30):length(Terms),]
# Combine Top 30 significant terms for the positive class (TPAMI)
Top30.Terms <- data.frame(FS_Acc[1:30,], FS_F1[1:30,], FS_CS[1:30,], FS_IG[1:30,])
names(Top30.Terms) <- c("Terms_Acc", "Acc_Score", "Terms_F1", "F1_Score",
                        "Terms_Chi-Squared", "Chi-Squared Score", "Terms_Information Gain", "Information Gain_Score")
write.csv(Top30.Terms, file = "Top30 Terms for TPAMI.csv")
# Dimensionality Reduction 2-1: PCA ---------------------------------------
Journal.Frequency.DTM <- data.frame(t(as.matrix(Journal.Frequency.TDM)))
PCs <- prcomp(Journal.Frequency.DTM, scale = TRUE)
summary(PCs)
screeplot(PCs, npcs = 100, type = "lines", main = "Variance explained by each principal component")
Journal.PCs <- predict(PCs)
# Plot the articles in 2-dim (rows 1:30 = TPAMI, rows 434:463 = JoF)
par(mfrow = c(1,2))
plot(Journal.PCs[,1:2], type = "n", xlim = c(-10,10), ylim = c(-5,5))
text(Journal.PCs[1:30,1], Journal.PCs[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.PCs[434:463,1], Journal.PCs[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
plot(Journal.PCs[,3:4], type = "n", xlim = c(-5,5), ylim = c(-2,5))
text(Journal.PCs[1:30,3], Journal.PCs[1:30,4], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.PCs[434:463,3], Journal.PCs[434:463,4], label = paste("JoF_", 1:30, sep = ""), col=10)
# Dimensionality Reduction 2-2: MDS ---------------------------------------
# Distance = 1 - correlation between documents
Journal.Cor <- cor(Journal.Frequency.TDM)
Journal.Dist <- 1-Journal.Cor
Journal.Dist <- as.data.frame(Journal.Dist)
MDS <- cmdscale(Journal.Dist, eig = TRUE, k = 10)
Journal.MDS <- MDS$points
par(mfrow = c(1,1))
plot(Journal.MDS[,1], Journal.MDS[,2], xlab = "Coordinate 1", ylab = "Coordinate 2",
     main = "MDS plot", type = "n")
text(Journal.MDS[1:30,1], Journal.MDS[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Journal.MDS[434:463,1], Journal.MDS[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
# Dimensionality Reduction 2-3: LSI ---------------------------------------
SVD.Mat <- svd(Journal.Frequency.TDM)
LSI.D <- SVD.Mat$d
LSI.U <- SVD.Mat$u
LSI.V <- SVD.Mat$v
# Plot the singular values
plot(1:length(LSI.D), LSI.D)
# Select 2 features for documents
Document.Mat <- t((diag(2)*LSI.D[1:2]) %*% t(LSI.V[,1:2]))
plot(Document.Mat[,1], Document.Mat[,2], xlab = "SVD 1", ylab = "SVD 2",
     main = "LSI plot for Documents", type = "n")
text(Document.Mat[1:30,1], Document.Mat[1:30,2], label = paste("TPAMI_", 1:30, sep = ""), col=4)
text(Document.Mat[434:463,1], Document.Mat[434:463,2], label = paste("JoF_", 1:30, sep = ""), col=10)
# Select 2 features for Terms
Term.Mat <- LSI.U[,1:2] %*% (diag(2)*LSI.D[1:2])
plot(Term.Mat[,1], Term.Mat[,2], xlab = "SVD 1", ylab = "SVD 2", type = "n",
     main = "LSI plot for Terms")
text(Term.Mat[,1], Term.Mat[,2], label = Terms)
plot(Term.Mat[,1], Term.Mat[,2], xlab = "SVD 1", ylab = "SVD 2", type = "n",
     xlim = c(0.04,0.06), ylim = c(0.04,0.06), main = "LSI plot for Terms")
text(Term.Mat[,1], Term.Mat[,2], label = Terms)
|
# plot3.R -- plots the three energy sub-metering series for two days
# (2007-02-01 and 2007-02-02) of the household power consumption data.
library(sqldf)
# Filter the two target dates at read time with SQL so the full file
# never has to be loaded into memory as a data frame.
power <- read.csv.sql("household_power_consumption.txt",
                      sql = "select * from file where Date ='1/2/2007'or Date ='2/2/2007'" ,
                      header = TRUE, sep = ";")
library(lubridate)
# Build a single POSIXct timestamp from the Date and Time columns
# for use as the x-axis.
power$datetime <- paste(power$Date, power$Time, sep =',')
power$datetime <- dmy_hms(power$datetime)
power$Date <- dmy(power$Date)
power$Time <- hms(power$Time)
png(filename = "plot3.png")
plot(power$datetime, power$Sub_metering_1, type = "l",
     xlab = "", ylab ="Energy sub metering")
# NOTE(review): xlab/ylab have no effect in lines(); presumably left over
# from copying the plot() call -- confirm before removing.
lines(power$datetime, power$Sub_metering_2, type = "l",
      col= "Red", xlab = "", ylab ="Global Active Power (kilowatts)")
lines(power$datetime, power$Sub_metering_3, type = "l",
      col= "Blue", xlab = "", ylab ="Global Active Power (kilowatts)")
legend("topright", inset = c(0,0), lty = 1, col = c("Black","Red", "Blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
sunanthas9/RPlots_Basic
|
R
| false
| false
| 941
|
r
|
# plot3.R -- plots the three energy sub-metering series for two days
# (2007-02-01 and 2007-02-02) of the household power consumption data.
library(sqldf)
# Filter the two target dates at read time with SQL so the full file
# never has to be loaded into memory as a data frame.
power <- read.csv.sql("household_power_consumption.txt",
                      sql = "select * from file where Date ='1/2/2007'or Date ='2/2/2007'" ,
                      header = TRUE, sep = ";")
library(lubridate)
# Build a single POSIXct timestamp from the Date and Time columns
# for use as the x-axis.
power$datetime <- paste(power$Date, power$Time, sep =',')
power$datetime <- dmy_hms(power$datetime)
power$Date <- dmy(power$Date)
power$Time <- hms(power$Time)
png(filename = "plot3.png")
plot(power$datetime, power$Sub_metering_1, type = "l",
     xlab = "", ylab ="Energy sub metering")
# NOTE(review): xlab/ylab have no effect in lines(); presumably left over
# from copying the plot() call -- confirm before removing.
lines(power$datetime, power$Sub_metering_2, type = "l",
      col= "Red", xlab = "", ylab ="Global Active Power (kilowatts)")
lines(power$datetime, power$Sub_metering_3, type = "l",
      col= "Blue", xlab = "", ylab ="Global Active Power (kilowatts)")
legend("topright", inset = c(0,0), lty = 1, col = c("Black","Red", "Blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
# Statistical analyses and summary statistics
# Tim Szewczyk
# Created 2015 April 17
#######
## Load libraries, functions, data
#######
library(ggplot2); theme_set(theme_bw()); library(grid); library(gridExtra)
library(xlsx); library(plyr); library(vegan); library(betapart); library(lme4)
source("R_scripts/FuncsGen.R")
# NOTE(review): loadAll() (defined in FuncsGen.R) presumably loads the data
# frames used below (over.df, betaTax.df, patt.barSUM, tvars.df) -- confirm.
loadAll()
#--- graphical parameters ---#
w <- 84 # width (mm)
h <- 75 # height (mm)
# Shared ggplot theme applied to every figure in this script
theme_is <- theme(axis.title.x=element_text(size=12, vjust=-0.3),
                  axis.text.x=element_text(size=12),
                  axis.title.y=element_text(size=12, vjust=1.1),
                  axis.text.y=element_text(size=12),
                  legend.title=element_text(size=8, vjust=0.3),
                  legend.key=element_rect(colour=NA),
                  legend.key.size=unit(0.25, "cm"),
                  legend.text=element_text(size=8),
                  panel.grid=element_blank())
#########
## Figure 1
#########
#--- 1a. mean and median range size by latitude ---#
# Error bars are drawn as geom_segment with 90-degree arrow heads at both ends.
f1a <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + ylim(0,1100) + theme(legend.position=c(0.85, 0.15)) +
  geom_point(aes(y=sp.mnRng, shape="Mean"), size=2.5) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng+seRng,
                   yend=sp.mnRng-seRng), size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  stat_smooth(aes(y=sp.mnRng), se=FALSE, method="lm",
              colour="black", size=1) +
  geom_point(aes(y=sp.medRng, shape="Median"), size=2.5) +
  stat_smooth(aes(y=sp.medRng), se=FALSE, method="lm",
              colour="black", size=1, linetype=2) +
  annotate("text", x=12, y=1000, label="a.", size=5) +
  scale_shape_manual(name="", values=c("Mean"=19,
                                       "Median"=1)) +
  labs(x="Degrees from equator", y="Elevational range (m)")
#--- 1b. mean range size on truncated mountains by latitude ---#
f1b <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + ylim(0,1100) + theme(legend.position=c(0.85, 0.15)) +
  stat_smooth(aes(y=sp.mnRng.2000, colour="2000m"),
              se=FALSE, method="lm", size=1) +
  stat_smooth(aes(y=sp.mnRng.1800, colour="1800m"),
              se=FALSE, method="lm", size=1) +
  stat_smooth(aes(y=sp.mnRng.1600, colour="1600m"),
              se=FALSE, method="lm", size=1) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.2000+sp.seRng.2000,
                   yend=sp.mnRng.2000-sp.seRng.2000),
               colour="black", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.1800+sp.seRng.1800,
                   yend=sp.mnRng.1800-sp.seRng.1800),
               colour="gray40", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.1600+sp.seRng.1600,
                   yend=sp.mnRng.1600-sp.seRng.1600),
               colour="gray70", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_point(aes(y=sp.mnRng.2000, colour="2000m"), size=2.5) +
  geom_point(aes(y=sp.mnRng.1800, colour="1800m"), size=2.5) +
  geom_point(aes(y=sp.mnRng.1600, colour="1600m"), size=2.5) +
  annotate("text", x=12, y=1000, label="b.", size=5) +
  scale_colour_manual(name="Truncation",
                      values=c("2000m"="black",
                               "1800m"="gray45",
                               "1600m"="gray70")) +
  labs(x="Degrees from equator", y="Elevational range (m)")
#--- save figure ---#
fig1 <- arrangeGrob(f1a, f1b, ncol=2)
ggsave("ms/pubFigs/Fig1.pdf", fig1, width=174, height=h, units="mm")
#########
## Figure 2
#########
#--- sp.STB vs latitude ---#
# Each trend is drawn twice: first a wider black line, then a thinner
# coloured line on top, so the coloured lines appear outlined in black.
fig2 <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + theme(legend.position=c(0.825, 0.825)) +
  stat_smooth(aes(y=sp.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=gen.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=sf.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=sp.STB), se=FALSE,
              method="lm", size=1, colour="black") +
  stat_smooth(aes(y=gen.STB), se=FALSE,
              method="lm", size=1, colour="gray70") +
  stat_smooth(aes(y=sf.STB), se=FALSE,
              method="lm", size=1, colour="white") +
  geom_point(aes(y=sp.STB, fill="Species"), size=2, pch=21) +
  geom_point(aes(y=gen.STB, fill="Genus"), size=2, pch=21) +
  geom_point(aes(y=sf.STB, fill="Subfamily"), size=2, pch=21) +
  scale_fill_manual(name="Taxonomic \nLevel",
                    values=c("Species"="black",
                             "Genus"="gray70",
                             "Subfamily"="white")) +
  labs(x="Degrees from equator",
       y=expression(paste('Gradient-wide ', beta, ' diversity (',
                          beta['st'],
                          ')')))
#--- save figure ---#
ggsave("ms/pubFigs/Fig2.pdf", fig2, width=w, height=h, units="mm")
#########
## Figure 3
#########
#--- turnover proportion by taxonomy and zone ---#
fig3 <- ggplot(betaTax.df, aes(x=TaxLevel, y=Turnover/TotalBeta, fill=Zone)) +
  ylim(0,1) + theme_is +
  theme(axis.title.y=element_text(size=10, vjust=1.1)) +
  theme(legend.key.size=unit(0.5, "cm")) +
  geom_hline(yintercept=0.5, linetype=2, colour="gray40") +
  geom_boxplot() +
  annotate("text", label="Higher \nturnover", x=3.15, y=0.56,
           angle=90, hjust=0, size=3) +
  annotate("text", label="Higher \nnestedness", x=3.15, y=0.47,
           angle=90, hjust=1.1, size=3) +
  annotate("segment", x=3.46, xend=3.46, y=0.55, yend=0.9,
           arrow=arrow(angle=35, length=unit(0.22, "cm"), ends="last")) +
  annotate("segment", x=3.46, xend=3.46, y=0.45, yend=0.1,
           arrow=arrow(angle=35, length=unit(0.22, "cm"), ends="last")) +
  scale_fill_manual(name="", values=c("white", "gray70")) +
  labs(x="", y=expression('Gradient-wide turnover proportion'))
#--- save figure ---#
ggsave("ms/pubFigs/Fig3.pdf", fig3, width=129, height=h, units="mm")
#########
## Figure 4
#########
#--- richness patterns of each taxonomic level ---#
fig4 <- ggplot(patt.barSUM, aes(x=Pattern, fill=Tax, y=num)) +
  theme_is + ylim(0,15) + theme(legend.position=c(0.175, 0.825)) +
  geom_bar(stat="identity", position="dodge", colour="black") +
  scale_fill_manual(name="Taxonomic \nLevel",
                    values=c("gray10", "gray70", "white")) +
  labs(x="Richness Pattern", y="Number of gradients")
#--- save figure ---#
ggsave("ms/pubFigs/Fig4.pdf", fig4, width=w, height=h, units="mm")
#########
## Figure 5
#########
#--- dominant genus predicting rest ---#
# Gray lines: per-gradient (Label) fits; black line: overall fit.
f5a <- ggplot(tvars.df, aes(x=SmaxDivGen, y=S-SmaxDivGen)) +
  theme_is +
  stat_smooth(aes(group=Label), se=F, method="lm",
              colour="gray", size=1) +
  stat_smooth(se=F, method="lm", colour="black", size=1.5) +
  geom_point(size=3) +
  annotate("text", x=5, y=190, label="a.", size=5) +
  labs(x="Richness of most speciose genus",
       y=expression("Richness of remaining genera"))
#--- dominant subfamily predicting rest ---#
f5b <- ggplot(tvars.df, aes(x=SmaxDivSF, y=S-SmaxDivSF)) +
  theme_is + theme(axis.title.y=element_text(size=12, vjust=0.5)) +
  stat_smooth(aes(group=Label), se=F, method="lm",
              colour="gray", size=1) +
  stat_smooth(se=F, method="lm", colour="black", size=1.5) +
  geom_point(size=3) +
  annotate("text", x=18, y=95, label="b.", size=5) +
  labs(x="Richness of most speciose subfamily",
       y=expression("Richness of remaining subfamilies"))
#--- save figure ---#
fig5 <- arrangeGrob(f5a, f5b, ncol=2)
ggsave("ms/pubFigs/Fig5.pdf", fig5, width=174, height=h, units="mm")
|
/R_scripts/figsPublish.R
|
no_license
|
Sz-Tim/isRev
|
R
| false
| false
| 8,151
|
r
|
# Statistical analyses and summary statistics
# Tim Szewczyk
# Created 2015 April 17
#######
## Load libraries, functions, data
#######
library(ggplot2); theme_set(theme_bw()); library(grid); library(gridExtra)
library(xlsx); library(plyr); library(vegan); library(betapart); library(lme4)
source("R_scripts/FuncsGen.R")
# NOTE(review): loadAll() (defined in FuncsGen.R) presumably loads the data
# frames used below (over.df, betaTax.df, patt.barSUM, tvars.df) -- confirm.
loadAll()
#--- graphical parameters ---#
w <- 84 # width (mm)
h <- 75 # height (mm)
# Shared ggplot theme applied to every figure in this script
theme_is <- theme(axis.title.x=element_text(size=12, vjust=-0.3),
                  axis.text.x=element_text(size=12),
                  axis.title.y=element_text(size=12, vjust=1.1),
                  axis.text.y=element_text(size=12),
                  legend.title=element_text(size=8, vjust=0.3),
                  legend.key=element_rect(colour=NA),
                  legend.key.size=unit(0.25, "cm"),
                  legend.text=element_text(size=8),
                  panel.grid=element_blank())
#########
## Figure 1
#########
#--- 1a. mean and median range size by latitude ---#
# Error bars are drawn as geom_segment with 90-degree arrow heads at both ends.
f1a <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + ylim(0,1100) + theme(legend.position=c(0.85, 0.15)) +
  geom_point(aes(y=sp.mnRng, shape="Mean"), size=2.5) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng+seRng,
                   yend=sp.mnRng-seRng), size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  stat_smooth(aes(y=sp.mnRng), se=FALSE, method="lm",
              colour="black", size=1) +
  geom_point(aes(y=sp.medRng, shape="Median"), size=2.5) +
  stat_smooth(aes(y=sp.medRng), se=FALSE, method="lm",
              colour="black", size=1, linetype=2) +
  annotate("text", x=12, y=1000, label="a.", size=5) +
  scale_shape_manual(name="", values=c("Mean"=19,
                                       "Median"=1)) +
  labs(x="Degrees from equator", y="Elevational range (m)")
#--- 1b. mean range size on truncated mountains by latitude ---#
f1b <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + ylim(0,1100) + theme(legend.position=c(0.85, 0.15)) +
  stat_smooth(aes(y=sp.mnRng.2000, colour="2000m"),
              se=FALSE, method="lm", size=1) +
  stat_smooth(aes(y=sp.mnRng.1800, colour="1800m"),
              se=FALSE, method="lm", size=1) +
  stat_smooth(aes(y=sp.mnRng.1600, colour="1600m"),
              se=FALSE, method="lm", size=1) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.2000+sp.seRng.2000,
                   yend=sp.mnRng.2000-sp.seRng.2000),
               colour="black", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.1800+sp.seRng.1800,
                   yend=sp.mnRng.1800-sp.seRng.1800),
               colour="gray40", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_segment(aes(xend=abs(Latsamp),
                   y=sp.mnRng.1600+sp.seRng.1600,
                   yend=sp.mnRng.1600-sp.seRng.1600),
               colour="gray70", size=1,
               arrow=arrow(angle=90, length=unit(0.11, "cm"), ends="both")) +
  geom_point(aes(y=sp.mnRng.2000, colour="2000m"), size=2.5) +
  geom_point(aes(y=sp.mnRng.1800, colour="1800m"), size=2.5) +
  geom_point(aes(y=sp.mnRng.1600, colour="1600m"), size=2.5) +
  annotate("text", x=12, y=1000, label="b.", size=5) +
  scale_colour_manual(name="Truncation",
                      values=c("2000m"="black",
                               "1800m"="gray45",
                               "1600m"="gray70")) +
  labs(x="Degrees from equator", y="Elevational range (m)")
#--- save figure ---#
fig1 <- arrangeGrob(f1a, f1b, ncol=2)
ggsave("ms/pubFigs/Fig1.pdf", fig1, width=174, height=h, units="mm")
#########
## Figure 2
#########
#--- sp.STB vs latitude ---#
# Each trend is drawn twice: first a wider black line, then a thinner
# coloured line on top, so the coloured lines appear outlined in black.
fig2 <- ggplot(over.df, aes(x=abs(Latsamp))) +
  theme_is + theme(legend.position=c(0.825, 0.825)) +
  stat_smooth(aes(y=sp.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=gen.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=sf.STB), se=FALSE,
              method="lm", size=1.5, colour="black") +
  stat_smooth(aes(y=sp.STB), se=FALSE,
              method="lm", size=1, colour="black") +
  stat_smooth(aes(y=gen.STB), se=FALSE,
              method="lm", size=1, colour="gray70") +
  stat_smooth(aes(y=sf.STB), se=FALSE,
              method="lm", size=1, colour="white") +
  geom_point(aes(y=sp.STB, fill="Species"), size=2, pch=21) +
  geom_point(aes(y=gen.STB, fill="Genus"), size=2, pch=21) +
  geom_point(aes(y=sf.STB, fill="Subfamily"), size=2, pch=21) +
  scale_fill_manual(name="Taxonomic \nLevel",
                    values=c("Species"="black",
                             "Genus"="gray70",
                             "Subfamily"="white")) +
  labs(x="Degrees from equator",
       y=expression(paste('Gradient-wide ', beta, ' diversity (',
                          beta['st'],
                          ')')))
#--- save figure ---#
ggsave("ms/pubFigs/Fig2.pdf", fig2, width=w, height=h, units="mm")
#########
## Figure 3
#########
#--- turnover proportion by taxonomy and zone ---#
fig3 <- ggplot(betaTax.df, aes(x=TaxLevel, y=Turnover/TotalBeta, fill=Zone)) +
  ylim(0,1) + theme_is +
  theme(axis.title.y=element_text(size=10, vjust=1.1)) +
  theme(legend.key.size=unit(0.5, "cm")) +
  geom_hline(yintercept=0.5, linetype=2, colour="gray40") +
  geom_boxplot() +
  annotate("text", label="Higher \nturnover", x=3.15, y=0.56,
           angle=90, hjust=0, size=3) +
  annotate("text", label="Higher \nnestedness", x=3.15, y=0.47,
           angle=90, hjust=1.1, size=3) +
  annotate("segment", x=3.46, xend=3.46, y=0.55, yend=0.9,
           arrow=arrow(angle=35, length=unit(0.22, "cm"), ends="last")) +
  annotate("segment", x=3.46, xend=3.46, y=0.45, yend=0.1,
           arrow=arrow(angle=35, length=unit(0.22, "cm"), ends="last")) +
  scale_fill_manual(name="", values=c("white", "gray70")) +
  labs(x="", y=expression('Gradient-wide turnover proportion'))
#--- save figure ---#
ggsave("ms/pubFigs/Fig3.pdf", fig3, width=129, height=h, units="mm")
#########
## Figure 4
#########
#--- richness patterns of each taxonomic level ---#
fig4 <- ggplot(patt.barSUM, aes(x=Pattern, fill=Tax, y=num)) +
  theme_is + ylim(0,15) + theme(legend.position=c(0.175, 0.825)) +
  geom_bar(stat="identity", position="dodge", colour="black") +
  scale_fill_manual(name="Taxonomic \nLevel",
                    values=c("gray10", "gray70", "white")) +
  labs(x="Richness Pattern", y="Number of gradients")
#--- save figure ---#
ggsave("ms/pubFigs/Fig4.pdf", fig4, width=w, height=h, units="mm")
#########
## Figure 5
#########
#--- dominant genus predicting rest ---#
# Gray lines: per-gradient (Label) fits; black line: overall fit.
f5a <- ggplot(tvars.df, aes(x=SmaxDivGen, y=S-SmaxDivGen)) +
  theme_is +
  stat_smooth(aes(group=Label), se=F, method="lm",
              colour="gray", size=1) +
  stat_smooth(se=F, method="lm", colour="black", size=1.5) +
  geom_point(size=3) +
  annotate("text", x=5, y=190, label="a.", size=5) +
  labs(x="Richness of most speciose genus",
       y=expression("Richness of remaining genera"))
#--- dominant subfamily predicting rest ---#
f5b <- ggplot(tvars.df, aes(x=SmaxDivSF, y=S-SmaxDivSF)) +
  theme_is + theme(axis.title.y=element_text(size=12, vjust=0.5)) +
  stat_smooth(aes(group=Label), se=F, method="lm",
              colour="gray", size=1) +
  stat_smooth(se=F, method="lm", colour="black", size=1.5) +
  geom_point(size=3) +
  annotate("text", x=18, y=95, label="b.", size=5) +
  labs(x="Richness of most speciose subfamily",
       y=expression("Richness of remaining subfamilies"))
#--- save figure ---#
fig5 <- arrangeGrob(f5a, f5b, ncol=2)
ggsave("ms/pubFigs/Fig5.pdf", fig5, width=174, height=h, units="mm")
|
setwd('')
#Reading data
dat <- data.frame(read.csv("data/Polyascus_proteins.csv"))
#select Area
dat1_pol <- dat[,c(3, 18:29)]
rownames(dat1_pol) <- dat1_pol[,1]
dat1_pol <- dat1_pol[,-1]
head(dat1_pol)
#####################Information about the samples##################
library(readxl)
fact_pol <- data.frame(read_excel("data/Polyascus_fact.xlsx"))
rownames(fact_pol) <- fact_pol[,1]
fact_pol <- fact_pol[, -1]
head(fact_pol)
fact_pol$Differentiation <- as.factor(fact_pol$Differentiation)
fact_pol$Differentiation
#Infection status as a factor
fact_pol$Status <- as.factor(fact_pol$Status)
fact_pol$Status
#Sex as a factor
fact_pol$Sex <- as.factor(fact_pol$Sex)
fact_pol$Sex
colnames(dat1_pol) <- rownames(fact_pol)
###############Filter##############
#as Differentiation
h_fem <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="hf"))])) >= 2/3), ]
h_male <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="hm"))])) >= 2/3), ]
in_fem <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="if"))])) >= 2/3), ]
in_male <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="im"))])) >= 2/3), ]
#as infection status
healthy <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Status=="healthy"))])) >= 5/6), ]
infected <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Status=="infected"))])) >= 5/6), ]
################Venn diagram#########
library(devtools)
library(ggvenn)
vennn_pol <- list(hth_fem = rownames(h_fem), hth_male = rownames(h_male), inf_fem = rownames(in_fem), inf_male = rownames(in_male))
tiff('Polyascus_Venn_diagram.tiff', bg = 'transparent', width = 1200, height = 900)
ggvenn(
vennn_pol,
fill_color = c("#d200ff", "#04fb04", "#fb0432", "#048afb"),
stroke_size = 0.5, set_name_size = 8, text_size = 7,
)
dev.off()
venn_status <- list(Healthy = rownames(healthy), Infected = rownames(infected))
tiff('Polyascus_inf_Venn_diagram.tiff', bg = 'transparent', width = 1200, height = 900)
ggvenn(
venn_status,
fill_color = c("#50a4dc", "#ffe255"),
stroke_size = 0.5, set_name_size = 8, text_size = 7,
)
dev.off()
#Removing NA
colSums(is.na(dat1_pol))
dat2_pol <- dat1_pol[which(rowMeans(!is.na(dat1_pol)) > 0.85), ]
mean(complete.cases(dat2_pol))
colSums(is.na(dat2_pol))
#############Imputation#########
library(impute)
tdat_pol <- t(dat2_pol)
pol_knn1 <- impute.knn(tdat_pol, k = 5)
pol_knn <- t(pol_knn1$data)
head(pol_knn)
mean(complete.cases(pol_knn))
#Expression data distribution
library(RColorBrewer)
pal <- brewer.pal(n = 9, name = "Set1")
cols <- pal[fact_pol$Differentiation]
boxplot(pol_knn, outline = FALSE, col = cols, main = "Raw data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
colSums(pol_knn)
#######Log-transformation#######
dat_log_pol <- log2(pol_knn+1)
mean(complete.cases(dat_log_pol))
boxplot(dat_log_pol, outline = FALSE, col = cols, main = "Log-transformed data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
##########Quantile normalization##########
library(limma)
dat_norm_pol <- normalizeQuantiles(dat_log_pol)
boxplot(dat_norm_pol, col = cols, main = "Normalized data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
mean(complete.cases(dat_norm_pol))
colSums(is.na(dat_norm_pol))
###########nMDS##########
library('vegan')
tdat_norm_pol <- t(dat_norm_pol)
pol_ord <- metaMDS(tdat_norm_pol,
distance = "euclidean",
autotransform = FALSE)
pol_ord$stress
#######nMDS ggplot######
nmds_scrs <- as.data.frame(scores(pol_ord, display = "sites"))
nmds_scrs <- cbind(nmds_scrs, Differentiation = fact_pol$Differentiation)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
nmds_plot <- ggplot(nmds_scrs, aes(x = NMDS1, y = NMDS2)) +
scale_colour_manual(values = pal_colour) +
geom_polygon(aes(group = factor(Differentiation), colour = factor(Differentiation)), size = 1, fill = 'transparent') +
geom_point(aes(x = NMDS1, y = NMDS2, colour = factor(Differentiation)), size = 4) +
coord_fixed() +
theme_classic() +
labs(colour = 'Differentiation') +
theme(legend.position = "right", legend.text = element_text(size = 12), legend.title = element_text(size = 12), axis.text = element_text(size = 12))
tiff('nMDS.tiff', units="in", width=12, height=8, res=600, compression = 'lzw')
nmds_plot
dev.off()
####### PLS-DA #############
library(mixOmics)
# Full (non-sparse) PLS-DA on all proteins; ncomp = 11 (samples - 1)
plsda_pol <- plsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 11)
symb <- c(16)  # filled circle for all samples
tiff('plsda.tiff', units="in", width=12, height=8, res=600, compression = 'lzw')
plotIndiv(plsda_pol, legend = TRUE, ellipse = TRUE, ind.names = FALSE,
          pch = symb, col = pal_colour, title = 'PLS-DA')
dev.off()
##################### sPLS-DA ################
# Model selection: tune the number of proteins kept per component
list.keepX <- c(5:10, seq(20, 100, 10))
# Heavy computation (1000 repeats x 3-fold CV), parallelised over 6 CPUs
opt_val <- tune.splsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 3, nrepeat = 1000, folds = 3, test.keepX = list.keepX, cpus = 6)
opt_val$choice.keepX   # tuned keepX per component
opt_val$choice.ncomp   # tuned number of components
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
symb <- c(16)
# Final sparse model: 2 components keeping 40 and 20 proteins
# (presumably chosen from the tuning output above — confirm against opt_val)
splsda_pol <- splsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 2, keepX = c(40, 20))
tiff('sPLS-DA.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
plotIndiv(splsda_pol, legend = TRUE, ellipse = TRUE, ind.names = FALSE,
          pch = symb, col = pal_colour, title = 'sPLS-DA')
dev.off()
##### sPLS-DA loadings #####
library('dplyr')
plsda_load <- splsda_pol$loadings
plsda_mat <- plsda_load$X          # protein loadings matrix (rows = proteins)
plsda_mat <- as.data.frame(plsda_mat)
# plotLoadings also returns the group each protein contributes to most
comp1_plot <- plotLoadings(splsda_pol, comp = 1, method = 'mean', contrib = 'max', ndisplay = 20,
                           title = 'Loading 1 component', size.title = 2, legend = TRUE)
comp2_plot <- plotLoadings(splsda_pol, comp = 2, method = 'mean', contrib = 'max', ndisplay = 20,
                           title = 'Loadings 2 component', size.title = 2, legend = FALSE)
dev.off()
##### Component 1: top-20 proteins by absolute loading #####
comp1_loadings <- plsda_load$X[,1]
comp1_loadings <- as.data.frame(comp1_loadings)
max_comp1 <- comp1_loadings %>%
  arrange(desc(abs(comp1_loadings))) %>%
  slice(1:20)
max_comp1$Accession <- row.names(max_comp1)
group_contrib <- comp1_plot$GroupContrib
max1_contr <- cbind(max_comp1, group_contrib)
descr <- dat[,c(3,46)]  # Accession + Description columns of the raw table
load_filtered <- descr %>%
  filter(descr$Accession %in% max1_contr$Accession)
max_comp1_descr <- left_join(max1_contr, load_filtered, by = 'Accession')
max_comp1_descr <- max_comp1_descr %>%
  rename(Loadings = comp1_loadings)
max_comp1_descr <- max_comp1_descr %>%
  rename(Contributions = group_contrib)
# Hand-picked rows removing duplicate/uninformative entries — these indices
# are data-specific; re-check them if the input table changes.
max1_descr <- max_comp1_descr[c(1,2,4:9,11:15,20),]
row.names(max1_descr) <- max1_descr$Description
max1_descr <- max1_descr[c(1:10),c(1,3,4)]
# Strip the "OS=<organism>..." suffix from protein names
row.names(max1_descr) <- gsub("OS.*.", replacement = "", row.names(max1_descr))
max1_descr$Description <- gsub("OS.*.", replacement = "", max1_descr$Description)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
bar_colour <- c("#d200ff", "#04fb04", "#048afb")  # only 3 groups contribute here
# Horizontal bar chart of loadings, ordered by absolute value
comp1_bar <- ggplot(max1_descr, aes(x = reorder(Description, abs(Loadings)), y = Loadings, fill = Contributions)) +
  geom_bar(stat = 'identity', colour = 'black') + xlab('Proteins') + coord_flip() +
  scale_fill_manual(values = bar_colour) +
  theme_classic() +
  theme(axis.text = element_text(size = 25), axis.title = element_text(size = 30),
        legend.title = element_text(size = 30), legend.text = element_text(size = 25),
        legend.spacing.y = unit(1, 'cm')) +
  guides(fill = guide_legend(byrow = TRUE))
tiff('Loadings1.tiff', units="px", width=13200, height=5500, res=600, compression = 'none')
comp1_bar
dev.off()
##### Component 2: top-20 proteins by absolute loading #####
comp2_loadings <- plsda_load$X[,2]
comp2_loadings <- as.data.frame(comp2_loadings)
max_comp2 <- comp2_loadings %>%
  arrange(desc(abs(comp2_loadings))) %>%
  slice(1:20)
max_comp2$Accession <- row.names(max_comp2)
# Group each protein contributes to most (from plotLoadings output above)
group_contrib2 <- comp2_plot$GroupContrib
max2_contr <- cbind(max_comp2, group_contrib2)
descr <- dat[,c(3,46)]  # Accession + Description columns of the raw table
load_filtered2 <- descr %>%
  filter(descr$Accession %in% max2_contr$Accession)
max_comp2_descr <- left_join(max2_contr, load_filtered2, by = 'Accession')
max_comp2_descr <- max_comp2_descr %>%
  rename(Loadings = comp2_loadings)
max_comp2_descr <- max_comp2_descr %>%
  rename(Contributions = group_contrib2)
# Hand-picked rows removing duplicate/uninformative entries — data-specific
max2_descr <- max_comp2_descr[c(1,4:8,10,14,17:20),]
row.names(max2_descr) <- max2_descr$Description
max2_descr <- max2_descr[c(1:10),c(1,3,4)]
# Strip the "OS=<organism>..." suffix from protein names
max2_descr$Description <- gsub("OS.*.", replacement = "", max2_descr$Description)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
# Horizontal bar chart of loadings, ordered by absolute value.
# FIX: base R provides make.unique(), not makeUnique(); the original
# makeUnique() call errors unless some attached package happens to export it.
# make.unique() disambiguates duplicated descriptions so reorder() keeps
# one bar per protein.
comp2_bar <- ggplot(max2_descr, aes(x = reorder(make.unique(Description), abs(Loadings)), y = Loadings, fill = Contributions)) +
  geom_bar(stat = 'identity', colour = 'black') + xlab('Proteins') + coord_flip() +
  scale_fill_manual(values = pal_colour) +
  theme_classic() +
  theme(axis.text = element_text(size = 25), axis.title = element_text(size = 30),
        legend.title = element_text(size = 30), legend.text = element_text(size = 25),
        legend.spacing.y = unit(1, 'cm')) +
  guides(fill = guide_legend(byrow = TRUE))
tiff('Loadings2.tiff', units="px", width=13200, height=5500, res=600, compression = 'none')
comp2_bar
dev.off()
############### Differential expression on sPLS-DA-selected proteins ##############
# Proteins with a non-zero loading on either sparse component
sign_prot <- rownames(filter(plsda_mat, comp1 != 0 | comp2 != 0))
head(sign_prot)
######### limma differential expression analysis ############
# Design matrix: one column per group, no intercept
X_pol <- model.matrix(~ 0 + fact_pol$Differentiation)
X_pol
colnames(X_pol) <- c('hlth_fem', 'hlth_male', 'inf_fem', 'inf_male')
dat_norm_pol_prot <- as.data.frame(dat_norm_pol)
names <- sign_prot
dat_filtered <- dat_norm_pol_prot %>%
  filter(row.names(dat_norm_pol_prot) %in% names)
dat_filtered <- as.matrix(dat_filtered)
# Robust regression to down-weight outlying samples
fit_filter <- lmFit(dat_filtered, design = X_pol, method = "robust", maxit = 10000)
# All pairwise group contrasts
contrast.matrix <- makeContrasts(hlth_fem-hlth_male, hlth_fem-inf_fem, hlth_fem-inf_male, hlth_male-inf_male, hlth_male-inf_fem, inf_fem-inf_male, levels = X_pol)
fit2_filter <- contrasts.fit(fit_filter, contrast.matrix)
# Empirical Bayes moderation of variances
efit_filter <- eBayes(fit2_filter)
# Differential expression table
topTable(efit_filter)
# FIX: length() on a matrix counts ALL cells (rows * cols); the number of
# proteins is nrow(). (topTable caps `number` at the row count, so the
# original still returned every protein, but nrow() states the intent.)
numGenes_filter <- nrow(dat_filtered)
full_list_efit_filter <- topTable(efit_filter, number = numGenes_filter)
write.csv(full_list_efit_filter,'Dif_expr_Polyascus_sPLS-DA.csv')
head(full_list_efit_filter)
# Re-read the table and attach protein descriptions
library(dplyr)
exp_fil <- data.frame(read.csv("Dif_expr_Polyascus_sPLS-DA.csv"))
exp_fil <- exp_fil %>%
  rename(Accession = X)
descr <- dat[,c(3,46)]
descr_filtered <- descr %>%
  filter(descr$Accession %in% names)
descr_fil_pol <- left_join(descr_filtered, exp_fil, by ='Accession')
descr_fil_pol$Protein <- as.factor(gsub("OS.*.", replacement = "", descr_fil_pol$Description))
write.csv(descr_fil_pol,'Dif_expr_Polyascus_sPLS-Da_description.csv')
# Differentially expressed proteins (adjusted p <= 0.05)
p_above_fil <- exp_fil$adj.P.Val <= 0.05
sum(p_above_fil)
# NOTE(review): row range 1:44 is hard-coded and assumes the table is sorted
# so the first 44 rows are exactly the significant ones — verify against
# sum(p_above_fil) whenever the input data change.
p_protein_fil <- exp_fil[c(1:44), ]
accessions_fil <- p_protein_fil$Accession
write.csv(accessions_fil,'Dif_expr_sPLS-DA_Polyascus_44_prot.csv')
accessions2_fil <- p_protein_fil[, c(1, 11)]  # Accession + adj.P.Val columns
accessions2_fil
acc <- accessions2_fil$Accession
pv_filt <- descr %>%
  filter(descr$Accession %in% accessions2_fil$Accession)
descr_pv_pol <- left_join(pv_filt, accessions2_fil, by ='Accession')
descr_padj_pol <- descr_pv_pol %>%
  arrange(adj.P.Val)
write.csv(descr_padj_pol,'Dif_expr_descr_sPLS-DA_Polyascus_44.csv')
###### Tables of differentially expressed proteins for heatmaps ####
f_dif_all <- descr_fil_pol$adj.P.Val <= 0.05
f_dif_all2 <- descr_fil_pol[f_dif_all,]
# Expression sub-matrix restricted to the significant proteins
for_obt_heatmap <- rownames(dat_filtered) %in% f_dif_all2$Accession
for_obt_heatmap_filt <- dat_filtered[for_obt_heatmap, ]
for_obt_heatmap_filt1 <- as.data.frame(for_obt_heatmap_filt)
for_obt_heatmap_filt1$Accession <- rownames(for_obt_heatmap_filt1)
heatmap_filt <- left_join(for_obt_heatmap_filt1, pv_filt, by ='Accession')
# Drop proteins with duplicated descriptions (keep first occurrence)
heatmap_descr <- heatmap_filt[!duplicated(heatmap_filt$Description), ]
row.names(heatmap_descr) <- heatmap_descr$Description
# Hand-picked rows (data-specific indices); columns 1:12 = the 12 samples
hm_descr <- heatmap_descr[c(1:4, 10:12, 14:22, 24:25, 27:28, 30:32, 34:37), 1:12]
row.names(hm_descr) <- gsub("OS.*.", replacement = "", row.names(hm_descr))
m_hm_descr <- as.matrix(hm_descr)
# Companion table (description + adj.P.Val) without repeats
table_filt <- left_join(for_obt_heatmap_filt1, descr_padj_pol, by ='Accession')
table_descr <- table_filt[!duplicated(table_filt$Description), ]
row.names(table_descr) <- table_descr$Description
table_prot_descr <- table_descr[c(1:4, 10:12, 14:22, 24:25, 27:28, 30:32, 34:37), 14:15]
write.csv(table_prot_descr,'Dif_prot_final_sPLS-DA_descr_Pp_27.csv')
###### Heatmap labelled with accessions ####
library('ComplexHeatmap')
library('dendextend')
# Average-linkage clustering on Euclidean distances: columns = samples,
# rows = proteins
col_dend_acc <- hclust(dist(t(for_obt_heatmap_filt), method = 'euclidean'), method = 'average')
row_dend <- hclust(dist(for_obt_heatmap_filt, method = 'euclidean'), method = 'average')
list_col <- list(Groups = c('hf' = "#d200ff", 'hm' = "#04fb04", 'if' = "#fb0432", 'im' = "#048afb"))
heat_annot <- HeatmapAnnotation(Groups = fact_pol$Differentiation, col = list_col,
                                annotation_name_gp= gpar(fontsize = 15),
                                annotation_legend_param = list(Groups = list(
                                  title = 'Groups', title_gp = gpar(fontsize = 15),
                                  labels_gp = gpar(fontsize = 12),
                                  at = c("hf", "hm", 'if', 'im'),
                                  labels = c("Healthy female", "Healthy male",
                                             'Infected female', 'Infected male'))))
heat_acc <- Heatmap(for_obt_heatmap_filt,
                    name = "Differential protein expression",
                    column_title = "Variables", row_title = "Samples",
                    row_names_gp = gpar(fontsize = 7),
                    cluster_rows = row_dend,
                    cluster_columns = color_branches(col_dend_acc, k = 3),
                    top_annotation = heat_annot)
###### Heatmap labelled with protein descriptions #####
col_dend_descr <- hclust(dist(t(hm_descr), method = 'euclidean'), method = 'average')
heat_descr <- Heatmap(m_hm_descr,
                      name = "Expression",
                      column_title = "Variables", row_title = "Samples",
                      rect_gp = gpar(col = "white", lwd = 2),
                      row_names_gp = gpar(fontsize = 15),
                      row_names_max_width = max_text_width(
                        rownames(m_hm_descr),
                        gp = gpar(fontsize = 20)),
                      cluster_columns = color_branches(col_dend_descr, k = 2),
                      top_annotation = heat_annot,
                      heatmap_legend_param = list(labels_gp = gpar(fontsize = 12),
                                                  title_gp = gpar(fontsize = 15)))
dev.off()
tiff('heatmap_descr_gr.tiff', units="px", width=7000, height=4800, res=600, compression = 'none')
draw(heat_descr, heatmap_legend_side = "left", annotation_legend_side = "bottom")
dev.off()
####### Exploratory boxplots of individual proteins across groups ######
# Each block: look up a protein of interest by its UniProt accession and
# plot its (normalised, log-scale) expression per Differentiation group.
#Hemocyanin subunit 6 OS=Eriocheir sinensis 0.000554452
descr_fil_pol[descr_fil_pol$Accession == "K4EJG5|K4EJG5_ERISI", ]
boxplot(dat_filtered[c("K4EJG5|K4EJG5_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#FTCD_N domain-containing protein OS=Scylla olivacea 0.000226069
descr_fil_pol[descr_fil_pol$Accession == "A0A0P4WFR3|A0A0P4WFR3_SCYOL", ]
boxplot(dat_filtered[c("A0A0P4WFR3|A0A0P4WFR3_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
# Proteasome subunit alpha type OS=Scylla olivacea 0.004762967
accessions2_fil[3,]
descr_fil_pol[descr_fil_pol$Accession == "A0A0N7ZDN1|A0A0N7ZDN1_SCYOL", ]
boxplot(dat_filtered[c("A0A0N7ZDN1|A0A0N7ZDN1_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Glucosamine-6-phosphate isomerase 0.005143184
accessions2_fil[5,]
descr_fil_pol[descr_fil_pol$Accession == "A0A5B7DES9|A0A5B7DES9_PORTR", ]
boxplot(dat_filtered[c("A0A5B7DES9|A0A5B7DES9_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
View(descr_padj_pol)
#Glycogen debrancher OS=Scylla olivacea (?)
boxplot(dat_filtered[c("A0A0P4W3T4|A0A0P4W3T4_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Hemocyanin subunit 2
boxplot(dat_filtered[c("sp|C0HLU8|HCY_SCYSE"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Argininosuccinate lyase OS=Penaeus vannamei
boxplot(dat_filtered[c("A0A3R7MNM6|A0A3R7MNM6_PENVA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#UTP--glucose-1-phosphate uridylyltransferase
boxplot(dat_filtered[c("A0A0P4WF84|A0A0P4WF84_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Histone H4 OS=Tigriopus californicus
boxplot(dat_filtered[c("A0A553NPY7|A0A553NPY7_TIGCA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Calbindin-32-like
boxplot(dat_filtered[c("A0A6A7G3C7|A0A6A7G3C7_9CRUS"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Alpha-2-macroglobulin (Fragment)
boxplot(dat_filtered[c("A0A068J627|A0A068J627_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Fructose-bisphosphate aldolase
boxplot(dat_filtered[c("A0A0P4W855|A0A0P4W855_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Paxillin
boxplot(dat_filtered[c("A0A0P4W3C8|A0A0P4W3C8_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Actin-related protein = supressor of profilin
boxplot(dat_filtered[c("A0A0P4WLJ6|A0A0P4WLJ6_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
# Transcriptional activator protein Pur-alpha
boxplot(dat_filtered[c("A0A5B7E8T1|A0A5B7E8T1_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Ferritin
boxplot(dat_filtered[c("D6MXQ4|D6MXQ4_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Thioredoxin
boxplot(dat_filtered[c("C4N5V0|C4N5V0_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Glutathione transferase
boxplot(dat_filtered[c("A0A4D7AVC7|A0A4D7AVC7_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Prostaglandin D synthase OS=Eriocheir sinensis
boxplot(dat_filtered[c("I6LWU2|I6LWU2_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#14-3-3 protein
boxplot(dat_filtered[c("A0A385L4G6|A0A385L4G6_PROCL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
############ sPLS-DA volcano plot with protein names ############
tiff('fin_Vulcano_des_Polyascus_hf_if.tiff', units="px", width=6600, height=4800, res=600, compression = 'none')
EnhancedVolcano(descr_fil_pol,
                lab = descr_fil_pol$Protein,
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): adjusted vs raw p-value on the y-axis — confirm adj.P.Val is intended here
                pCutoff = 0.05,
                xlim = c(-3, 3),
                ylim = c(0, 3),
                FCcutoff = 1, # fold-change cutoff line at |log2FC| = 1 (the EnhancedVolcano default)
                title ="Healthy vs infected female crabs",
                labSize = 6,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
############ Differential expression of ALL proteins (no sPLS-DA filter) #############
X_pol <- model.matrix(~ 0 + fact_pol$Differentiation)
X_pol
colnames(X_pol) <- c('hlth_fem', 'hlth_male', 'inf_fem', 'inf_male')
# Robust fit on the full normalised matrix
fit_pol <- lmFit(dat_norm_pol, design = X_pol, method = "robust", maxit = 10000)
contrast.matrix <- makeContrasts(hlth_fem-hlth_male, hlth_fem-inf_fem, hlth_male-inf_male, hlth_male-inf_fem, inf_fem-inf_male, inf_male-hlth_fem, levels = X_pol)
fit2_pol <- contrasts.fit(fit_pol, contrast.matrix)
# Empirical Bayes statistics
efit_pol <- eBayes(fit2_pol)
#Differential expression table
topTable(efit_pol)
# NOTE(review): length() on a matrix counts all cells, not rows; nrow() would
# be the precise count (topTable caps `number`, so output is unaffected).
numGenes_pol <- length(dat_norm_pol)
full_list_efit_pol <- topTable(efit_pol, number = numGenes_pol)
write.csv(full_list_efit_pol,'Dif_expr_Polyascus_all_proteins.csv')
head(full_list_efit_pol)
########## Volcano plots ##############
library(EnhancedVolcano)
head(full_list_efit_pol)
tiff('Vulcano_Polyascus_im_hf.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
EnhancedVolcano(full_list_efit_pol,
                lab = rownames(full_list_efit_pol),
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): adjusted vs raw p-value on the y-axis — confirm adj.P.Val is intended here
                pCutoff = 0.05,
                #xlim = c(-8, 10),
                #ylim = c(0, 5),
                FCcutoff = 1, # fold-change cutoff line at |log2FC| = 1 (the EnhancedVolcano default)
                title ="Vulcano",
                labSize = 4.0,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
############ Volcano plots with protein names ############
experiment <- data.frame(read.csv("Vul_Dif_expr_Polyascus.csv"))
head(experiment)
descr <- dat[,c(3,46)]  # Accession + Description columns of the raw table
head(descr)
descr_full_pol <- left_join(descr, experiment, by ='Accession')
# Strip the "OS=<organism>..." suffix to get readable protein labels
descr_full_pol$Protein <- as.factor(gsub("OS.*.", replacement = "", descr_full_pol$Description))
write.csv(descr_full_pol,'Dif_expr_Polyascus_description.csv')
head(descr_full_pol)
tiff('Vulcano_Polyascus_hm_im.tiff', units="px", width=6600, height=4800, res=600, compression = 'none')
EnhancedVolcano(descr_full_pol,
                lab = descr_full_pol$Protein,
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): adjusted vs raw p-value on the y-axis — confirm adj.P.Val is intended here
                pCutoff = 0.05,
                xlim = c(-3, 3.5),
                ylim = c(0, 3),
                FCcutoff = 1, # fold-change cutoff line at |log2FC| = 1 (the EnhancedVolcano default)
                title ="Healthy vs infected female crabs",
                labSize = 4.7,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
# Differentially expressed proteins (adjusted p <= 0.05)
p_above_pol <- experiment$adj.P.Val <= 0.05
sum(p_above_pol)
# NOTE(review): rows 1:26 are hard-coded; verify against sum(p_above_pol)
# whenever the input data change.
p_protein <- experiment[c(1:26), ]
accessions2 <- p_protein$Accession
write.csv(accessions2,'Dif_expr_26_prot.csv')
accessions2 <- p_protein[, c(1, 11)]  # Accession + adj.P.Val columns
head(accessions2)
names <- data.frame(read.csv("Dif_expr_26_prot_with_desc.csv"))
names <- names[, -1]
View(names)
###### gplots heatmaps of all-protein DE results #############
library(gplots)
p_above_pol2 <- experiment[p_above_pol,]
head(p_above_pol2)
head(dat_norm_pol)
# Expression sub-matrix of the significant proteins
for_obt_hm_all <- rownames(dat_norm_pol) %in% p_above_pol2$Accession
for_obt_hm_all_f <- dat_norm_pol[for_obt_hm_all, ]
head(for_obt_hm_all_f)
for_obt_hm_all_f1 <- as.data.frame(for_obt_hm_all_f)
for_obt_hm_all_f1$Accession <- rownames(for_obt_hm_all_f1)
descr_for_all <- names[, -1]
hm_all_filt <- left_join(for_obt_hm_all_f1, descr_for_all, by ='Accession')
View(hm_all_filt)
# Drop duplicated descriptions (keep first occurrence)
hm_all_descr <- hm_all_filt[!duplicated(hm_all_filt$Description), ]
row.names(hm_all_descr) <- hm_all_descr$Description
View(hm_all_descr)
# Hand-picked rows (data-specific indices); columns 1:12 = the 12 samples
hm_descr2 <- hm_all_descr[c(1:6, 10:13, 15:16, 22:23), 1:12]
View(hm_descr2)
row.names(hm_descr2) <- gsub("OS.*.", replacement = "", row.names(hm_descr2))
head(hm_descr)
prot_table <- hm_all_descr[c(1:6, 10:13, 15:16, 22:23), 14:15]
View(prot_table)
write.csv(prot_table,'Dif_prot_padj_Pp.csv')
# Heatmap labelled by accession
heatmap_obt <- colorpanel(75, low = '#00d2ff', high = '#ff004e')
heatmap.2(x = as.matrix(for_obt_hm_all_f), col = heatmap_obt, scale = 'none',
          distfun = function(x) dist(x, method = 'euclidean'),
          hclustfun = function(x) hclust(x, method = 'average'),
          key = TRUE, symkey = FALSE, density.info = 'none',
          trace = 'none', cexRow = 1, cexCol = 1, margins = c(6,9),
          keysize = 1)
# Heatmap labelled by protein description
heatmap.2(x = as.matrix(hm_descr2), col = heatmap_obt, scale = 'none',
          distfun = function(x) dist(x, method = 'euclidean'),
          hclustfun = function(x) hclust(x, method = 'average'),
          key = TRUE, symkey = FALSE, density.info = 'none',
          trace = 'none', cexRow = 1, cexCol = 1, margins = c(5,20),
          keysize = 0.7)
dev.off()
############### Boxplot TIFFs for differentially expressed proteins ############
# Each block writes one boxplot of a protein's normalised expression per
# Differentiation group to a standalone TIFF file.
boxplot(dat_norm_pol[c("A0A2P2I8Q8|A0A2P2I8Q8_9CRUS"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
tiff('Hemocyanin_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("K4EJG5|K4EJG5_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('FTCD_N domain-containing protein_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4WFR3|A0A0P4WFR3_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Proteasome subunit alpha type OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0N7ZDN1|A0A0N7ZDN1_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Glucosamine-6-phosphate isomerase_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A5B7DES9|A0A5B7DES9_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Glycogen debrancher OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4W3T4|A0A0P4W3T4_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Hemocyanin subunit 2 OS=Carcinus aestuarii _boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("sp|P84293|HCY2_CARAE"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Argininosuccinate lyase OS=Penaeus vannamei_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A3R7MNM6|A0A3R7MNM6_PENVA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('UTP--glucose-1-phosphate uridylyltransferase _boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4WF84|A0A0P4WF84_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Uncharacterized protein OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4VS89|A0A0P4VS89_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Histone H4 OS=Tigriopus californicus_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A553NPY7|A0A553NPY7_TIGCA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
|
/Diff_exp_nervous_tissue_Hemigrapsus_Polyascus.R
|
no_license
|
anlianguzova/Polyascus_polygeneus_host_nerv_prot_diff_exp
|
R
| false
| false
| 28,185
|
r
|
# NOTE(review): setwd('') is an empty path and will error when run; either
# remove it (preferred — run the script from the project root) or fill in
# the project directory. setwd() in scripts is discouraged.
setwd('')
#Reading data
dat <- data.frame(read.csv("data/Polyascus_proteins.csv"))
# Select Accession (col 3) + the 12 Area columns (cols 18:29)
dat1_pol <- dat[,c(3, 18:29)]
rownames(dat1_pol) <- dat1_pol[,1]
dat1_pol <- dat1_pol[,-1]
head(dat1_pol)
##################### Information about the samples ##################
library(readxl)
fact_pol <- data.frame(read_excel("data/Polyascus_fact.xlsx"))
rownames(fact_pol) <- fact_pol[,1]
fact_pol <- fact_pol[, -1]
head(fact_pol)
# Differentiation = combined sex x infection group (hf/hm/if/im)
fact_pol$Differentiation <- as.factor(fact_pol$Differentiation)
fact_pol$Differentiation
#Infection status as a factor
fact_pol$Status <- as.factor(fact_pol$Status)
fact_pol$Status
#Sex as a factor
fact_pol$Sex <- as.factor(fact_pol$Sex)
fact_pol$Sex
# Align expression-matrix column names with sample metadata rows
colnames(dat1_pol) <- rownames(fact_pol)
############### Presence filtering + Venn diagrams ##############
# Keep proteins quantified in at least 2/3 of samples within each group
h_fem <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="hf"))])) >= 2/3), ]
h_male <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="hm"))])) >= 2/3), ]
in_fem <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="if"))])) >= 2/3), ]
in_male <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Differentiation=="im"))])) >= 2/3), ]
# By infection status: present in at least 5/6 of samples per status
healthy <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Status=="healthy"))])) >= 5/6), ]
infected <- dat1_pol[which(rowMeans(!is.na(dat1_pol[,rownames(subset(fact_pol,Status=="infected"))])) >= 5/6), ]
################ Venn diagrams of protein overlap #########
library(devtools)
library(ggvenn)
vennn_pol <- list(hth_fem = rownames(h_fem), hth_male = rownames(h_male), inf_fem = rownames(in_fem), inf_male = rownames(in_male))
tiff('Polyascus_Venn_diagram.tiff', bg = 'transparent', width = 1200, height = 900)
ggvenn(
  vennn_pol,
  fill_color = c("#d200ff", "#04fb04", "#fb0432", "#048afb"),
  stroke_size = 0.5, set_name_size = 8, text_size = 7,
)
dev.off()
venn_status <- list(Healthy = rownames(healthy), Infected = rownames(infected))
tiff('Polyascus_inf_Venn_diagram.tiff', bg = 'transparent', width = 1200, height = 900)
ggvenn(
  venn_status,
  fill_color = c("#50a4dc", "#ffe255"),
  stroke_size = 0.5, set_name_size = 8, text_size = 7,
)
dev.off()
# Removing NA: keep proteins quantified in > 85% of all samples
colSums(is.na(dat1_pol))
dat2_pol <- dat1_pol[which(rowMeans(!is.na(dat1_pol)) > 0.85), ]
mean(complete.cases(dat2_pol))
colSums(is.na(dat2_pol))
############# kNN imputation of remaining missing values #########
library(impute)
# impute.knn expects genes/proteins in rows of the input; here the matrix is
# transposed first so imputation runs across samples — then transposed back
tdat_pol <- t(dat2_pol)
pol_knn1 <- impute.knn(tdat_pol, k = 5)
pol_knn <- t(pol_knn1$data)
head(pol_knn)
mean(complete.cases(pol_knn))  # expect 1: no NAs remain
# Expression data distribution per sample, coloured by group
library(RColorBrewer)
pal <- brewer.pal(n = 9, name = "Set1")
cols <- pal[fact_pol$Differentiation]
boxplot(pol_knn, outline = FALSE, col = cols, main = "Raw data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
colSums(pol_knn)  # per-sample totals after imputation (sanity check)
####### Log-transformation #######
# log2(x + 1): variance stabilisation; the +1 avoids log2(0) = -Inf
dat_log_pol <- log2(pol_knn+1)
mean(complete.cases(dat_log_pol))
boxplot(dat_log_pol, outline = FALSE, col = cols, main = "Log-transformed data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
########## Quantile normalization ##########
library(limma)
# Force identical intensity distributions across samples
dat_norm_pol <- normalizeQuantiles(dat_log_pol)
boxplot(dat_norm_pol, col = cols, main = "Normalized data")
legend("topright", levels(fact_pol$Differentiation), fill = pal, bty = "n", xpd = T)
mean(complete.cases(dat_norm_pol))
colSums(is.na(dat_norm_pol))
########### nMDS ordination of samples ##########
library('vegan')
# metaMDS expects samples in rows, so transpose
tdat_norm_pol <- t(dat_norm_pol)
pol_ord <- metaMDS(tdat_norm_pol,
                   distance = "euclidean",
                   autotransform = FALSE)
pol_ord$stress  # stress < 0.2 is conventionally acceptable
####### nMDS ggplot ######
nmds_scrs <- as.data.frame(scores(pol_ord, display = "sites"))
nmds_scrs <- cbind(nmds_scrs, Differentiation = fact_pol$Differentiation)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
nmds_plot <- ggplot(nmds_scrs, aes(x = NMDS1, y = NMDS2)) +
  scale_colour_manual(values = pal_colour) +
  geom_polygon(aes(group = factor(Differentiation), colour = factor(Differentiation)), size = 1, fill = 'transparent') +
  geom_point(aes(x = NMDS1, y = NMDS2, colour = factor(Differentiation)), size = 4) +
  coord_fixed() +
  theme_classic() +
  labs(colour = 'Differentiation') +
  theme(legend.position = "right", legend.text = element_text(size = 12), legend.title = element_text(size = 12), axis.text = element_text(size = 12))
tiff('nMDS.tiff', units="in", width=12, height=8, res=600, compression = 'lzw')
nmds_plot
dev.off()
####### PLS-DA #############
library(mixOmics)
# Full (non-sparse) PLS-DA on all proteins; ncomp = 11 (samples - 1)
plsda_pol <- plsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 11)
symb <- c(16)  # filled circle for all samples
tiff('plsda.tiff', units="in", width=12, height=8, res=600, compression = 'lzw')
plotIndiv(plsda_pol, legend = TRUE, ellipse = TRUE, ind.names = FALSE,
          pch = symb, col = pal_colour, title = 'PLS-DA')
dev.off()
##################### sPLS-DA ################
# Model selection: tune number of proteins kept per component
# (heavy: 1000 repeats x 3-fold CV over 6 CPUs)
list.keepX <- c(5:10, seq(20, 100, 10))
opt_val <- tune.splsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 3, nrepeat = 1000, folds = 3, test.keepX = list.keepX, cpus = 6)
opt_val$choice.keepX
opt_val$choice.ncomp
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
symb <- c(16)
# Final sparse model: 2 components keeping 40 and 20 proteins
splsda_pol <- splsda(tdat_norm_pol, fact_pol$Differentiation, ncomp = 2, keepX = c(40, 20))
tiff('sPLS-DA.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
plotIndiv(splsda_pol, legend = TRUE, ellipse = TRUE, ind.names = FALSE,
          pch = symb, col = pal_colour, title = 'sPLS-DA')
dev.off()
##### sPLS-DA loadings #####
library('dplyr')
plsda_load <- splsda_pol$loadings
plsda_mat <- plsda_load$X          # protein loadings matrix (rows = proteins)
plsda_mat <- as.data.frame(plsda_mat)
# plotLoadings also returns the group each protein contributes to most
comp1_plot <- plotLoadings(splsda_pol, comp = 1, method = 'mean', contrib = 'max', ndisplay = 20,
                           title = 'Loading 1 component', size.title = 2, legend = TRUE)
comp2_plot <- plotLoadings(splsda_pol, comp = 2, method = 'mean', contrib = 'max', ndisplay = 20,
                           title = 'Loadings 2 component', size.title = 2, legend = FALSE)
dev.off()
##### Component 1: top-20 proteins by absolute loading #####
comp1_loadings <- plsda_load$X[,1]
comp1_loadings <- as.data.frame(comp1_loadings)
max_comp1 <- comp1_loadings %>%
  arrange(desc(abs(comp1_loadings))) %>%
  slice(1:20)
max_comp1$Accession <- row.names(max_comp1)
group_contrib <- comp1_plot$GroupContrib
max1_contr <- cbind(max_comp1, group_contrib)
descr <- dat[,c(3,46)]  # Accession + Description columns of the raw table
load_filtered <- descr %>%
  filter(descr$Accession %in% max1_contr$Accession)
max_comp1_descr <- left_join(max1_contr, load_filtered, by = 'Accession')
max_comp1_descr <- max_comp1_descr %>%
  rename(Loadings = comp1_loadings)
max_comp1_descr <- max_comp1_descr %>%
  rename(Contributions = group_contrib)
# Hand-picked rows removing duplicate/uninformative entries — data-specific
max1_descr <- max_comp1_descr[c(1,2,4:9,11:15,20),]
row.names(max1_descr) <- max1_descr$Description
max1_descr <- max1_descr[c(1:10),c(1,3,4)]
# Strip the "OS=<organism>..." suffix from protein names
row.names(max1_descr) <- gsub("OS.*.", replacement = "", row.names(max1_descr))
max1_descr$Description <- gsub("OS.*.", replacement = "", max1_descr$Description)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
bar_colour <- c("#d200ff", "#04fb04", "#048afb")  # only 3 groups contribute here
comp1_bar <- ggplot(max1_descr, aes(x = reorder(Description, abs(Loadings)), y = Loadings, fill = Contributions)) +
  geom_bar(stat = 'identity', colour = 'black') + xlab('Proteins') + coord_flip() +
  scale_fill_manual(values = bar_colour) +
  theme_classic() +
  theme(axis.text = element_text(size = 25), axis.title = element_text(size = 30),
        legend.title = element_text(size = 30), legend.text = element_text(size = 25),
        legend.spacing.y = unit(1, 'cm')) +
  guides(fill = guide_legend(byrow = TRUE))
tiff('Loadings1.tiff', units="px", width=13200, height=5500, res=600, compression = 'none')
comp1_bar
dev.off()
##### Component 2: top-20 proteins by absolute loading #####
comp2_loadings <- plsda_load$X[,2]
comp2_loadings <- as.data.frame(comp2_loadings)
max_comp2 <- comp2_loadings %>%
  arrange(desc(abs(comp2_loadings))) %>%
  slice(1:20)
max_comp2$Accession <- row.names(max_comp2)
# Group each protein contributes to most (from plotLoadings output above)
group_contrib2 <- comp2_plot$GroupContrib
max2_contr <- cbind(max_comp2, group_contrib2)
descr <- dat[,c(3,46)]  # Accession + Description columns of the raw table
load_filtered2 <- descr %>%
  filter(descr$Accession %in% max2_contr$Accession)
max_comp2_descr <- left_join(max2_contr, load_filtered2, by = 'Accession')
max_comp2_descr <- max_comp2_descr %>%
  rename(Loadings = comp2_loadings)
max_comp2_descr <- max_comp2_descr %>%
  rename(Contributions = group_contrib2)
# Hand-picked rows removing duplicate/uninformative entries — data-specific
max2_descr <- max_comp2_descr[c(1,4:8,10,14,17:20),]
row.names(max2_descr) <- max2_descr$Description
max2_descr <- max2_descr[c(1:10),c(1,3,4)]
# Strip the "OS=<organism>..." suffix from protein names
max2_descr$Description <- gsub("OS.*.", replacement = "", max2_descr$Description)
pal_colour <- c("#d200ff", "#04fb04", "#fb0432", "#048afb")
# Horizontal bar chart of loadings, ordered by absolute value.
# FIX: base R provides make.unique(), not makeUnique(); the original
# makeUnique() call errors unless some attached package happens to export it.
comp2_bar <- ggplot(max2_descr, aes(x = reorder(make.unique(Description), abs(Loadings)), y = Loadings, fill = Contributions)) +
  geom_bar(stat = 'identity', colour = 'black') + xlab('Proteins') + coord_flip() +
  scale_fill_manual(values = pal_colour) +
  theme_classic() +
  theme(axis.text = element_text(size = 25), axis.title = element_text(size = 30),
        legend.title = element_text(size = 30), legend.text = element_text(size = 25),
        legend.spacing.y = unit(1, 'cm')) +
  guides(fill = guide_legend(byrow = TRUE))
tiff('Loadings2.tiff', units="px", width=13200, height=5500, res=600, compression = 'none')
comp2_bar
dev.off()
############### Differential expression on sPLS-DA-selected proteins ##############
# Proteins with a non-zero loading on either sparse component
sign_prot <- rownames(filter(plsda_mat, comp1 != 0 | comp2 != 0))
head(sign_prot)
######### limma differential expression analysis ############
# Design matrix: one column per group, no intercept
X_pol <- model.matrix(~ 0 + fact_pol$Differentiation)
X_pol
colnames(X_pol) <- c('hlth_fem', 'hlth_male', 'inf_fem', 'inf_male')
dat_norm_pol_prot <- as.data.frame(dat_norm_pol)
names <- sign_prot
dat_filtered <- dat_norm_pol_prot %>%
  filter(row.names(dat_norm_pol_prot) %in% names)
dat_filtered <- as.matrix(dat_filtered)
# Robust regression to down-weight outlying samples
fit_filter <- lmFit(dat_filtered, design = X_pol, method = "robust", maxit = 10000)
# All pairwise group contrasts
contrast.matrix <- makeContrasts(hlth_fem-hlth_male, hlth_fem-inf_fem, hlth_fem-inf_male, hlth_male-inf_male, hlth_male-inf_fem, inf_fem-inf_male, levels = X_pol)
fit2_filter <- contrasts.fit(fit_filter, contrast.matrix)
# Empirical Bayes moderation of variances
efit_filter <- eBayes(fit2_filter)
# Differential expression table
topTable(efit_filter)
# FIX: length() on a matrix counts ALL cells (rows * cols); the number of
# proteins is nrow(). (topTable caps `number` at the row count, so the
# original still returned every protein, but nrow() states the intent.)
numGenes_filter <- nrow(dat_filtered)
full_list_efit_filter <- topTable(efit_filter, number = numGenes_filter)
write.csv(full_list_efit_filter,'Dif_expr_Polyascus_sPLS-DA.csv')
head(full_list_efit_filter)
# Re-read the table and attach protein descriptions
library(dplyr)
exp_fil <- data.frame(read.csv("Dif_expr_Polyascus_sPLS-DA.csv"))
exp_fil <- exp_fil %>%
  rename(Accession = X)
descr <- dat[,c(3,46)]
descr_filtered <- descr %>%
  filter(descr$Accession %in% names)
descr_fil_pol <- left_join(descr_filtered, exp_fil, by ='Accession')
descr_fil_pol$Protein <- as.factor(gsub("OS.*.", replacement = "", descr_fil_pol$Description))
write.csv(descr_fil_pol,'Dif_expr_Polyascus_sPLS-Da_description.csv')
# Differentially expressed proteins (adjusted p <= 0.05)
p_above_fil <- exp_fil$adj.P.Val <= 0.05
sum(p_above_fil)
# NOTE(review): row range 1:44 is hard-coded; verify against sum(p_above_fil)
# whenever the input data change.
p_protein_fil <- exp_fil[c(1:44), ]
accessions_fil <- p_protein_fil$Accession
write.csv(accessions_fil,'Dif_expr_sPLS-DA_Polyascus_44_prot.csv')
accessions2_fil <- p_protein_fil[, c(1, 11)]  # Accession + adj.P.Val columns
accessions2_fil
acc <- accessions2_fil$Accession
pv_filt <- descr %>%
  filter(descr$Accession %in% accessions2_fil$Accession)
descr_pv_pol <- left_join(pv_filt, accessions2_fil, by ='Accession')
descr_padj_pol <- descr_pv_pol %>%
  arrange(adj.P.Val)
write.csv(descr_padj_pol,'Dif_expr_descr_sPLS-DA_Polyascus_44.csv')
######Tables with differential expression####
f_dif_all <- descr_fil_pol$adj.P.Val <= 0.05
f_dif_all2 <- descr_fil_pol[f_dif_all,]
for_obt_heatmap <- rownames(dat_filtered) %in% f_dif_all2$Accession
for_obt_heatmap_filt <- dat_filtered[for_obt_heatmap, ]
for_obt_heatmap_filt1 <- as.data.frame(for_obt_heatmap_filt)
for_obt_heatmap_filt1$Accession <- rownames(for_obt_heatmap_filt1)
heatmap_filt <- left_join(for_obt_heatmap_filt1, pv_filt, by ='Accession')
heatmap_descr <- heatmap_filt[!duplicated(heatmap_filt$Description), ]
row.names(heatmap_descr) <- heatmap_descr$Description
hm_descr <- heatmap_descr[c(1:4, 10:12, 14:22, 24:25, 27:28, 30:32, 34:37), 1:12]
row.names(hm_descr) <- gsub("OS.*.", replacement = "", row.names(hm_descr))
m_hm_descr <- as.matrix(hm_descr)
#Table without repeats
table_filt <- left_join(for_obt_heatmap_filt1, descr_padj_pol, by ='Accession')
table_descr <- table_filt[!duplicated(table_filt$Description), ]
row.names(table_descr) <- table_descr$Description
table_prot_descr <- table_descr[c(1:4, 10:12, 14:22, 24:25, 27:28, 30:32, 34:37), 14:15]
write.csv(table_prot_descr,'Dif_prot_final_sPLS-DA_descr_Pp_27.csv')
######Heatmap with accession####
# ComplexHeatmap of the significant proteins, rows labelled by accession ID.
library('ComplexHeatmap')
library('dendextend')
# Average-linkage clustering: columns (samples) on the transposed matrix,
# rows (proteins) on the matrix itself.
col_dend_acc <- hclust(dist(t(for_obt_heatmap_filt), method = 'euclidean'), method = 'average')
row_dend <- hclust(dist(for_obt_heatmap_filt, method = 'euclidean'), method = 'average')
# Group colour key: hf/hm/if/im = healthy/infected x female/male.
list_col <- list(Groups = c('hf' = "#d200ff", 'hm' = "#04fb04", 'if' = "#fb0432", 'im' = "#048afb"))
heat_annot <- HeatmapAnnotation(Groups = fact_pol$Differentiation, col = list_col,
                                annotation_name_gp= gpar(fontsize = 15),
                                annotation_legend_param = list(Groups = list(
                                  title = 'Groups', title_gp = gpar(fontsize = 15),
                                  labels_gp = gpar(fontsize = 12),
                                  at = c("hf", "hm", 'if', 'im'),
                                  labels = c("Healthy female", "Healthy male",
                                             'Infected female', 'Infected male'))))
# NOTE(review): column_title/row_title say "Variables"/"Samples" but matrix
# rows are proteins and columns are samples — confirm the intended labels.
heat_acc <- Heatmap(for_obt_heatmap_filt,
                    name = "Differential protein expression",
                    column_title = "Variables", row_title = "Samples",
                    row_names_gp = gpar(fontsize = 7),
                    cluster_rows = row_dend,
                    cluster_columns = color_branches(col_dend_acc, k = 3),
                    top_annotation = heat_annot)
######Heatmap with description#####
# Same heatmap but on the curated, description-labelled sub-matrix.
col_dend_descr <- hclust(dist(t(hm_descr), method = 'euclidean'), method = 'average')
heat_descr <- Heatmap(m_hm_descr,
                      name = "Expression",
                      column_title = "Variables", row_title = "Samples",
                      rect_gp = gpar(col = "white", lwd = 2),
                      row_names_gp = gpar(fontsize = 15),
                      row_names_max_width = max_text_width(
                        rownames(m_hm_descr),
                        gp = gpar(fontsize = 20)),
                      cluster_columns = color_branches(col_dend_descr, k = 2),
                      top_annotation = heat_annot,
                      heatmap_legend_param = list(labels_gp = gpar(fontsize = 12),
                                                  title_gp = gpar(fontsize = 15)))
dev.off()
# Export the description-labelled heatmap to TIFF.
tiff('heatmap_descr_gr.tiff', units="px", width=7000, height=4800, res=600, compression = 'none')
draw(heat_descr, heatmap_legend_side = "left", annotation_legend_side = "bottom")
dev.off()
#######Boxplots######
# Exploratory per-protein boxplots (log y-axis) of filtered expression across
# the four groups; the comment above each call names the protein and, where
# noted, its adjusted p-value from the DE table.
#Hemocyanin subunit 6 OS=Eriocheir sinensis 0.000554452
descr_fil_pol[descr_fil_pol$Accession == "K4EJG5|K4EJG5_ERISI", ]
boxplot(dat_filtered[c("K4EJG5|K4EJG5_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#FTCD_N domain-containing protein OS=Scylla olivacea 0.000226069
descr_fil_pol[descr_fil_pol$Accession == "A0A0P4WFR3|A0A0P4WFR3_SCYOL", ]
boxplot(dat_filtered[c("A0A0P4WFR3|A0A0P4WFR3_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
# Proteasome subunit alpha type OS=Scylla olivacea 0.004762967
accessions2_fil[3,]
descr_fil_pol[descr_fil_pol$Accession == "A0A0N7ZDN1|A0A0N7ZDN1_SCYOL", ]
boxplot(dat_filtered[c("A0A0N7ZDN1|A0A0N7ZDN1_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Glucosamine-6-phosphate isomerase 0.005143184
accessions2_fil[5,]
descr_fil_pol[descr_fil_pol$Accession == "A0A5B7DES9|A0A5B7DES9_PORTR", ]
boxplot(dat_filtered[c("A0A5B7DES9|A0A5B7DES9_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
View(descr_padj_pol)
#Glycogen debrancher OS=Scylla olivacea (?)
boxplot(dat_filtered[c("A0A0P4W3T4|A0A0P4W3T4_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Hemocyanin subunit 2
boxplot(dat_filtered[c("sp|C0HLU8|HCY_SCYSE"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Argininosuccinate lyase OS=Penaeus vannamei
boxplot(dat_filtered[c("A0A3R7MNM6|A0A3R7MNM6_PENVA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#UTP--glucose-1-phosphate uridylyltransferase
boxplot(dat_filtered[c("A0A0P4WF84|A0A0P4WF84_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Histone H4 OS=Tigriopus californicus
boxplot(dat_filtered[c("A0A553NPY7|A0A553NPY7_TIGCA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Calbindin-32-like
boxplot(dat_filtered[c("A0A6A7G3C7|A0A6A7G3C7_9CRUS"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Alpha-2-macroglobulin (Fragment)
boxplot(dat_filtered[c("A0A068J627|A0A068J627_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Fructose-bisphosphate aldolase
boxplot(dat_filtered[c("A0A0P4W855|A0A0P4W855_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Paxillin
boxplot(dat_filtered[c("A0A0P4W3C8|A0A0P4W3C8_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Actin-related protein = supressor of profilin
boxplot(dat_filtered[c("A0A0P4WLJ6|A0A0P4WLJ6_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
# Transcriptional activator protein Pur-alpha
boxplot(dat_filtered[c("A0A5B7E8T1|A0A5B7E8T1_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Ferritin
boxplot(dat_filtered[c("D6MXQ4|D6MXQ4_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Thioredoxin
boxplot(dat_filtered[c("C4N5V0|C4N5V0_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Glutathione transferase
boxplot(dat_filtered[c("A0A4D7AVC7|A0A4D7AVC7_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#Prostaglandin D synthase OS=Eriocheir sinensis
boxplot(dat_filtered[c("I6LWU2|I6LWU2_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
#14-3-3 protein
boxplot(dat_filtered[c("A0A385L4G6|A0A385L4G6_PROCL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
############sPLS-DA Volcano plot with proteins name############
# Volcano plot of the healthy-female vs infected-female contrast for the
# sPLS-DA-filtered protein set, labelled with short protein names.
tiff('fin_Vulcano_des_Polyascus_hf_if.tiff', units="px", width=6600, height=4800, res=600, compression = 'none')
EnhancedVolcano(descr_fil_pol,
                lab = descr_fil_pol$Protein,
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): the original (garbled) comment questioned whether to use adj.P.Val or raw P.Value here — confirm the choice
                pCutoff = 0.05,
                xlim = c(-3, 3),
                ylim = c(0, 3),
                FCcutoff = 1, # log2 fold-change cutoff; the original (garbled) comment discussed raising it to reduce label clutter
                title ="Healthy vs infected female crabs",
                labSize = 6,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
############Differential expression of all proteins#############
# Same limma pipeline as above but on the full normalised matrix
# (all proteins, no sPLS-DA pre-filtering).
X_pol <- model.matrix(~ 0 + fact_pol$Differentiation)
X_pol
colnames(X_pol) <- c('hlth_fem', 'hlth_male', 'inf_fem', 'inf_male')
fit_pol <- lmFit(dat_norm_pol, design = X_pol, method = "robust", maxit = 10000)
contrast.matrix <- makeContrasts(hlth_fem-hlth_male, hlth_fem-inf_fem, hlth_male-inf_male, hlth_male-inf_fem, inf_fem-inf_male, inf_male-hlth_fem, levels = X_pol)
fit2_pol <- contrasts.fit(fit_pol, contrast.matrix)
# Empirical Bayes statistics
efit_pol <- eBayes(fit2_pol)
#Differential expression table
topTable(efit_pol)
# NOTE(review): length() counts all matrix cells; it only bounds topTable's
# 'number' argument, so the full table is returned.
numGenes_pol <- length(dat_norm_pol)
full_list_efit_pol <- topTable(efit_pol, number = numGenes_pol)
write.csv(full_list_efit_pol,'Dif_expr_Polyascus_all_proteins.csv')
head(full_list_efit_pol)
##########Volcano-plots##############
library(EnhancedVolcano)
head(full_list_efit_pol)
# Volcano labelled by accession IDs (file name suggests im vs hf, but the
# plotted contrast is hlth_fem vs inf_fem — confirm).
tiff('Vulcano_Polyascus_im_hf.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
EnhancedVolcano(full_list_efit_pol,
                lab = rownames(full_list_efit_pol),
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): the original (garbled) comment questioned adj.P.Val vs raw P.Value — confirm
                pCutoff = 0.05,
                #xlim = c(-8, 10),
                #ylim = c(0, 5),
                FCcutoff = 1, # log2 fold-change cutoff; the original (garbled) comment discussed raising it to reduce label clutter
                title ="Vulcano",
                labSize = 4.0,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
############Volcano plots with proteins names############
# Re-read the all-protein DE table, join descriptions, and plot a named volcano.
experiment <- data.frame(read.csv("Vul_Dif_expr_Polyascus.csv"))
head(experiment)
# Columns 3 and 46 of 'dat' — presumably Accession and Description; verify.
descr <- dat[,c(3,46)]
head(descr)
descr_full_pol <- left_join(descr, experiment, by ='Accession')
# Strip the "OS=..." organism suffix to get a short protein name.
descr_full_pol$Protein <- as.factor(gsub("OS.*.", replacement = "", descr_full_pol$Description))
write.csv(descr_full_pol,'Dif_expr_Polyascus_description.csv')
head(descr_full_pol)
# NOTE(review): file name says hm_im but the plotted contrast is
# hlth_fem vs inf_fem — confirm which comparison is intended.
tiff('Vulcano_Polyascus_hm_im.tiff', units="px", width=6600, height=4800, res=600, compression = 'none')
EnhancedVolcano(descr_full_pol,
                lab = descr_full_pol$Protein,
                x = 'hlth_fem...inf_fem',
                y = 'adj.P.Val', # NOTE(review): the original (garbled) comment questioned adj.P.Val vs raw P.Value — confirm
                pCutoff = 0.05,
                xlim = c(-3, 3.5),
                ylim = c(0, 3),
                FCcutoff = 1, # log2 fold-change cutoff; the original (garbled) comment discussed raising it to reduce label clutter
                title ="Healthy vs infected female crabs",
                labSize = 4.7,
                boxedLabels = F,
                colAlpha = 1)
dev.off()
#Differential expressed proteins
p_above_pol <- experiment$adj.P.Val <= 0.05
sum(p_above_pol)
# NOTE(review): rows 1:26 taken as significant — assumes the table is sorted
# by adj.P.Val and sum(p_above_pol) == 26; confirm.
p_protein <- experiment[c(1:26), ]
accessions2 <- p_protein$Accession
write.csv(accessions2,'Dif_expr_26_prot.csv')
# Overwrites the vector above with accession + adj.P.Val (columns 1 and 11).
accessions2 <- p_protein[, c(1, 11)]
head(accessions2)
# Re-read a manually annotated version of the 26-protein list.
names <- data.frame(read.csv("Dif_expr_26_prot_with_desc.csv"))
names <- names[, -1]
View(names)
######Heatmaps#############
# gplots::heatmap.2 versions of the significant-protein heatmaps
# (all-protein DE results, not the sPLS-DA-filtered set).
library(gplots)
p_above_pol2 <- experiment[p_above_pol,]
head(p_above_pol2)
head(dat_norm_pol)
# Expression sub-matrix for the significant proteins.
for_obt_hm_all <- rownames(dat_norm_pol) %in% p_above_pol2$Accession
for_obt_hm_all_f <- dat_norm_pol[for_obt_hm_all, ]
head(for_obt_hm_all_f)
for_obt_hm_all_f1 <- as.data.frame(for_obt_hm_all_f)
for_obt_hm_all_f1$Accession <- rownames(for_obt_hm_all_f1)
descr_for_all <- names[, -1]
hm_all_filt <- left_join(for_obt_hm_all_f1, descr_for_all, by ='Accession')
View(hm_all_filt)
# Drop duplicated descriptions and label rows by description.
hm_all_descr <- hm_all_filt[!duplicated(hm_all_filt$Description), ]
row.names(hm_all_descr) <- hm_all_descr$Description
View(hm_all_descr)
# NOTE(review): hand-picked row indices — confirm they still match the table.
hm_descr2 <- hm_all_descr[c(1:6, 10:13, 15:16, 22:23), 1:12]
View(hm_descr2)
row.names(hm_descr2) <- gsub("OS.*.", replacement = "", row.names(hm_descr2))
head(hm_descr)
# Companion metadata table (columns 14:15) for the same rows.
prot_table <- hm_all_descr[c(1:6, 10:13, 15:16, 22:23), 14:15]
View(prot_table)
write.csv(prot_table,'Dif_prot_padj_Pp.csv')
# With Accession IDs as row labels
heatmap_obt <- colorpanel(75, low = '#00d2ff', high = '#ff004e')
heatmap.2(x = as.matrix(for_obt_hm_all_f), col = heatmap_obt, scale = 'none',
          distfun = function(x) dist(x, method = 'euclidean'),
          hclustfun = function(x) hclust(x, method = 'average'),
          key = TRUE, symkey = FALSE, density.info = 'none',
          trace = 'none', cexRow = 1, cexCol = 1, margins = c(6,9),
          keysize = 1)
#with description
heatmap.2(x = as.matrix(hm_descr2), col = heatmap_obt, scale = 'none',
          distfun = function(x) dist(x, method = 'euclidean'),
          hclustfun = function(x) hclust(x, method = 'average'),
          key = TRUE, symkey = FALSE, density.info = 'none',
          trace = 'none', cexRow = 1, cexCol = 1, margins = c(5,20),
          keysize = 0.7)
dev.off()
###############Boxplots for differential expressed proteins############
# Per-protein boxplots on the full normalised matrix, each saved to its own
# TIFF; file names carry the protein identity.
boxplot(dat_norm_pol[c("A0A2P2I8Q8|A0A2P2I8Q8_9CRUS"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
tiff('Hemocyanin_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("K4EJG5|K4EJG5_ERISI"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('FTCD_N domain-containing protein_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4WFR3|A0A0P4WFR3_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Proteasome subunit alpha type OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0N7ZDN1|A0A0N7ZDN1_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Glucosamine-6-phosphate isomerase_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A5B7DES9|A0A5B7DES9_PORTR"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Glycogen debrancher OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4W3T4|A0A0P4W3T4_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Hemocyanin subunit 2 OS=Carcinus aestuarii _boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("sp|P84293|HCY2_CARAE"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Argininosuccinate lyase OS=Penaeus vannamei_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A3R7MNM6|A0A3R7MNM6_PENVA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('UTP--glucose-1-phosphate uridylyltransferase _boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4WF84|A0A0P4WF84_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Uncharacterized protein OS=Scylla olivacea_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A0P4VS89|A0A0P4VS89_SCYOL"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
tiff('Histone H4 OS=Tigriopus californicus_boxplot_norm.tiff', units="in", width=11, height=8, res=600, compression = 'lzw')
boxplot(dat_norm_pol[c("A0A553NPY7|A0A553NPY7_TIGCA"),] ~ Differentiation, data = fact_pol,
        varwidth = TRUE, log = "y", las = 1)
dev.off()
|
# Demo script: start Rserve (for a Java client) and export ggplot2 charts
# to PNG so Java can display them.
install.packages("Rserve")
library(Rserve)
# NOTE(review): '--encoding' is passed without a value — likely a typo in the
# startup arguments; confirm the intended Rserve options.
Rserve(FALSE, port = 6311, args = '--RS-encoding utf8 --no-save --slave --encoding --internet2')
Rserve(args = "--RS- encoding utf8")
# png - java
setwd('C:\\Users\\Win10\\Desktop\\Backup\\국비반자료\\upload')
library(ggplot2)
mpg <- as.data.frame(mpg)
# Open a PNG device, draw the bar chart, then close all devices.
png(width = 500, height = 500, filename = '파일이름.png')
ggplot(mpg, aes(x = class, fill = class)) + geom_bar()
graphics.off()
# R is also a programming language, so print statements are available
print("출력문~~~~~~~~~~~")
setwd('C:\\Users\\Win10\\Desktop\\Backup\\국비반자료\\upload')
library(ggplot2)
mpg <- as.data.frame(mpg)
# Wrapped in print() so the calls also emit output when run non-interactively
# (e.g. via Rserve from Java).
print(png(width = 500, height = 500, filename = '파일이름1.png'))
print(ggplot(mpg, aes(x = class, fill = class)) + geom_bar())
print(graphics.off())
##### Pipeline -> Java (png output)
library(ggplot2)
library(dplyr)
# Mean highway mileage per manufacturer, plotted as a sorted horizontal bar chart.
d1 <- as.data.frame(mpg) %>%
  group_by(manufacturer) %>%
  summarise(mean_hwy = mean(hwy))
print(png(width = 500, height = 500, filename = 'd1.png'))
print(ggplot(d1, aes(x = reorder(manufacturer, -mean_hwy), y = mean_hwy, fill = manufacturer)) +
        geom_col() +
        coord_flip())
print(graphics.off())
|
/basic_r/code/05분석결과보고서/script03(Reserve).R
|
no_license
|
JaeHyun-Ban/GB_R
|
R
| false
| false
| 1,195
|
r
|
# Demo script (duplicate copy): start Rserve (for a Java client) and export
# ggplot2 charts to PNG so Java can display them.
install.packages("Rserve")
library(Rserve)
# NOTE(review): '--encoding' is passed without a value — likely a typo in the
# startup arguments; confirm the intended Rserve options.
Rserve(FALSE, port = 6311, args = '--RS-encoding utf8 --no-save --slave --encoding --internet2')
Rserve(args = "--RS- encoding utf8")
# png - java
setwd('C:\\Users\\Win10\\Desktop\\Backup\\국비반자료\\upload')
library(ggplot2)
mpg <- as.data.frame(mpg)
# Open a PNG device, draw the bar chart, then close all devices.
png(width = 500, height = 500, filename = '파일이름.png')
ggplot(mpg, aes(x = class, fill = class)) + geom_bar()
graphics.off()
# R is also a programming language, so print statements are available
print("출력문~~~~~~~~~~~")
setwd('C:\\Users\\Win10\\Desktop\\Backup\\국비반자료\\upload')
library(ggplot2)
mpg <- as.data.frame(mpg)
# Wrapped in print() so the calls also emit output when run non-interactively
# (e.g. via Rserve from Java).
print(png(width = 500, height = 500, filename = '파일이름1.png'))
print(ggplot(mpg, aes(x = class, fill = class)) + geom_bar())
print(graphics.off())
##### Pipeline -> Java (png output)
library(ggplot2)
library(dplyr)
# Mean highway mileage per manufacturer, plotted as a sorted horizontal bar chart.
d1 <- as.data.frame(mpg) %>%
  group_by(manufacturer) %>%
  summarise(mean_hwy = mean(hwy))
print(png(width = 500, height = 500, filename = 'd1.png'))
print(ggplot(d1, aes(x = reorder(manufacturer, -mean_hwy), y = mean_hwy, fill = manufacturer)) +
        geom_col() +
        coord_flip())
print(graphics.off())
# Management Strategy Evaluation (MSE) batch runner for NEA mackerel
# (WKMACLTMP). For each management option, simulates the stock forward,
# applies the harvest control rule, and saves the resulting objects.
rm(list=ls())
library(FLCore)
#library(FLFleet)
#library(FLAssess)
#library(FLSAM)
library(MASS)
#library(msm)
# Loop over the management options to evaluate (HCR variants 4-16 and 21).
for (opt in c(4:16,21))
{
cat("Management opt",opt,"\n")
# permanent changes?
perm<-F
if (perm)cat("!!! scenario with permanent changes")
# numer of years for the projections
prlength <- 40
# reduce the number of iterations to
short<-T
nits2<-1000
scen <- c("LTMP3.2mt") #
TACvarlim <- T # whether or not to apply the 20% limit on TAC change
Fvarlim <- T # whether or not to apply the 10% limit on Fbar change
BBscen <- "noBB" # banking borrowing options :
# "Banking" : always bank
# "Borrowing" : always borrow
# "AlternateBank" : bank first and then alternate
# "AlternateBorrow" : borrow first and then alternate
# "MinVar" : use BB to minise TAC variability
LastRecOp <- "geom" # option to replace the last estimated recruitment : "SAM", "geom", "RCT3"
# "SAM" = don't overwrite the SAM estimmate
# "geom" = replace by geomean 1990/(TaY-1)
# "RCT3" = replace by RCT3 output
# run the MSE
# Windows network paths, rewritten for Linux when detected.
codePath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/R code/Sensitivity perm/"
if(substr(R.Version()$os,1,3)== "lin")
{
codePath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",codePath)
}
wine <- F
# NOTE(review): codePath is immediately overwritten below, discarding the
# "Sensitivity perm" path set above — confirm which is intended.
path <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/"
inPath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/Data/"
codePath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/R code/"
outPath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/Results/"
if(substr(R.Version()$os,1,3)== "lin"){
path <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",path)
inPath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",inPath)
codePath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",codePath)
outPath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",outPath)
}
# Alternative local paths for running at home.
home<-F
if(home)
{
path <- "D://MSE/"
inPath <- "D://MSE/Data/"
codePath <- "D://MSE/R code/"
outPath <- "D://MSE/Results/"
}
# Results for the "permanent changes" scenario go to a separate directory.
outPathp<-outPath
if (perm) outPathp <- paste(outPath,"perm",sep="")
if (perm) outPathp <- paste(outPath,"perm",sep="")
#- Load objects
load(file=paste(outPath,"Mac.RData", sep=""))
load(file=paste(outPath,"Macctrl.RData", sep=""))
load(file=paste(outPathp,"biol.RData", sep=""))
load(file=paste(outPathp,"fishery.RData", sep=""))
load(file=paste(outPath,"propN.RData", sep=""))
load(file=paste(outPath,"propWt.RData", sep=""))
load(file=paste(outPath,"ctch.RData", sep=""))
load(file=paste(outPath,"surveys.RData", sep=""))
load(file=paste(outPathp,"stocks.RData", sep=""))
load(file=paste(outPath,"settings.RData", sep=""))
load(file=paste(outPath,"SRmod.RData", sep=""))
load(file=paste(outPath,"resRFinal.RData", sep=""))
for(i in 1:length(settings)) assign(x=names(settings)[i],value=settings[[i]])
source(paste(codePath,"functions.r", sep=""))
source(paste(codePath,"04_forecastScenarios.r", sep=""))
nits<-nits2
#- Settings
load(file=paste(outPath,"settings.RData",sep=""))
histMinYr <- settings$histMinYr
histMaxYr <- settings$histMaxYr
nyrs <- settings$nyrs
futureMaxYr <- settings$futureMaxYr
histPeriod <- settings$histPeriod
projPeriod <- settings$projPeriod
recrPeriod <- settings$recrPeriod
selPeriod <- settings$selPeriod
fecYears <- settings$fecYears
nits <- settings$nits
RecType<-settings$RecType
# set manually the projection period
projPeriod <-ac((histMaxYr+1):(histMaxYr+prlength))
futureMaxYr <- an(rev(projPeriod)[1] )
settings$projPeriod<-projPeriod
settings$futureMaxYr<-futureMaxYr
nits<-nits2
# run a shorter number of iterations : has to be 100
short<-T
if (short)
{
nits <- nits2
biol <- biol[,,,,,1:nits]
fishery <- iter(fishery,1:nits)
ctch <- ctch[,,,,,1:nits]
Rindex <- Rindex[,,,,,1:nits]
stocks <- stocks[,,,,,1:nits]
SRmod <- SRmod[1:nits,]
devR <- devR[,,,,,1:nits]
}
load(file=paste(outPath,"resNFinal_simple.RData", sep=""))
load(file=paste(outPath,"resFFinal_simple.RData", sep=""))
if (short)
{
devN<-devN[,,,,,1:nits]
devF<-devF[,,,,,1:nits]
}
#
#------------------------------------------------------------------------------#
# 0) setup TACS & F's and Survivors and maximum change in effort
#------------------------------------------------------------------------------#
maxEff <- 1000
TAC <- FLQuant(NA,dimnames=list(age="all",year=histMinYr:(futureMaxYr+3),unit=c("A"),season="all",area=1,iter=1:nits))
TAC[,ac(2000:2014),"A"] <- c(612,670,683,583,532,422,444,502,458,605,885,959,927,906,1396) *1000
#TACusage <- FLQuant(array(rep(c(rep(1,length(histMinYr:futureMaxYr)+3),rep(0.539755,length(histMinYr:futureMaxYr)+3),
# rep(1,length(histMinYr:futureMaxYr)+3),rep(1,length(histMinYr:futureMaxYr)+3)),nits),dim=c(1,length(histMinYr:futureMaxYr)+3,4,1,1,nits)),dimnames=dimnames(TAC[,ac(histMinYr:(futureMaxYr+3))]))
TACusage <- FLQuant(array(rep(c(rep(1,length(histMinYr:futureMaxYr)+3)),nits),dim=c(1,length(histMinYr:futureMaxYr)+3,1,1,1,nits)),dimnames=dimnames(TAC[,ac(histMinYr:(futureMaxYr+3))]))
HCRTAC <- TAC; HCRTAC[] <- NA; SSB <- HCRTAC[,,1]; HCRSSB <- SSB
stockstore <- stocks # object to store the perceived stocks. The object "stocks" being updated at each time step, it does keep track of the percieved stock
f <- FLQuant(NA,dimnames=dimnames(fishery@landings.n))
for(iFsh in dimnames(f)$unit)
f[,ac(histMinYr:histMaxYr),iFsh] <- sweep(harvest(stocks)[,ac(histMinYr:histMaxYr)],c(1,3:5),propN[,,iFsh],"*")
fSTF <- f; fSTF@.Data[] <- NA
survivors <- FLQuant(NA,dimnames=dimnames(n(biol)[,ac(2011)]))
#------------------------------------------------------------------------------#
# 1) Select Scenario's
#------------------------------------------------------------------------------#
mpOptions<-list(opt=opt,TACvarlim=TACvarlim,Fvarlim=Fvarlim,BBscen=BBscen)
sc.name<-paste(scen,opt,"_TACvarlim",TACvarlim,"_Fvarlim",Fvarlim,"_",BBscen,"_LastRec",LastRecOp,sep="")
outPath2<-paste(outPath,"/HCR sensitivity perm/",sc.name,"/",sep="")
source(paste(codePath,"07_scenarioDescription.r", sep=""))
mpPoints <- get(scen)[[which(names(get(scen))==paste("opt",opt,sep=""))]]
dir.create(outPath2)
#------------------------------------------------------------------------------#
# 2) Start running
#------------------------------------------------------------------------------#
# Annual simulation loop. Each year: (1) advance the "true" biology one year,
# (2) update the fishery catches, (3) generate an observed/perceived stock
# with assessment error, (4) optionally overwrite the last recruitment
# estimate, (5) run the short-term forecast + HCR to set the TAC, and
# (6) translate the TAC back into fishing mortality.
start.time <- Sys.time()
for (iYr in an(projPeriod)){
cat(iYr,"\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Define mortality rates for iYr-1 to calculate survivors to iYr
m <- m(biol)[,ac(iYr-1),,]
z <- (f[,ac(iYr-1),,,,]) + m
# - previous year recruitment
# Recruitment in iYr-1 is drawn from the stock-recruit model (SRmod) given
# last year's SSB, with recruitment deviates devR.
if ((iYr-1)>histMaxYr)
{
ssbtp<-ssbb(biol[,ac(iYr-1),,,,],f[,ac(iYr-1),,,,],stockstore[,ac(iYr-1),,,,])
n(biol)[1,ac(iYr-1)]<-B2RF(ssbtp,SRmod,iYr-1,devR)
}
#- Update biological model to iYr
#- Survivors
survivors <- n(biol)[,ac(iYr-1)] * exp(-z)
n(biol)[ac((range(biol,"min")+1):range(biol,"max")),ac(iYr),,] <- survivors[-dim(survivors)[1],,,,,]@.Data
#- Plusgroup
if (!is.na(range(biol,"plusgroup"))){
  n(biol)[ac(range(biol,"max")),ac(iYr),] <- n(biol)[ac(range(biol,"max")),ac(iYr),] + survivors[ac(range(biol,"max"))]
}
cat("\n Finished biology \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Update fishery to year iYr-1
# Baranov catch equation: landings = (F/Z) * N * (1 - exp(-Z)).
landings.n(fishery)[,ac(iYr-1)] <- sweep(sweep(f[,ac(iYr-1),,,,],c(1:2,4:6),z,"/"),c(1:2,4:6),n(biol)[,ac(iYr-1)]*(1-exp(-z)),"*")
#- Create stock object for assessment
yrmin1 <- iYr -1
TaY <- yrmin1 #Terminal assessment year
ImY <- TaY+1 #Intermediate Year
FcY <- TaY+2 #Forecast year
idxyrmin1 <- which(dimnames(biol@n)$year == yrmin1)
# tmp_biol <- biol[,1:idxyrmin1] #Same but faster as window(biol,histMinYr,yrmin1)
# tmp_fishery <- window(fishery,histMinYr,yrmin1)
# tmp_stocks <- stocks[,1:idxyrmin1] #Same but faster as window(stocks,histMinYr,yrmin1)
#
#- Update stocks to year iYr -1
# Observed catches = true catches times the catch observation multiplier 'ctch'.
# tmp_stocks <- updateStocks(tmp_stocks,tmp_fishery,yrmin1,tmp_biol,ctch)
stocks@catch.n[,ac(yrmin1)] <- unitSums(catch.n(fishery)[,ac(yrmin1)]) * ctch[,ac(yrmin1)]
stocks@landings.n[,ac(yrmin1)] <- unitSums(landings.n(fishery)[,ac(yrmin1)])* ctch[,ac(yrmin1)]
stocks@landings.wt[,ac(yrmin1)] <- stocks@catch.wt[,ac(yrmin1)]
stocks@discards.n[,ac(yrmin1)] <- 0
stocks@discards.wt[,ac(yrmin1)] <- 0
stocks@catch[,ac(yrmin1)] <- computeCatch(stocks)[,ac(yrmin1)]
stocks@landings[,ac(yrmin1)] <- computeLandings(stocks)[,ac(yrmin1)]
stocks@discards[,ac(yrmin1)] <- computeDiscards(stocks)[,ac(yrmin1)]
#- Overwrite results from update to stock again (but not for 2011, as that result is already known)
# if(iYr > an(projPeriod[1]))
# stocks <- tmp2stocks(stocks,tmp_stocks,TaY)
# #
# TaYtmp_stocks <- tmp_stocks[,ac(TaY)]
# TaYstocks <- stocks[,ac(TaY)]
# TaYtmp_stocks@stock <- TaYstocks@stock
# TaYtmp_stocks@stock.n <- TaYstocks@stock.n
# TaYtmp_stocks@harvest <- TaYstocks@harvest
# TaYtmp_stocks@name <- TaYstocks@name
# TaYtmp_stocks@desc <- TaYstocks@desc
# TaYtmp_stocks@range <- TaYstocks@range
# stocks[,ac(TaY)] <- TaYtmp_stocks[,ac(TaY)]
#
#
cat("\n Finished update stocks\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
# Simulated recruitment survey: true recruits x catchability, with lognormal
# observation noise from the survey variance.
Rindex@index[,ac(TaY)] <- exp ( log(n(biol)[1,ac(TaY)] * Rindex@index.q[,ac(TaY)]) + rnorm(nits,0,c(Rindex@index.var[,ac(TaY)]@.Data)))
cat("\n Finished update survey\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#-Do the assessment
# for the relevant years, apply the assessment errors
# Perceived stock = true state x lognormal assessment-error deviates.
stocks@stock.n[,ac(iYr-1)] <- biol@n[,ac(iYr-1)] * exp(devN[,1,ac(iYr),,,])
stocks@harvest[,ac(iYr-1)] <- f[,ac(iYr-1)] * exp(devF[,1,ac(iYr),,,])
stocks@stock <- computeStock(stocks)
# overwrite the last estimated recruitment?
if (LastRecOp == "geom") stocks@stock.n[1,ac(TaY)] <- exp(yearMeans(log(stocks@stock.n[1,ac(1990:(TaY-1))])))
if (LastRecOp == "RCT3")
{
iTer<-nits
# NOTE(review): the loop index 'i' is never used inside this loop — every
# pass extracts and writes iteration 'iTer' (== nits). This looks like it
# should be 'i' in iter(...) and in the assignment below; confirm before use.
for (i in 1:iTer)
{
# prepare init file for RCT3
R<-iter(stock.n(stocks)[ac(0),ac(1990:TaY)],iTer)
R<-c(R@.Data)
# -11 flags a missing value in the RCT3 input format.
R[length(R)]<--11
IBTS.index<-c(rep(-11,8),c(iter(Rindex@index[,ac(1998:TaY)],iTer)@.Data))
years<-1990:TaY
# remove files in the RCT3 folder !!!!
file.name<-paste(outPath2,"RCT3init.txt",sep="")
file.remove(file=file.name)
write.table(data.frame("RCT3 for NEA Mackerel"),file=file.name,quote=F,col.names=FALSE,row.names=FALSE,append=TRUE,sep="\t")
write.table(data.frame(1,length(R),2,"SAM","IBTS.index"),file=file.name,quote=F,col.names=FALSE,row.names=FALSE,append=TRUE,sep="\t")
write.table(data.frame(years,R,IBTS.index),file=file.name,col.names=FALSE,quote=F,row.names=FALSE,append=TRUE,sep="\t")
write.table(data.frame(c("SAM","IBTS.index")),file=file.name,col.names=FALSE,quote=F,row.names=FALSE,append=TRUE,sep="\t")
source(paste(codePath,"RCT3v4a.r",sep=""))
Rct3<-RCT3(file.name,logged=T)
RCT3res<-Rct3$output()
stocks@stock.n[1,ac(TaY),,,,iTer] <- RCT3res$Years$WAPred
}
}
# copy the perception of the stock in terminal assessment year to the stockstore object
stockstore[,ac(TaY)] <- stocks[,ac(TaY)]
# survivors for the short term forecast
dimnames(survivors)$year<-ac(iYr)
# recruitment for the first year in the short term forecast is the geometric mean of the historical time series
survivors[ac(0),] <- exp(yearMeans(log(stock.n(stocks)[1,ac(1990:(TaY-1))])))
#Set plusgroup at 12 (which is true plusgroup - recruitment)
survivors[-1,] <- FLQuant(setPlusGroup(stocks[,ac(TaY)]@stock.n * exp(-stocks[,ac(TaY)]@harvest-stocks[,ac(TaY)]@m),11)@.Data,
dimnames=list(age=dimnames(stocks@stock.n)$age[-1],year=ac(TaY),unit=dimnames(stocks@stock.n)$unit,
season=dimnames(stocks@stock.n)$season,area=dimnames(stocks@stock.n)$area,iter=1:nits))
cat("\n Finished stock assessment \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Project
# Short-term forecast + HCR: returns next year's TAC, HCR TAC, and SSBs.
res <- projectMac(stocks[,1:idxyrmin1],survivors,window(fishery,histMinYr,yrmin1),iYr,TAC,mpPoints$scen,NULL,histMaxYr,mpPoints,mpOptions)
TAC[,ac(FcY)] <- res[["TAC"]]
HCRTAC[,ac(FcY)] <- res[["HCRTAC"]]
HCRSSB[,ac(FcY)] <- res[["SSB"]][["HCRSSB"]][,ac(FcY)]
SSB[,ac(FcY)] <- res[["SSB"]][["SSB"]][,ac(FcY)]
if(iYr != rev(projPeriod)[1]) fSTF[,ac(FcY)] <- res[["fSTF"]]
cat("\n Finished forecast \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#-Calculate effort accordingly (assuming constant catchability)
# Convert the intermediate-year TAC into F-at-age, capped at maxEff.
f[,ac(ImY)] <- sweep(catch.sel(fishery)[,ac(ImY)],c(3,6),pmin(maxEff,f31tF(TAC*TACusage,biol,ImY,fishery)),"*")
cat("\n Finished effort calc \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Save each year the output
#save.image(file=paste(outPath,scen,"_",opt,"_",mpPoints$FadultA,"_",iYr,".RData",sep=""))
#save.image(file=paste("/home/hintz001/WKHELP_test2_",iYr,".RData",sep=""))
#save(file=paste("D:/WKHELP_test3_",iYr,".RData",sep=""))
}
# Fill the perceived-stock landings from the Baranov equation, then persist
# every result object for this scenario/option combination.
stockstore@landings.n <- stockstore@harvest * stockstore@stock.n * (1- exp(-stockstore@harvest - stockstore@m)) / (stockstore@harvest + stockstore@m)
stockstore@landings.wt<-stockstore@catch.wt
#-------------------------------------------------------------------------------
# 3): Save the objects
#-------------------------------------------------------------------------------
save(biol ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalbiol.RData", sep=""))
save(fishery ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalfishery.RData", sep=""))
save(Rindex ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalsurveys.RData", sep=""))
save(stocks ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalstocks.RData", sep=""))
save(stockstore ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalpercievedstocks.RData", sep=""))
save(TAC ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalTAC.RData", sep=""))
save(HCRTAC ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalHCRTAC.RData", sep=""))
save(f ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalf.RData", sep=""))
save(fSTF ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalfSTF.RData", sep=""))
save(SSB ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalSSB.RData", sep=""))
save(mpPoints ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalmpPoints.RData", sep=""))
save(settings ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalsettings.RData", sep=""))
}
|
/Sensitivity perm/03.2_run sim batch.r
|
no_license
|
brune001/WKMACLTMP2014
|
R
| false
| false
| 16,705
|
r
|
rm(list=ls())
library(FLCore)
#library(FLFleet)
#library(FLAssess)
#library(FLSAM)
library(MASS)
#library(msm)
for (opt in c(4:16,21))
{
cat("Management opt",opt,"\n")
# Run with permanent changes in biology? (controls which results folder is read)
perm <- FALSE
if (perm) cat("!!! scenario with permanent changes")
# number of years for the projections
prlength <- 40
# reduce the number of iterations to nits2 (applied below when short == TRUE)
short <- TRUE
nits2 <- 1000
scen <- c("LTMP3.2mt") #
TACvarlim <- TRUE # whether or not to apply the 20% limit on TAC change
Fvarlim <- TRUE # whether or not to apply the 10% limit on Fbar change
BBscen <- "noBB" # banking borrowing options :
# "Banking" : always bank
# "Borrowing" : always borrow
# "AlternateBank" : bank first and then alternate
# "AlternateBorrow" : borrow first and then alternate
# "MinVar" : use BB to minimise TAC variability
LastRecOp <- "geom" # option to replace the last estimated recruitment : "SAM", "geom", "RCT3"
# "SAM" = don't overwrite the SAM estimate
# "geom" = replace by geomean 1990/(TaY-1)
# "RCT3" = replace by RCT3 output
# Paths: Windows network drive by default, remapped under Linux.
# NOTE(review): codePath is first set to ".../Sensitivity perm/" and then
# overwritten with ".../R code/" a few lines below -- confirm which folder
# the scenario code is meant to be sourced from.
codePath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/R code/Sensitivity perm/"
if(substr(R.Version()$os,1,3)== "lin")
{
codePath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",codePath)
}
wine <- FALSE
path <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/"
inPath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/Data/"
codePath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/R code/"
outPath <- "W:/IMARES/Data/ICES-WG/WKMACLTMP/Results/"
if(substr(R.Version()$os,1,3)== "lin"){
path <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",path)
inPath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",inPath)
codePath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",codePath)
outPath <- sub("W:/IMARES/Data/ICES-WG/","/media/w/",outPath)
}
home <- FALSE
if(home)
{
path <- "D://MSE/"
inPath <- "D://MSE/Data/"
codePath <- "D://MSE/R code/"
outPath <- "D://MSE/Results/"
}
outPathp <- outPath
# "perm" runs read the operating-model objects from a separate results folder
if (perm) outPathp <- paste(outPath,"perm",sep="")
#- Load objects: conditioned operating model and settings produced by earlier
#  scripts. biol/fishery/stocks come from outPathp so that "perm" runs pick
#  up the permanent-change versions.
load(file=paste(outPath,"Mac.RData", sep=""))
load(file=paste(outPath,"Macctrl.RData", sep=""))
load(file=paste(outPathp,"biol.RData", sep=""))
load(file=paste(outPathp,"fishery.RData", sep=""))
load(file=paste(outPath,"propN.RData", sep=""))
load(file=paste(outPath,"propWt.RData", sep=""))
load(file=paste(outPath,"ctch.RData", sep=""))
load(file=paste(outPath,"surveys.RData", sep=""))
load(file=paste(outPathp,"stocks.RData", sep=""))
load(file=paste(outPath,"settings.RData", sep=""))
load(file=paste(outPath,"SRmod.RData", sep=""))
load(file=paste(outPath,"resRFinal.RData", sep=""))
# Promote every entry of the settings list to a variable in this environment
for(i in 1:length(settings)) assign(x=names(settings)[i],value=settings[[i]])
source(paste(codePath,"functions.r", sep=""))
source(paste(codePath,"04_forecastScenarios.r", sep=""))
nits<-nits2
#- Settings (re-extracted explicitly from the settings list)
load(file=paste(outPath,"settings.RData",sep=""))
histMinYr <- settings$histMinYr
histMaxYr <- settings$histMaxYr
nyrs <- settings$nyrs
futureMaxYr <- settings$futureMaxYr
histPeriod <- settings$histPeriod
projPeriod <- settings$projPeriod
recrPeriod <- settings$recrPeriod
selPeriod <- settings$selPeriod
fecYears <- settings$fecYears
nits <- settings$nits
RecType<-settings$RecType
# set manually the projection period (overrides the value loaded above,
# using prlength years after the last historical year)
projPeriod <-ac((histMaxYr+1):(histMaxYr+prlength))
futureMaxYr <- an(rev(projPeriod)[1] )
settings$projPeriod<-projPeriod
settings$futureMaxYr<-futureMaxYr
nits<-nits2
# run a shorter number of iterations : has to be 100
short<-T
if (short)
{
# Truncate every object to the first nits iterations
nits <- nits2
biol <- biol[,,,,,1:nits]
fishery <- iter(fishery,1:nits)
ctch <- ctch[,,,,,1:nits]
Rindex <- Rindex[,,,,,1:nits]
stocks <- stocks[,,,,,1:nits]
SRmod <- SRmod[1:nits,]
devR <- devR[,,,,,1:nits]
}
# Assessment error deviates (numbers-at-age and F-at-age)
load(file=paste(outPath,"resNFinal_simple.RData", sep=""))
load(file=paste(outPath,"resFFinal_simple.RData", sep=""))
if (short)
{
devN<-devN[,,,,,1:nits]
devF<-devF[,,,,,1:nits]
}
#
#------------------------------------------------------------------------------#
# 0) setup TACS & F's and Survivors and maximum change in effort
#------------------------------------------------------------------------------#
maxEff <- 1000
TAC <- FLQuant(NA,dimnames=list(age="all",year=histMinYr:(futureMaxYr+3),unit=c("A"),season="all",area=1,iter=1:nits))
# Historical TACs 2000-2014 (thousand tonnes, converted to tonnes)
TAC[,ac(2000:2014),"A"] <- c(612,670,683,583,532,422,444,502,458,605,885,959,927,906,1396) *1000
#TACusage <- FLQuant(array(rep(c(rep(1,length(histMinYr:futureMaxYr)+3),rep(0.539755,length(histMinYr:futureMaxYr)+3),
# rep(1,length(histMinYr:futureMaxYr)+3),rep(1,length(histMinYr:futureMaxYr)+3)),nits),dim=c(1,length(histMinYr:futureMaxYr)+3,4,1,1,nits)),dimnames=dimnames(TAC[,ac(histMinYr:(futureMaxYr+3))]))
TACusage <- FLQuant(array(rep(c(rep(1,length(histMinYr:futureMaxYr)+3)),nits),dim=c(1,length(histMinYr:futureMaxYr)+3,1,1,1,nits)),dimnames=dimnames(TAC[,ac(histMinYr:(futureMaxYr+3))]))
HCRTAC <- TAC; HCRTAC[] <- NA; SSB <- HCRTAC[,,1]; HCRSSB <- SSB
stockstore <- stocks # object to store the perceived stocks. The object "stocks" being updated at each time step, it does keep track of the percieved stock
f <- FLQuant(NA,dimnames=dimnames(fishery@landings.n))
# Partial F by fleet unit over the historical period
for(iFsh in dimnames(f)$unit)
f[,ac(histMinYr:histMaxYr),iFsh] <- sweep(harvest(stocks)[,ac(histMinYr:histMaxYr)],c(1,3:5),propN[,,iFsh],"*")
fSTF <- f; fSTF@.Data[] <- NA
survivors <- FLQuant(NA,dimnames=dimnames(n(biol)[,ac(2011)]))
#------------------------------------------------------------------------------#
# 1) Select Scenario's
#------------------------------------------------------------------------------#
mpOptions<-list(opt=opt,TACvarlim=TACvarlim,Fvarlim=Fvarlim,BBscen=BBscen)
sc.name<-paste(scen,opt,"_TACvarlim",TACvarlim,"_Fvarlim",Fvarlim,"_",BBscen,"_LastRec",LastRecOp,sep="")
outPath2<-paste(outPath,"/HCR sensitivity perm/",sc.name,"/",sep="")
source(paste(codePath,"07_scenarioDescription.r", sep=""))
mpPoints <- get(scen)[[which(names(get(scen))==paste("opt",opt,sep=""))]]
dir.create(outPath2)
#------------------------------------------------------------------------------#
# 2) Start running
#------------------------------------------------------------------------------#
start.time <- Sys.time()
for (iYr in an(projPeriod)){
cat(iYr,"\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Define mortality rates for iYr-1 to calculate survivors to iYr
m <- m(biol)[,ac(iYr-1),,]
z <- (f[,ac(iYr-1),,,,]) + m
# - previous year recruitment: for projection years, overwrite age-0 numbers
#   from the stock-recruit model (ssbb/B2RF are defined in the sourced
#   functions.r -- presumably SSB and SSB-to-recruitment; confirm there)
if ((iYr-1)>histMaxYr)
{
ssbtp<-ssbb(biol[,ac(iYr-1),,,,],f[,ac(iYr-1),,,,],stockstore[,ac(iYr-1),,,,])
n(biol)[1,ac(iYr-1)]<-B2RF(ssbtp,SRmod,iYr-1,devR)
}
#- Update biological model to iYr
#- Survivors: age a in iYr-1 survive total mortality z into age a+1 in iYr
survivors <- n(biol)[,ac(iYr-1)] * exp(-z)
n(biol)[ac((range(biol,"min")+1):range(biol,"max")),ac(iYr),,] <- survivors[-dim(survivors)[1],,,,,]@.Data
#- Plusgroup: oldest age accumulates its own survivors as well
if (!is.na(range(biol,"plusgroup"))){
n(biol)[ac(range(biol,"max")),ac(iYr),] <- n(biol)[ac(range(biol,"max")),ac(iYr),] + survivors[ac(range(biol,"max"))]
}
cat("\n Finished biology \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Update fishery to year iYr-1 (Baranov catch equation: F/Z * N * (1-exp(-Z)))
landings.n(fishery)[,ac(iYr-1)] <- sweep(sweep(f[,ac(iYr-1),,,,],c(1:2,4:6),z,"/"),c(1:2,4:6),n(biol)[,ac(iYr-1)]*(1-exp(-z)),"*")
#- Create stock object for assessment
yrmin1 <- iYr -1
TaY <- yrmin1 #Terminal assessment year
ImY <- TaY+1 #Intermediate Year
FcY <- TaY+2 #Forecast year
idxyrmin1 <- which(dimnames(biol@n)$year == yrmin1)
# tmp_biol <- biol[,1:idxyrmin1] #Same but faster as window(biol,histMinYr,yrmin1)
# tmp_fishery <- window(fishery,histMinYr,yrmin1)
# tmp_stocks <- stocks[,1:idxyrmin1] #Same but faster as window(stocks,histMinYr,yrmin1)
#
#- Update stocks to year iYr -1 (ctch applies catch observation multipliers)
# tmp_stocks <- updateStocks(tmp_stocks,tmp_fishery,yrmin1,tmp_biol,ctch)
stocks@catch.n[,ac(yrmin1)] <- unitSums(catch.n(fishery)[,ac(yrmin1)]) * ctch[,ac(yrmin1)]
stocks@landings.n[,ac(yrmin1)] <- unitSums(landings.n(fishery)[,ac(yrmin1)])* ctch[,ac(yrmin1)]
stocks@landings.wt[,ac(yrmin1)] <- stocks@catch.wt[,ac(yrmin1)]
stocks@discards.n[,ac(yrmin1)] <- 0
stocks@discards.wt[,ac(yrmin1)] <- 0
stocks@catch[,ac(yrmin1)] <- computeCatch(stocks)[,ac(yrmin1)]
stocks@landings[,ac(yrmin1)] <- computeLandings(stocks)[,ac(yrmin1)]
stocks@discards[,ac(yrmin1)] <- computeDiscards(stocks)[,ac(yrmin1)]
#- Overwrite results from update to stock again (but not for 2011, as that result is already known)
# if(iYr > an(projPeriod[1]))
# stocks <- tmp2stocks(stocks,tmp_stocks,TaY)
# #
# TaYtmp_stocks <- tmp_stocks[,ac(TaY)]
# TaYstocks <- stocks[,ac(TaY)]
# TaYtmp_stocks@stock <- TaYstocks@stock
# TaYtmp_stocks@stock.n <- TaYstocks@stock.n
# TaYtmp_stocks@harvest <- TaYstocks@harvest
# TaYtmp_stocks@name <- TaYstocks@name
# TaYtmp_stocks@desc <- TaYstocks@desc
# TaYtmp_stocks@range <- TaYstocks@range
# stocks[,ac(TaY)] <- TaYtmp_stocks[,ac(TaY)]
#
#
cat("\n Finished update stocks\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
# Generate the recruitment survey observation for TaY: true numbers times
# catchability, with multiplicative lognormal observation noise
Rindex@index[,ac(TaY)] <- exp ( log(n(biol)[1,ac(TaY)] * Rindex@index.q[,ac(TaY)]) + rnorm(nits,0,c(Rindex@index.var[,ac(TaY)]@.Data)))
cat("\n Finished update survey\n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#-Do the assessment
# for the relevant years, apply the assessment errors (lognormal deviates
# devN/devF emulate assessment noise instead of running a real assessment)
stocks@stock.n[,ac(iYr-1)] <- biol@n[,ac(iYr-1)] * exp(devN[,1,ac(iYr),,,])
stocks@harvest[,ac(iYr-1)] <- f[,ac(iYr-1)] * exp(devF[,1,ac(iYr),,,])
stocks@stock <- computeStock(stocks)
# overwrite the last estimated recruitment? ("geom" = geometric mean 1990:(TaY-1))
if (LastRecOp == "geom") stocks@stock.n[1,ac(TaY)] <- exp(yearMeans(log(stocks@stock.n[1,ac(1990:(TaY-1))])))
if (LastRecOp == "RCT3")
{
  # Replace the terminal assessment year (TaY) recruitment estimate by an
  # RCT3 prediction based on the IBTS recruitment index, iteration by
  # iteration.
  # BUG FIX: the original used iTer (== nits) instead of the loop index i
  # both when extracting iterations with iter(..., iTer) and when writing
  # stocks@stock.n[1,ac(TaY),,,,iTer], so every pass of the loop processed
  # and overwrote ONLY the last iteration.
  # Load the RCT3 implementation once, not on every iteration.
  source(paste(codePath, "RCT3v4a.r", sep = ""))
  iTer <- nits
  for (i in seq_len(iTer))
  {
    # SAM recruitment series for iteration i; -11 flags a missing value in
    # the RCT3 input format (the terminal year is the one to predict)
    R <- c(iter(stock.n(stocks)[ac(0), ac(1990:TaY)], i)@.Data)
    R[length(R)] <- -11
    IBTS.index <- c(rep(-11, 8), c(iter(Rindex@index[, ac(1998:TaY)], i)@.Data))
    years <- 1990:TaY
    # (Re)write the RCT3 init file for this iteration
    file.name <- paste(outPath2, "RCT3init.txt", sep = "")
    if (file.exists(file.name)) file.remove(file.name)
    write.table(data.frame("RCT3 for NEA Mackerel"), file = file.name, quote = FALSE, col.names = FALSE, row.names = FALSE, append = TRUE, sep = "\t")
    write.table(data.frame(1, length(R), 2, "SAM", "IBTS.index"), file = file.name, quote = FALSE, col.names = FALSE, row.names = FALSE, append = TRUE, sep = "\t")
    write.table(data.frame(years, R, IBTS.index), file = file.name, col.names = FALSE, quote = FALSE, row.names = FALSE, append = TRUE, sep = "\t")
    write.table(data.frame(c("SAM", "IBTS.index")), file = file.name, col.names = FALSE, quote = FALSE, row.names = FALSE, append = TRUE, sep = "\t")
    # Run RCT3 and store the weighted-average prediction for iteration i
    Rct3 <- RCT3(file.name, logged = TRUE)
    RCT3res <- Rct3$output()
    stocks@stock.n[1, ac(TaY), , , , i] <- RCT3res$Years$WAPred
  }
}
# copy the perception of the stock in terminal assessment year to the stockstore object
stockstore[,ac(TaY)] <- stocks[,ac(TaY)]
# survivors for the short term forecast (relabel the survivors quant to iYr)
dimnames(survivors)$year<-ac(iYr)
# recruitment for the first year in the short term forecast is the geometric mean of the historical time series
survivors[ac(0),] <- exp(yearMeans(log(stock.n(stocks)[1,ac(1990:(TaY-1))])))
#Set plusgroup at 12 (which is true plusgroup - recruitment)
survivors[-1,] <- FLQuant(setPlusGroup(stocks[,ac(TaY)]@stock.n * exp(-stocks[,ac(TaY)]@harvest-stocks[,ac(TaY)]@m),11)@.Data,
dimnames=list(age=dimnames(stocks@stock.n)$age[-1],year=ac(TaY),unit=dimnames(stocks@stock.n)$unit,
season=dimnames(stocks@stock.n)$season,area=dimnames(stocks@stock.n)$area,iter=1:nits))
cat("\n Finished stock assessment \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Project: apply the HCR / management procedure to set the TAC for FcY
#  (projectMac is defined in the sourced 04_forecastScenarios.r)
res <- projectMac(stocks[,1:idxyrmin1],survivors,window(fishery,histMinYr,yrmin1),iYr,TAC,mpPoints$scen,NULL,histMaxYr,mpPoints,mpOptions)
TAC[,ac(FcY)] <- res[["TAC"]]
HCRTAC[,ac(FcY)] <- res[["HCRTAC"]]
HCRSSB[,ac(FcY)] <- res[["SSB"]][["HCRSSB"]][,ac(FcY)]
SSB[,ac(FcY)] <- res[["SSB"]][["SSB"]][,ac(FcY)]
if(iYr != rev(projPeriod)[1]) fSTF[,ac(FcY)] <- res[["fSTF"]]
cat("\n Finished forecast \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#-Calculate effort accordingly (assuming constant catchability), capped at maxEff
f[,ac(ImY)] <- sweep(catch.sel(fishery)[,ac(ImY)],c(3,6),pmin(maxEff,f31tF(TAC*TACusage,biol,ImY,fishery)),"*")
cat("\n Finished effort calc \n")
cat(paste("\n Time running",round(difftime(Sys.time(),start.time,unit="mins"),0),"minutes \n"))
#- Save each year the output
#save.image(file=paste(outPath,scen,"_",opt,"_",mpPoints$FadultA,"_",iYr,".RData",sep=""))
#save.image(file=paste("/home/hintz001/WKHELP_test2_",iYr,".RData",sep=""))
#save(file=paste("D:/WKHELP_test3_",iYr,".RData",sep=""))
}
# Fill perceived landings with the Baranov catch equation from the perceived
# stock (F/Z * N * (1-exp(-Z)))
stockstore@landings.n <- stockstore@harvest * stockstore@stock.n * (1- exp(-stockstore@harvest - stockstore@m)) / (stockstore@harvest + stockstore@m)
stockstore@landings.wt<-stockstore@catch.wt
#-------------------------------------------------------------------------------
# 3): Save the objects
#-------------------------------------------------------------------------------
save(biol ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalbiol.RData", sep=""))
save(fishery ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalfishery.RData", sep=""))
save(Rindex ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalsurveys.RData", sep=""))
save(stocks ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalstocks.RData", sep=""))
save(stockstore ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalpercievedstocks.RData", sep=""))
save(TAC ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalTAC.RData", sep=""))
save(HCRTAC ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalHCRTAC.RData", sep=""))
save(f ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalf.RData", sep=""))
save(fSTF ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalfSTF.RData", sep=""))
save(SSB ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalSSB.RData", sep=""))
save(mpPoints ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_FinalmpPoints.RData", sep=""))
save(settings ,file=paste(outPath2,"/",scen,opt,mpPoints$FadultA,"_Finalsettings.RData", sep=""))
}
|
## Build a special "matrix": a list of four accessor closures that share an
## environment caching the matrix itself and (once computed) its inverse.
## Setting a new matrix invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    ## Replace the stored matrix and drop the stale cached inverse
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    ## Return the stored matrix
    get = function() x,
    ## Store a computed inverse for later reuse
    setinverse = function(solve) inv <<- solve,
    ## Return the cached inverse, or NULL if none has been stored yet
    getinverse = function() inv
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously cached inverse is reused (announced with a message);
## otherwise the inverse is computed with solve() and stored for next time.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
rafaruizs/ProgrammingAssignment2
|
R
| false
| false
| 1,952
|
r
|
## This function creates a list of 4 subfunctions: set, get, setinverse and getinverse
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  ## The subfunction set assigns a new value to the matrix and clears the cache
  set <- function(y) {
    x <<- y ## Allows the subfunction to alter the x and i variables in the parent environment,
    ## the makeCacheMatrix() function itself
    i <<- NULL
  }
  ## The subfunction get takes the stored matrix in the variable x and returns it
  get <- function() x
  ## The setinverse subfunction stores the value of the result, to use it as a cache later
  setinverse <- function(solve) i <<- solve
  ## The getinverse subfunction returns the value previously stored by setinverse (NULL if none)
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function gets the inverse of a matrix using the function solve
## If the inverse has been calculated before, the function extracts the value from
## the "cache" and informs the user that it is reusing the previously calculated value
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse() ## Fetch the cached inverse (NULL if not yet computed)
  if(!is.null(i)) { ## If the value already exists then extract and return it
    message("getting cached data")
    return(i)
  }
  data <- x$get() ## Save the value of the function get
  i <- solve(data, ...) ## calculate the inverse of the matrix and save it
  x$setinverse(i) ## Save the value using the function setinverse
  i ## Output the result of the calculation or the old value
}
|
# ===============================================================================
# * FILE: 03_rochester.R
# * PURPOSE: Import and Analyze PUMS data from 2018
# * AUTHORS: Andrea Ringer
# * DATE CREATED: June 9, 2020
# * DATE LAST MODIFIED: July 2, 2020
# ===============================================================================
library(readr)
library(tidyverse)
library(survey)
library(srvyr)
library(Hmisc)
library(cwhmisc)
library(collapse)
# NOTE(review): hard-coded absolute setwd() ties the script to one machine --
# consider a project-relative path or here::here().
setwd("/Users/andrearinger/Documents/PUMS Data")
#-------------------------------------------------------------------------------
# Load PUMS Household Data and Select Rochester PUMAs
#-------------------------------------------------------------------------------
# guess_max raised so readr sees enough rows to infer column types correctly
pums_hh <- read_csv("psam_h36_monroe.csv", guess_max=12000)
pums_all <- read_csv("psam_all_monroe.csv", guess_max = 12000)
# PUMAs 902/903 cover the City of Rochester subset of Monroe County
hh_roc <- pums_hh %>%
filter(PUMA == "902" | PUMA == "903")
all_roc <- pums_all %>%
filter(PUMA == "902" | PUMA == "903")
hh_monroe <- pums_hh
#-------------------------------------------------------------------------------
# Clean Person and Household Data
#-------------------------------------------------------------------------------
# Separate year (first 4 characters) and ID number (remainder) from SERIALNO.
# BUG FIX: the original referenced p_roc, which is never defined in this
# script; the person-level Rochester subset created above is named all_roc.
all_roc$year <- substr(as.character(all_roc$SERIALNO), 1, 4)
all_roc$id <- substr(as.character(all_roc$SERIALNO), 5, 25)
hh_roc$year <- substr(as.character(hh_roc$SERIALNO), 1, 4)
hh_roc$id <- substr(as.character(hh_roc$SERIALNO), 5, 25)
#-------------------------------------------------------------------------------
# GRPIP and HINCP: Gross rent as a percentage of HH income; HH income
#-------------------------------------------------------------------------------
# Generate categories for rent burden from GRPIP (gross rent as % of income):
# 1 = <=30%, 2 = 30-50%, 3 = 50-60%, 4 = 60-80%, 5 = 80-100%, 6 = >100%
hh_roc$GRPIP_cat <- cut(hh_roc$GRPIP, breaks = c(0, 30, 50, 60, 80, 100, 10000000), labels = c(1,2,3,4,5,6), right = TRUE)
summary(hh_roc$GRPIP_cat)
# Weighted household counts and shares per category (WGTP = household weight)
tapply(hh_roc$WGTP, list(hh_roc$GRPIP_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$GRPIP_cat), sum))
# Recorded results:
# <30% income: 42.8%
# 30-50% income: 24.8%
# 50-60% income: 5.7%
# 60-80% income: 7.9%
# 80-100% income: 5.2%
# >100% income: 13.5%
# Create adjusted household income variable (HINCP scaled by ADJINC, which is
# stored multiplied by 1,000,000 in the PUMS file, hence the division)
hh_roc$ADJINC_1 <- hh_roc$ADJINC/1000000
hh_roc$HINCP_adj <- hh_roc$HINCP*hh_roc$ADJINC_1
# Compute Monroe County weighted median income
hh_monroe$ADJINC_1 <- hh_monroe$ADJINC/1000000
# BUG FIX: the original multiplied hh_monroe$HINCP by hh_roc$ADJINC_1, i.e.
# by the adjustment factors of a DIFFERENT (smaller, row-misaligned) data
# frame; each household must be scaled by its own adjustment factor.
hh_monroe$HINCP_adj <- hh_monroe$HINCP*hh_monroe$ADJINC_1
w_median_mon <- w.median(hh_monroe$HINCP_adj, hh_monroe$WGTP)
# Generate Monroe County AMI categories:
# 1 = <=30% AMI, 2 = 30-50%, 3 = 50-80%, 4 = >80%
hh_roc$ami_mon <- hh_roc$HINCP_adj/w_median_mon
hh_roc$ami_mon_cat <- cut(hh_roc$ami_mon, breaks = c(0, 0.3, 0.5, 0.8, 10000000), labels = c(1,2,3,4), right = TRUE)
tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum))
# Weighted total of households with a GRPIP value
gross_rent_perc <- select(hh_roc, GRPIP, WGTP) %>% tally(wt=WGTP)
# Weighted histogram of rent burden (GRPIP is top-coded at 101 in PUMS --
# presumably; confirm against the PUMS data dictionary)
ggplot(hh_roc, aes(x=GRPIP, weight = WGTP)) +
geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0) # Gross rent as % HH income
# Why do so many people spend over 100% of income on rent?
# For those with GRPIP==101, weighted histogram of household incomes over last 12 months
hh_roc %>%
filter(GRPIP==101) %>%
ggplot(aes(x=HINCP, weight = WGTP)) + geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0)
# For those with GRPIP==101, weighted summary of household incomes over last 12 months
hh_roc %>%
select(WGTP, GRPIP, HINCP) %>%
filter(GRPIP==101) %>%
summary(HINCP, wt=WGTP)
# Graph of income related to rent burden
hh_roc %>%
ggplot() +
geom_point(aes(x=HINCP, y=GRPIP, size=WGTP), shape=21) +
xlim(0,100000) # graph view restricted to HINCP from $0-$100,000
# Further analysis: put in 30% line, calculate percent of HHs above line
# Histogram of household income
hh_roc %>%
ggplot(aes(x=HINCP, weight=WGTP)) +
geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0)
#===============================================================================
# Characteristics of those who are rent burdened - HH data
#===============================================================================
# Subsets by rent-burden level, used for the tabulations and SEs below
hh_rental <- hh_roc %>%
filter(hh_roc$TEN==3) # Rental HHs (excludes homeowner HHs)
rent_bur_no <- hh_roc %>% # <30% income on rent
filter(GRPIP_cat==1)
rent_bur <- hh_roc %>%
filter(GRPIP_cat %in% 2:6) # >=30% income on rent
summary(rent_bur$GRPIP_cat)
rent_bur_30to50 <- hh_roc %>%
filter(GRPIP_cat==2) # >=30% and <50% income on rent
summary(rent_bur_30to50$GRPIP_cat)
rent_bur_50to101 <- hh_roc %>%
filter(GRPIP_cat %in% 3:6) # >50% income on rent
summary(rent_bur_50to101$GRPIP_cat)
#-------------------------------------------------------------------------------
# FES: Family Type and Employment Status
#-------------------------------------------------------------------------------
# PUMS FES codes:
# 1-4 Married Couple Family
# 1 = Husband and wife in LF; 2 = Husband in LF, wife not in LF
# 3 = Husband not in LF, wife in LF; 4 = Neither husband nor wife in LF
# 5-8 Other Family
# 5 = Male HHer, no wife present, LF; 6 = Male HHer, no wife present, not in LF
# 7 = Female HHer, no husband present, LF; 8 = Female HHer, no husband present, not in LF
# Weighted counts and shares of family type by burden group
# All rental HHs
tapply(hh_rental$WGTP, list(hh_rental$FES), sum)
prop.table(tapply(hh_rental$WGTP, list(hh_rental$FES), sum))
# Any rent burdened households (>=30% income)
tapply(rent_bur$WGTP, list(rent_bur$FES), sum)
prop.table(tapply(rent_bur$WGTP, list(rent_bur$FES), sum))
# Non rent burdened households (<30% income)
tapply(rent_bur_no$WGTP, list(rent_bur_no$FES), sum)
prop.table(tapply(rent_bur_no$WGTP, list(rent_bur_no$FES), sum))
# Rent burdened households (>=30% and <50% income)
tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$FES), sum)
prop.table(tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$FES), sum))
# Severely rent burdened households >=50%
tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$FES), sum)
prop.table(tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$FES), sum))
#--------------- STANDARD ERRORS - All Rental Family HHs -----------------------
# Replicate-weight standard errors: point estimate from WGTP, SE from the 80
# replicate weights WGTP1..WGTP80 via sqrt((4/80) * sum((rep - est)^2))
# (presumably the ACS PUMS successive-difference replication formula --
# confirm against the PUMS accuracy documentation). The proportion SE uses
# the ratio-SE formula (1/X)*sqrt(se_Y^2 - (Y^2/X^2)*se_X^2).
rep.names <- paste0('WGTP', 1:80)
# All Rental Family HHs
# NOTE(review): `all` shadows base::all() from here on.
all <- hh_rental %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=623.25
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [21961, 24005]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [21761, 24205]
# Rental HHs: married couple, at least one HHer in LF
x <- hh_rental %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=284.39
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3523, 4637]
# Proportion standard error: married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0114
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.155, 0.2]
# Rental HHs: single male headed in LF
x <- hh_rental %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=216.69
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1179, 2029]
# Proportion standard error: single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00924
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0517, 0.0879]
# Rental HHs: single male headed not in LF
x <- hh_rental %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=137.26
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [587, 1125]
# Proportion standard error: single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00589
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0257, 0.0488]
# Rental HHs: single female headed in LF
x <- hh_rental %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=422.68
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [8890, 10546]
# Proportion standard error: single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0144
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.395, 0.451]
# Rental HHs: single female headed not in LF
x <- hh_rental %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=428.09
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4882, 6560]
# Proportion standard error: single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0174
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.215, 0.283]
# Rental HHs: married couple family not in LF
x <- hh_rental %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=167.22
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [676, 1332]
# Proportion standard error: married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00718
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0296, 0.0578]
#--------------- STANDARD ERRORS - Any Rent-Burdened Family HHs ----------------
# Same replicate-weight SE pattern as the previous section, restricted to
# households paying >=30% of income on rent (rent_bur).
rep.names <- paste0('WGTP', 1:80)
# Any Rent Burdened Family HHs (>=30% of income on rent)
all <- rent_bur %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=536.06
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [13445, 15203]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [13273, 15375]
# Any Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=138.06
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [856, 1398]
# Prop SE: Any rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00918
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0607, 0.0967]
# Any rent-burdened family HHs: single male headed in LF
x <- rent_bur %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=126.58
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [362, 858]
# Prop SE: Any rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00869
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0255, 0.0596]
# Any rent-burdened family HHs: single male headed not in LF
x <- rent_bur %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=119.48
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [326, 794]
# Prop SE: Any rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00821
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.023, 0.0552]
# Any rent-burdened family HHs: single female headed in LF
x <- rent_bur %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=435.02
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5941, 7647]
# Prop SE: Any rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0246
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.426, 0.523]
# Any rent-burdened family HHs: single female headed not in LF
x <- rent_bur %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=401.75
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3809, 5383]
# Prop SE: Any rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0253
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.271, 0.371]
# Any rent-burdened family HHs: married couple family not in LF
x <- rent_bur %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=127.30
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [387, 887]
# Prop SE: Any rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00873
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0274, 0.0616]
#--------------- STANDARD ERRORS - Non Rent Burdened Family HHs ----------------
rep.names <- paste0('WGTP', 1:80)
# Non-Rent Burdened Family HHs (<30% of income on rent)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
all <- rent_bur_no %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=455.87
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [7569, 9065]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [7423, 9211]
# Non Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_no %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=273.28
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2417, 3489]
# Prop SE: Non Rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0265
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.303, 0.407]
# Non Rent-burdened family HHs: single male headed in LF
x <- rent_bur_no %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=175.46
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [650, 1338]
# Prop SE: Non rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0201
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0802, 0.1588]
# Non Rent-burdened family HHs: single male headed not in LF
x <- rent_bur_no %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=73.79
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [108, 398]
# Prop SE: Non rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00871
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0133, 0.0475]
# Non Rent-burdened family HHs: single female headed in LF
x <- rent_bur_no %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=296.99
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2330, 3494]
# Prop SE: Non rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0301
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.291, 0.409]
# Non Rent-burdened family HHs: single female headed not in LF
x <- rent_bur_no %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=160.19
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [546, 1174]
# Prop SE: Non rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0184
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0673, 0.1395]
# Non Rent-burdened family HHs: married couple family not in LF
x <- rent_bur_no %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=93.71
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [161, 529]
# Prop SE: Non rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.0110
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0199, 0.0631]
#--------------- STANDARD ERRORS - Rent Burdened Family HHs --------------------
rep.names <- paste0('WGTP', 1:80)
# Rent Burdened Family HHs (>=30% and <50% income on rent)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
all <- rent_bur_30to50 %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=336.43
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [5141, 6245]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [5034, 6352]
# Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_30to50 %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=102.92
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [417, 821]
# Prop SE: Rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0169
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0756, 0.1419]
# Rent-burdened family HHs: single male headed in LF
x <- rent_bur_30to50 %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=104.47
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [221, 631]
# Prop SE: rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0178
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0399, 0.1097]
# Rent-burdened family HHs: single male headed not in LF
x <- rent_bur_30to50 %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=75.45
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [53.1, 348.9]
# Prop SE: rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0131
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.00965, 0.06096]
# Rent-burdened family HHs: single female headed in LF
x <- rent_bur_30to50 %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=300.64
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2431, 3609]
# Prop SE: rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0425
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.447, 0.614]
# Rent-burdened family HHs: single female headed not in LF
x <- rent_bur_30to50 %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=141.70
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [785, 1341]
# Prop SE: rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0223
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.143, 0.23]
# Rent-burdened family HHs: married couple family not in LF
x <- rent_bur_30to50 %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=92.89
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [182, 546]
# Prop SE: rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.0159
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0328, 0.095]
#--------------- STANDARD ERRORS - Severely Rent-Burdened Family HHs -----------
rep.names <- paste0('WGTP', 1:80)
# Severely Rent Burdened Family HHs (>=50% of income on rent)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
all <- rent_bur_50to101 %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=471.60
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [7858, 9404]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [7707, 9555]
# Severely Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_50to101 %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=109.73
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [293, 723]
# Prop SE: Severely rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0123
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0347, 0.083]
# Severely rent-burdened family HHs: single male headed in LF
x <- rent_bur_50to101 %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=67.65
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [51.4, 316.6]
# Prop SE: Severely rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0078
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0061, 0.0365]
# Severely rent-burdened family HHs: single male headed not in LF
x <- rent_bur_50to101 %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=101.40
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [160, 558]
# Prop SE: Severely rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0115
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.019, 0.0642]
# Severely rent-burdened family HHs: single female headed in LF
x <- rent_bur_50to101 %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=316.17
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3154, 4394]
# Prop SE: Severely rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0278
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.383, 0.492]
# Severely rent-burdened family HHs: single female headed not in LF
x <- rent_bur_50to101 %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=370.62
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2807, 4259]
# Prop SE: Severely rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0367
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.337, 0.481]
# Severely rent-burdened family HHs: married couple family not in LF
x <- rent_bur_50to101 %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=75.07
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [126, 420]
# Prop SE: Severely rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00852
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0149, 0.04830]
#-------------------------------------------------------------------------------
# HHT: Household/Family Type
#-------------------------------------------------------------------------------
# 1 = Married couple HH
# 2 = Other family HH: Male HHer, no spouse present
# 3 = Other family HH: Female HHer, no spouse present
# 4-7 Non family HH
# 4 = Male HHer, living alone; 5 = Male HHer, not living alone
# 6 = Female HHer, living alone; 7 = Female HHer, not living alone
# Weighted HH counts and shares by HHT, one pair of tabulations per burden group.
# All rental HHs
with(hh_rental, tapply(WGTP, list(HHT), sum))
with(hh_rental, prop.table(tapply(WGTP, list(HHT), sum)))
# Any rent burdened households (>=30% income)
with(rent_bur, tapply(WGTP, list(HHT), sum))
with(rent_bur, prop.table(tapply(WGTP, list(HHT), sum)))
# Non rent burdened households (<30% income)
with(rent_bur_no, tapply(WGTP, list(HHT), sum))
with(rent_bur_no, prop.table(tapply(WGTP, list(HHT), sum)))
# Rent burdened households (>=30% and <50%)
with(rent_bur_30to50, tapply(WGTP, list(HHT), sum))
with(rent_bur_30to50, prop.table(tapply(WGTP, list(HHT), sum)))
# Severely rent burdened households >=50%
with(rent_bur_50to101, tapply(WGTP, list(HHT), sum))
with(rent_bur_50to101, prop.table(tapply(WGTP, list(HHT), sum)))
#------------------ STANDARD ERRORS - All Rental HHs ---------------------------
rep.names <- paste0('WGTP', 1:80)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
# All Rental HHs
all <- hh_rental %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=772.67
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [52581, 55115]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [52334, 55362]
# Rental HHs: Married couple
x <- hh_rental %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=317.50
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4633, 5877]
# Prop SE: married couple / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00572
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0864, 0.1088]
# Rental HHs: single female headed
x <- hh_rental %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=497.80
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [14463, 16415]
# Prop SE: single female headed / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00828
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.27, 0.303]
# Rental HHs: male living alone
x <- hh_rental %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=507.56
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [11324, 13314]
# Prop SE: male living alone / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00884
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.211, 0.246]
# Rental HHs: female living alone
x <- hh_rental %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=508.73
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [10750, 12744]
# Prop SE: female living alone / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0089
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.201, 0.236]
# Rental HHs: other HH type
x <- hh_rental %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=407.67
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [8289, 9887]
# Prop SE: other HH type / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00717
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.155, 0.1830]
#------------------ STANDARD ERRORS - Any rent-burdened HHs --------------------
rep.names <- paste0('WGTP', 1:80)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
# Any rent-burdened HHs (>=30% of income on rent)
all <- rent_bur %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=735.47
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [28727, 31139]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [28491, 31375]
# Any rent-burdened HHs: Married couple
x <- rent_bur %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=179.92
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1441, 2147]
# Prop SE: any rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00583
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0485, 0.0714]
# Any rent-burdened HHs: single female headed
x <- rent_bur %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=539.32
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [10333, 12447]
# Prop SE: Any rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0154
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.35, 0.411]
# Any rent-burdened HHs: male living alone
x <- rent_bur %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=432.26
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5871, 7565]
# Prop SE: Any rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0134
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.198, 0.251]
# Any rent-burdened HHs: female living alone
x <- rent_bur %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=418.25
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [6136, 7776]
# Prop SE: any rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0128
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.207, 0.257]
# Any rent-burdened HHs: other HH type
x <- rent_bur %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=245.90
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2593, 3557]
# Prop SE: any rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0078
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0874, 0.1181]
#------------------ STANDARD ERRORS - Non rent-burdened HHs --------------------
rep.names <- paste0('WGTP', 1:80)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
# Non rent-burdened HHs (<30% of income on rent)
all <- rent_bur_no %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=801.20
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [21119, 23747]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [20863, 24003]
# Non rent-burdened HHs: Married couple
x <- rent_bur_no %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=284.47
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2881, 3997]
# Prop SE: non rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0114
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.131, 0.176]
# Non rent-burdened HHs: single female headed
x <- rent_bur_no %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=327.07
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3131, 4413]
# Prop SE: Non rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0133
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.142, 0.194]
# Non rent-burdened HHs: male living alone
x <- rent_bur_no %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=415.52
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4206, 5834]
# Prop SE: Non rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0167
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.191, 0.257]
# Non rent-burdened HHs: female living alone
x <- rent_bur_no %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=375.42
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3647, 5119]
# Prop SE: Non rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0152
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.166, 0.225]
# Non rent-burdened HHs: other HH type
x <- rent_bur_no %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=391.06
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5053, 6585]
# Prop SE: non rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0148
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.23, 0.288]
#------------------ STANDARD ERRORS - Rent-burdened HHs ------------------------
# FIX: original line had a stray trailing "0" after paste0(...) which is an R
# parse error and would prevent the whole file from being source()d.
rep.names <- paste0('WGTP', 1:80)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
# Rent-burdened HHs (>=30% and <50% of income on rent)
all <- rent_bur_30to50 %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=548.70
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [12065, 13865]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [11890, 14040]
# Rent-burdened HHs: Married couple
x <- rent_bur_30to50 %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=134.31
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [733, 1259]
# Prop SE: rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0098
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0575, 0.0961]
# Rent-burdened HHs: single female headed
x <- rent_bur_30to50 %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=303.21
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3489, 4677]
# Prop SE: Rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0192
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.277, 0.353]
# Rent-burdened HHs: male living alone
x <- rent_bur_30to50 %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=314.68
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2738, 3972]
# Prop SE: Rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0217
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.216, 0.301]
# Rent-burdened HHs: female living alone
x <- rent_bur_30to50 %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=269.37
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2291, 3347]
# Prop SE: Rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0186
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.181, 0.254]
# Rent-burdened HHs: other HH type
x <- rent_bur_30to50 %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=216.78
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1287, 2137]
# Prop SE: rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0158
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.101, 0.163]
#------------------ STANDARD ERRORS - Severely Rent-burdened HHs ---------------
# FIX: original line had a stray trailing "0" after paste0(...) which is an R
# parse error and would prevent the whole file from being source()d.
rep.names <- paste0('WGTP', 1:80)
# NOTE(review): SE = sqrt((4/80)*sum((replicate est - point est)^2)) over WGTP1-80
# (ACS successive-difference replication); 1.64/1.96 ~ 90%/95% normal criticals.
# se_prop is the ACS proportion-SE formula (use ratio form if sqrt term < 0 -- verify).
# Severely rent-burdened HHs (>=50% income on rent)
all <- rent_bur_50to101 %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=579.98
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [16017, 17919]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [15831, 18105]
# Severely rent-burdened HHs: Married couple
x <- rent_bur_50to101 %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=130.55
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [542, 1054]
# Prop SE: Severely rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0075
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0323, 0.0618]
# Severely rent-burdened HHs: single female headed
x <- rent_bur_50to101 %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=472.02
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [6382, 8232]
# Prop SE: Severely rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0236
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.384, 0.477]
# Severely rent-burdened HHs: male living alone
x <- rent_bur_50to101 %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=340.45
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2696, 4030]
# Prop SE: Severely rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0189
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.161, 0.235]
# Severely rent-burdened HHs: female living alone
x <- rent_bur_50to101 %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=289.44
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3570, 4704]
# Prop SE: Severely rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0149
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.215, 0.273]
# Severely rent-burdened HHs: other HH type
x <- rent_bur_50to101 %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=175.04
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1020, 1706]
# Prop SE: Severely rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0099
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
# FIX: recorded bounds were "[0.061, 0.010]" (upper < lower); with prop~=0.080
# and se_prop~=0.0099 the 95% CI is approximately [0.061, 0.100].
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.061, 0.100]
#------------------------------------------------------------------------------
# HHL: Household Language
#------------------------------------------------------------------------------
# Codes: 1 = English only, 2 = Spanish, 3 = Other Indo-European languages,
#        4 = Asian and Pacific Island languages, 5 = Other languages
# Helper: weighted household counts and shares by household language (HHL).
show_hhl <- function(dat) {
  counts <- tapply(dat$WGTP, list(dat$HHL), sum)
  print(counts)
  print(prop.table(counts))
}
# All rental HHs
show_hhl(hh_rental)
# Largest categories: 1 (79%), 2 (15%), 3 (3%)
# Any rent-burdened HHs (>=30% of income on rent)
show_hhl(rent_bur)
# Largest categories: 1 (79%), 2 (17%), 3 (2%)
# Non rent-burdened HHs (<30% of income on rent)
show_hhl(rent_bur_no)
# Largest categories: 1 (81%), 2 (11%), 3 (4%)
# Rent-burdened HHs (>=30% and <50%)
show_hhl(rent_bur_30to50)
# Largest categories: 1 (81%), 2 (15%), 3 (2%)
# Severely rent-burdened HHs (>=50%)
show_hhl(rent_bur_50to101)
# Largest categories: 1 (76%), 2 (19%), 3 (3%)
# Observation: moving from rent burdened to severely rent burdened, the share
# of English-speaking households falls while the Spanish-speaking share rises.
#------------------------------------------------------------------------------
# MV: Rent Burden by Length of Time in Unit
#------------------------------------------------------------------------------
# MV codes: 1 = 12 mos or less; 2 = 13 to 23 mos; 3 = 2-4 years; 4 = 5-9 years;
#           5 = 10-19 years; 6 = 20-29 years; 7 = 30+ years
# Weighted counts and shares of households by MV within each rent-burden group.
# All Rental HHs
tapply(hh_rental$WGTP, list(hh_rental$MV), sum)
prop.table(tapply(hh_rental$WGTP, list(hh_rental$MV), sum))
# Any rent burdened households (>=30% income)
tapply(rent_bur$WGTP, list(rent_bur$MV), sum)
prop.table(tapply(rent_bur$WGTP, list(rent_bur$MV), sum))
# Non rent burdened households (<30% income)
tapply(rent_bur_no$WGTP, list(rent_bur_no$MV), sum)
prop.table(tapply(rent_bur_no$WGTP, list(rent_bur_no$MV), sum))
# Rent burdened households (>=30% and <50%)
tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$MV), sum)
prop.table(tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$MV), sum))
# Severely rent burdened households >50%
tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$MV), sum)
prop.table(tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$MV), sum))
# Average and median length of time in rental unit. MV is an ordinal category
# code, so these are category-scale summaries, not years.
time_in_unit <- select(hh_roc, MV, WGTP) %>% tally(wt=WGTP) # total HH weight (not used below)
# BUG FIX: was `na.rm==TRUE` -- a comparison against an undefined object
# (errors with "object 'na.rm' not found"), not the intended named argument.
weighted.mean(all_roc$MV, all_roc$WGTP_HH, na.rm = TRUE) # 3.56 (mean between 2-4 years)
w.median(all_roc$MV, all_roc$WGTP_HH) # 3 (median between 2-4 years)
#------------------------------------------------------------------------------
# MV: Length of Time in Unit by Income
#------------------------------------------------------------------------------
# Use AMI categories created previously (ami_mon_cat)
# 1=0-30%, 2=30-50%, 3=50-60%, 4=60-80%, 5=80-100%, 6=100-120%, 7=120%+
# Restricting to categories 1:4 keeps renter HHs below 80% of county AMI, so
# every mean/SE below describes low/moderate-income renters only.
d <- hh_rental %>% filter(ami_mon_cat %in% 1:4)
# Each subsection repeats one pattern for a length-of-tenure slice:
#   - weighted AMI-category distribution (tapply / prop.table)
#   - weighted mean income (HINCP_adj) and Hmisc::wtd.var-based SD
#   - design-based mean and SE via srvyr::as_survey_rep using the 80
#     replicate weights WGTP1..WGTP80
#-------------------------- All Rental HHs
z <- d
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $36,361.46
var <- wtd.var(z$HINCP_adj, w) # from HMISC package
sd <- sqrt(var) # $41,857.30
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=36361, se=454
# ------------------------- HHs in unit 12 mos or less
z <- d %>% filter(MV==1)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $36,626.06
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $33,459.45
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=36626, se=601
# -------------------------- HHs in unit 13-23 mos
z <- d %>% filter(MV==2)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $43,721.19
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $49,303.48
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=43721, se=1594
# -------------------------- HHs in unit 2-4 years
z <- d %>% filter(MV==3)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $35,487.76
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $35,914.70
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=35488, se=817
# --------------------------- HHs in unit 5-9 years
z <- d %>% filter(MV==4)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $35,991.89
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $60,767.78
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=35992, se=1389
# ---------------------------- HHs in unit 10+ years
z <- d %>% filter(MV %in% 5:7)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $32,305.18
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $32,915.49
# NOTE(review): this SRS-style SE treats the sum of weights as the sample
# size, which understates design-based uncertainty; prefer the replicate-
# weight SE computed just below.
stderror <- sd/sqrt(sum(w)) # 378
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
# NOTE(review): recorded mean 32035 disagrees with the $32,305.18 noted
# above (likely a digit transposition); re-run to confirm.
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=32035, se=901
#------------------------------------------------------------------------------
# MV: Length of Time in Rental Unit by Presence of Children (HUPAC)
#------------------------------------------------------------------------------
# HUPAC codes: 1 = children under 6 only; 2 = children 6-17 only;
#              3 = children <6 and 6-17; 4 = no children
d <- hh_rental %>% filter(HUPAC %in% 1:4)
# Helper: weighted household counts and shares by HUPAC category.
show_hupac <- function(dat) {
  counts <- tapply(dat$WGTP, list(dat$HUPAC), sum)
  print(counts)
  print(prop.table(counts))
}
#-------------------------- All Rental HHs
show_hupac(d)
# ------------------------- HHs in unit 12 mos or less
show_hupac(filter(d, MV == 1))
# ------------------------- HHs in unit 13-23 mos
show_hupac(filter(d, MV == 2))
# ------------------------- HHs in unit 2-4 years
show_hupac(filter(d, MV == 3))
# ------------------------- HHs in unit 5-9 years
show_hupac(filter(d, MV == 4))
# ------------------------- HHs in unit 10+ years
show_hupac(filter(d, MV %in% 5:7))
#===============================================================================
# Characteristics of those who are rent burdened - merged data
#===============================================================================
# Generate categories for rent burden in merged data
# Codes: 1 = <=30%, 2 = 30-50%, 3 = 50-60%, 4 = 60-80%, 5 = 80-100%, 6 = >100%
# (right = TRUE means exactly 30% of income falls in category 1, not 2)
all_roc$GRPIP_cat <- cut(all_roc$GRPIP, breaks = c(0, 30, 50, 60, 80, 100, 10000000), labels = c(1,2,3,4,5,6), right = TRUE)
summary(all_roc$GRPIP_cat)
prop.table(tapply(all_roc$WGTP_HH, list(all_roc$GRPIP_cat), sum))
# Sanity check: the proportions are the same as when I used just the HH data,
# which means the HH weights I created in the merged dataset are correct
# (I created the correct weight variable in Stata. The variable is "WGTP_HH")
# Generate categories for age in merged data
# Codes: 1 = 0-20, 2 = 20-30, 3 = 30-50, 4 = 50-70, 5 = 70+
all_roc$age_cat <- cut(all_roc$AGEP, breaks = c(0, 20, 30, 50, 70, 10000000), labels = c(1,2,3,4,5), right = TRUE)
# Generate categories for race in merged data
# HISP == 1 keeps RAC1P; all other HISP codes are recoded to 10
# (presumably HISP 1 = "not Hispanic" per the PUMS dictionary -- confirm)
all_roc$RACE = ifelse(all_roc$HISP == 01, all_roc$RAC1P, 10)
# Generate categories for employment status in merged data
# ESR (employment status recode)
# 1=civilian employed, at work; 2=civilian employed, with a job but not at work
# 3=unemployed; 4=Armed forces, at work; 5=Armed forces, with a job but not at work
# 6=Not in labor force
all_roc$EMP = ifelse(all_roc$ESR %in% 1:2 | all_roc$ESR %in% 4:5, 1, all_roc$ESR) #EMP=1,3,6
all_roc$EMP = ifelse(all_roc$EMP == 6, 4, all_roc$EMP) #EMP: 1=employed, 3=unemployed, 4=out of LF
# Generate part-time and full-time for EMP
# NOTE(review): if WKHP is NA for an employed person, WKHP<40 is NA and EMP
# becomes NA for that row -- confirm this is acceptable for the tabulations.
all_roc$EMP = ifelse(all_roc$EMP==1 & all_roc$WKHP<40, 2, all_roc$EMP)
summary(all_roc$EMP)
#EMP: 1=full-time employed, 2=part-time employed, 3=unemployed, 4=out of labor force
# Generate rent burdened category variables (person-level rows filtered on
# the household-level GRPIP_cat / TEN values)
rent_all <- all_roc %>% filter(TEN==3) # All rental HHs
rent_bur_all <- all_roc %>% filter(GRPIP_cat %in% 2:6) # >=30% income on rent
rent_bur_non <- all_roc %>% filter(GRPIP_cat==1)
rent_bur_slight <- all_roc %>% filter(GRPIP_cat==2) # >=30% and <50% income on rent
rent_bur_severe <- all_roc %>% filter(GRPIP_cat %in% 3:6) # >=50% income on rent
#-------------------------------------------------------------------------------
# Length of time in rental unit by age (MV, age_cat)
#-------------------------------------------------------------------------------
# MV codes: 1 = 12 mos or less; 2 = 13 to 23 mos; 3 = 2-4 years; 4 = 5-9 years;
#           5 = 10-19 years; 6 = 20-29 years; 7 = 30+ years
# Restrict to heads of household (SPORDER == 1 is the reference person),
# giving one observation per household.
rent_hoh <- rent_all %>%
  arrange(SERIALNO, SPORDER) %>%
  filter(SPORDER==1)
# Age categories: 1 = <=30, 2 = 30-50, 3 = 50-70, 4 = 70+
rent_hoh$age_cat_2 <- cut(rent_hoh$AGEP, breaks = c(0, 30, 50, 70, 10000000), labels = c(1,2,3,4), right = TRUE)
# Helper: person-weighted counts and shares of HOHs by age category.
show_age2 <- function(dat) {
  counts <- tapply(dat$PWGTP, list(dat$age_cat_2), sum)
  print(counts)
  print(prop.table(counts))
}
# All Rental HOHs
show_age2(rent_hoh)
# HOHs in unit 12 mos or less
show_age2(filter(rent_hoh, MV == 1))
# HOHs in unit 13-23 mos
show_age2(filter(rent_hoh, MV == 2))
# HOHs in unit 2-4 years
show_age2(filter(rent_hoh, MV == 3))
# HOHs in unit 5-9 years
show_age2(filter(rent_hoh, MV == 4))
# HOHs in unit 10+ years
show_age2(filter(rent_hoh, MV %in% 5:7))
#-------------------------------------------------------------------------------
# Length of time in rental unit by race (MV, RACE)
#-------------------------------------------------------------------------------
# RACE: 1=White, 2=Black, 10=Hispanic (the three largest groups; RACE also
# takes the remaining RAC1P codes 3-9 for non-Hispanic respondents)
# PWGTP on head-of-household rows approximates household counts.
# All Rental HOHs
z <- rent_hoh
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 12 mos or less
z <- rent_hoh %>% filter(MV==1)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 13-23 mos
z <- rent_hoh %>% filter(MV==2)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 2-4 years
z <- rent_hoh %>% filter(MV==3)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 5-9 years
z <- rent_hoh %>% filter(MV==4)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 10+ years
z <- rent_hoh %>% filter(MV %in% 5:7)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
#-------------------------------------------------------------------------------
# Length of time in rental unit by employment status (MV; EMP)
#-------------------------------------------------------------------------------
# EMP codes: 1 = full-time employed, 2 = part-time employed,
#            3 = unemployed, 4 = out of labor force
# Restrict to head of households only (HOH)
d <- rent_hoh %>% filter(EMP %in% 1:4)
# Helper: person-weighted counts and shares of HOHs by employment status.
show_emp <- function(dat) {
  counts <- tapply(dat$PWGTP, list(dat$EMP), sum)
  print(counts)
  print(prop.table(counts))
}
#-------------------------- All Rental HHs
show_emp(d)
# ------------------------- HHs in unit 12 mos or less
show_emp(filter(d, MV == 1))
# ------------------------- HHs in unit 13-23 mos
show_emp(filter(d, MV == 2))
# ------------------------- HHs in unit 2-4 years
show_emp(filter(d, MV == 3))
# ------------------------- HHs in unit 5-9 years
show_emp(filter(d, MV == 4))
# ------------------------- HHs in unit 10+ years
show_emp(filter(d, MV %in% 5:7))
#-------------------------------------------------------------------------------
# AGEP: Age of Single-Renter HHs
#-------------------------------------------------------------------------------
# What is the average age of male and female single rental HHs? (see HHT section above)
# HHT 4 = male living alone, HHT 6 = female living alone; TEN 3 = renter.
hh_single <- all_roc %>% filter((HHT==4 | HHT==6) & TEN==3)
hh_single_f <- all_roc %>% filter(HHT==6 & TEN==3)
hh_single_m <- all_roc %>% filter(HHT==4 & TEN ==3)
# Single HHs rent-burden categories
rent_bur_all_s <- rent_bur_all %>% filter(HHT==4 | HHT==6)
rent_bur_non_s <- rent_bur_non %>% filter(HHT==4 | HHT==6)
rent_bur_slight_s <- rent_bur_slight %>% filter(HHT==4 | HHT==6)
rent_bur_severe_s <- rent_bur_severe %>% filter(HHT==4 | HHT==6)
# Female single HHs rent-burden categories
rent_bur_all_sf <- rent_bur_all %>% filter(HHT==6)
rent_bur_non_sf <- rent_bur_non %>% filter(HHT==6)
rent_bur_slight_sf <- rent_bur_slight %>% filter(HHT==6)
rent_bur_severe_sf <- rent_bur_severe %>% filter(HHT==6)
# Male single HHs rent-burden categories
rent_bur_all_sm <- rent_bur_all %>% filter(HHT==4)
rent_bur_non_sm <- rent_bur_non %>% filter(HHT==4)
rent_bur_slight_sm <- rent_bur_slight %>% filter(HHT==4)
rent_bur_severe_sm <- rent_bur_severe %>% filter(HHT==4)
# Average age of single HHs, by gender
# (the first two lines compare HH weight vs person weight as a cross-check;
# for one-person households they should agree)
weighted.mean(hh_single$AGEP, hh_single$WGTP_HH) # all: 48.8 years old
weighted.mean(hh_single$AGEP, hh_single$PWGTP) # all: 48.8 years old
weighted.mean(hh_single_f$AGEP, hh_single_f$PWGTP) # female: 50.1 years old
weighted.mean(hh_single_m$AGEP, hh_single_m$PWGTP) # male: 47.6 years old
# Average age of rent-burdened single HHs
# NOTE(review): rent_bur_single, rent_bur_single_f and rent_bur_single_m are
# not created in this section -- presumably defined earlier in the file.
# The parallel subsets built just above are rent_bur_all_s / _sf / _sm;
# verify the intended objects exist before re-running.
weighted.mean(rent_bur_single$AGEP, rent_bur_single$PWGTP) # all: 51.1 years old
weighted.mean(rent_bur_single_f$AGEP, rent_bur_single_f$PWGTP) # female: 51.9 years old
weighted.mean(rent_bur_single_m$AGEP, rent_bur_single_m$PWGTP) # male: 50.3 years old
# Age distributions below use age_cat (1 = 0-20, 2 = 20-30, 3 = 30-50,
# 4 = 50-70, 5 = 70+), weighted by PWGTP.
#----------------------------- Single renter HHs
tapply(hh_single$PWGTP, list(hh_single$age_cat), sum)
prop.table(tapply(hh_single$PWGTP, list(hh_single$age_cat), sum))
# Any rent-burdened single renter HHs
tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$age_cat), sum)
prop.table(tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$age_cat), sum))
# Non rent-burdened single renter HHs
tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$age_cat), sum)
prop.table(tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$age_cat), sum))
# Rent-burdened single renter HHs
tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$age_cat), sum)
prop.table(tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$age_cat), sum))
# Severely rent-burdened single renter HHs
tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$age_cat), sum)
prop.table(tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$age_cat), sum))
#-------------------------------- Female single renter HHs
tapply(hh_single_f$PWGTP, list(hh_single_f$age_cat), sum)
prop.table(tapply(hh_single_f$PWGTP, list(hh_single_f$age_cat), sum))
# Any rent-burdened female single renter HHs
tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$age_cat), sum)
prop.table(tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$age_cat), sum))
# Non rent-burdened female single renter HHs
tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$age_cat), sum)
prop.table(tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$age_cat), sum))
# Rent-burdened female single renter HHs
tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$age_cat), sum)
prop.table(tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$age_cat), sum))
# Severely rent-burdened female single renter HHs
tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$age_cat), sum)
prop.table(tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$age_cat), sum))
#-------------------------------- Male single renter HHs
tapply(hh_single_m$PWGTP, list(hh_single_m$age_cat), sum)
prop.table(tapply(hh_single_m$PWGTP, list(hh_single_m$age_cat), sum))
# Any rent-burdened male single renter HHs
tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$age_cat), sum)
prop.table(tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$age_cat), sum))
# Non rent-burdened male single renter HHs
tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$age_cat), sum)
prop.table(tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$age_cat), sum))
# Rent-burdened male single renter HHs
tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$age_cat), sum)
prop.table(tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$age_cat), sum))
# Severely rent-burdened male single renter HHs
tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$age_cat), sum)
prop.table(tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$age_cat), sum))
#-------------------------------------------------------------------------------
# RACE: Race of All Renter HHs
#-------------------------------------------------------------------------------
# RACE variable
# 1 = White alone; 2 = Black alone; 3 = American Indian alone; 4 = Alaska Native alone
# 5 = American Indian & Alaskan Native; 6 = Asian alone; 7 = Native Hawaiian / Pacific Islander alone
# 8 = Some other race alone; 9 = Two or more races; 10 = Hispanic
# (Categories for race generated in previous section in merged data)
# Race proportions in Rochester population
prop.table(tapply(all_roc$PWGTP, list(all_roc$RACE), sum))
# 36.6% White, 38.3% Black, 18.4% Hispanic
# For now I'll look at the population. I need to figure out how to collapse at the
# HH level after creating the RACE variable, to do the HH analysis (will be more accurate)
# Race of renter household population
tapply(rent_all$PWGTP, list(rent_all$RACE), sum)
prop.table(tapply(rent_all$PWGTP, list(rent_all$RACE), sum))
# Race of all rent burdened population (>=30% income)
tapply(rent_bur_all$PWGTP, list(rent_bur_all$RACE), sum)
prop.table(tapply(rent_bur_all$PWGTP, list(rent_bur_all$RACE), sum))
# Race of non rent-burdened population (<30% income)
tapply(rent_bur_non$PWGTP, list(rent_bur_non$RACE), sum)
prop.table(tapply(rent_bur_non$PWGTP, list(rent_bur_non$RACE), sum))
# Race of slightly rent burdened population (>=30% and <50% income)
tapply(rent_bur_slight$PWGTP, list(rent_bur_slight$RACE), sum)
prop.table(tapply(rent_bur_slight$PWGTP, list(rent_bur_slight$RACE), sum))
# Race of severely rent burdened population (>=50% income)
tapply(rent_bur_severe$PWGTP, list(rent_bur_severe$RACE), sum)
prop.table(tapply(rent_bur_severe$PWGTP, list(rent_bur_severe$RACE), sum))
# Race of single renter HHs (see section HHT above)
prop.table(tapply(hh_single$PWGTP, list(hh_single$RACE), sum))
# Race of rent-burdened single renter HHs
# NOTE(review): rent_bur_single is not created in this chunk -- presumably
# defined earlier in the file; the parallel subset built above is
# rent_bur_all_s. Verify the intended object before re-running.
prop.table(tapply(rent_bur_single$PWGTP, list(rent_bur_single$RACE), sum))
#-------------------------------------------------------------------------------
# MAR: Marital Status of Single-Renter HHs
#-------------------------------------------------------------------------------
# MAR codes: 1 = Married, 2 = Widowed, 3 = Divorced, 4 = Separated, 5 = Never married
# Helper: person-weighted counts and shares by marital status (MAR).
show_mar <- function(dat) {
  counts <- tapply(dat$PWGTP, list(dat$MAR), sum)
  print(counts)
  print(prop.table(counts))
}
#----------------------------- Single renter HHs
show_mar(hh_single)
# Any rent-burdened single renter HHs
show_mar(rent_bur_all_s)
# Non rent-burdened single renter HHs
show_mar(rent_bur_non_s)
# Rent-burdened (30-50%) single renter HHs
show_mar(rent_bur_slight_s)
# Severely rent-burdened single renter HHs
show_mar(rent_bur_severe_s)
#-------------------------------- Female single renter HHs
show_mar(hh_single_f)
# Any rent-burdened female single renter HHs
show_mar(rent_bur_all_sf)
# Non rent-burdened female single renter HHs
show_mar(rent_bur_non_sf)
# Rent-burdened (30-50%) female single renter HHs
show_mar(rent_bur_slight_sf)
# Severely rent-burdened female single renter HHs
show_mar(rent_bur_severe_sf)
#-------------------------------- Male single renter HHs
show_mar(hh_single_m)
# Any rent-burdened male single renter HHs
show_mar(rent_bur_all_sm)
# Non rent-burdened male single renter HHs
show_mar(rent_bur_non_sm)
# Rent-burdened (30-50%) male single renter HHs
show_mar(rent_bur_slight_sm)
# Severely rent-burdened male single renter HHs
show_mar(rent_bur_severe_sm)
#-------------------------------------------------------------------------------
# Marital Status of Single-Renter HHs Ages 50-70
#-------------------------------------------------------------------------------
# age_cat == 4 is the 50-70 band; MAR codes as in the section above.
#----------------------------- 50-70 Single renter HHs
z <- hh_single %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened 50-70 single renter HHs
z <- rent_bur_all_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened 50-70 single renter HHs
z <- rent_bur_non_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened 50-70 single renter HHs
z <- rent_bur_slight_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened 50-70 single renter HHs
z <- rent_bur_severe_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#-------------------------------- Female single renter HHs
# NOTE(review): single_50to70_f / single_50to70_m and the *_sffifty /
# *_smfifty subsets are not created in this section -- presumably defined
# earlier in the file. Verify they exist and match the age_cat==4 filter
# used above before trusting these tabulations.
z <- single_50to70_f
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened female 50-70 single renter HHs
z <- rent_bur_all_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened female 50-70 single renter HHs
z <- rent_bur_non_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened female 50-70 single renter HHs
z <- rent_bur_slight_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened female 50-70 single renter HHs
z <- rent_bur_severe_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#-------------------------------- Male single renter HHs
z <- single_50to70_m
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened male 50-70 single renter HHs
z <- rent_bur_all_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened male 50-70 single renter HHs
z <- rent_bur_non_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened male 50-70 single renter HHs
z <- rent_bur_slight_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened male 50-70 single renter HHs
z <- rent_bur_severe_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#----------------- Occupation of Female-Headed in Labor Force Family HHs ------------
female_lf <- rent_all %>% filter(FES==7) # Rental HH families lead by single female in LF
# Restrict data to HH head only
# Try function from "collapse" package
hh_female_lf_2 <- collap(female_lf, ~ SERIALNO, ffirst) # keep only first obs of SERIALNO
hh_test <- collap(all_roc, ~ SERIALNO, ffirst) # Test collapse with "all_roc" to see if I get # of obs in "hh_roc" data
# Not sure that this works correctly
# The SPORDER approach below (reference person = SPORDER 1) is the one
# actually used for the tabulations; the collap() objects are exploratory.
# Use SPORDER to collapse data and create one obs. per HH
female_lf <- arrange(female_lf, SERIALNO, SPORDER)
hh_female_lf <- female_lf %>% filter(SPORDER==1)
# Occupation code: INDP, NAICSP
tapply(hh_female_lf$PWGTP, list(hh_female_lf$INDP), sum)
prop.table(tapply(hh_female_lf$PWGTP, list(hh_female_lf$INDP), sum))
# Create occupation categories (based on INDP codes, see data dictionary)
hh_female_lf$ind_cat <- cut(hh_female_lf$INDP, breaks = c(0, 300, 500, 700, 1000, 4000, 4600, 6000, 6400, 6800, 7200, 7800, 7900, 8300, 8500, 8700, 9300, 9600, 9900, 10000000),
labels = c("AGR", "EXT", "UTL", "CON", "MFG", "WHL", "RET", "TRN", "INF", "FIN", "PRF", "EDU", "MED", "SCA", "ENT", "SRV", "ADM", "MIL", "UEM"), right = TRUE)
# Create subsets of female single-headed HHs based on rent burden
# NOTE(review): these groupings differ from the earlier convention (any
# burden = GRPIP_cat 2:6, severe = 3:6). Here "bur" is 2:5 (excludes the
# >100% category), "high" is 3:5, and "severe" is ==6 (>100% only).
# Confirm this redefinition is intentional before comparing across sections.
hh_female_lf_bur <- hh_female_lf %>% filter(GRPIP_cat %in% 2:5)
hh_female_lf_bur_slight <- hh_female_lf %>% filter(GRPIP_cat==2)
hh_female_lf_bur_high <- hh_female_lf %>% filter(GRPIP_cat %in% 3:5)
hh_female_lf_bur_severe <- hh_female_lf %>% filter(GRPIP_cat==6)
# All single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf$PWGTP, list(hh_female_lf$ind_cat), sum)
# Rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur$PWGTP, list(hh_female_lf_bur$ind_cat), sum)
# Slightly rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_slight$PWGTP, list(hh_female_lf_bur_slight$ind_cat), sum)
# Highly rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_high$PWGTP, list(hh_female_lf_bur_high$ind_cat), sum)
# Severely rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_severe$PWGTP, list(hh_female_lf_bur_severe$ind_cat), sum)
#----------------- Rent Burden of Different Occupations -------------------------
# Create occupation categories (based on INDP codes, see data dictionary)
all_roc$ind_cat <- cut(all_roc$INDP, breaks = c(0, 300, 500, 700, 1000, 4000, 4600, 6000, 6400, 6800, 7200, 7800, 7900, 8300, 8500, 8700, 9300, 9600, 9900, 10000000),
labels = c("AGR", "EXT", "UTL", "CON", "MFG", "WHL", "RET", "TRN", "INF", "FIN", "PRF", "EDU", "MED", "SCA", "ENT", "SRV", "ADM", "MIL", "UEM"), right = TRUE)
# Helper: person-weighted rent-burden (GRPIP_cat) counts and shares.
burden_dist <- function(dat) {
  counts <- tapply(dat$PWGTP, list(dat$GRPIP_cat), sum)
  print(counts)
  print(prop.table(counts))
}
# Manufacturing
mfg <- all_roc %>% filter(ind_cat=="MFG")
burden_dist(mfg) # 36.8% are rent burdened
# Retail
ret <- all_roc %>% filter(ind_cat=="RET")
burden_dist(ret) # 51.7% are rent burdened
# Professional
prf <- all_roc %>% filter(ind_cat=="PRF")
burden_dist(prf) # 47.2% are rent burdened
# Medical
med <- all_roc %>% filter(ind_cat=="MED")
burden_dist(med) # 40.5% are rent burdened
# Social services and care
sca <- all_roc %>% filter(ind_cat=="SCA")
burden_dist(sca) # 47.1% are rent burdened
# Standard Error Example
# Replicate-weight SE for one estimate: housing units with vacancy-status
# code VACS == 1.
own <- filter(hh_roc, VACS==1)
# Point estimate: weighted count of such units.
pt.est <- sum(own$WGTP)
# The same count recomputed under each of the 80 replicate weights.
rep.names <- paste0('WGTP', 1:80)
rep.ests <- vapply(rep.names, function(nm) sum(own[[nm]]), numeric(1))
# Successive-difference replicate variance formula (printed at top level).
sqrt((4/80) * sum((rep.ests - pt.est)^2))
#-------------------------------------------------------------------------------
# OCPIP and HINCP: Gross owner costs as % of HH income; HH income
#-------------------------------------------------------------------------------
# Histogram of owner costs as % of HH income, with weighted bin-count labels.
# BUG FIX: geom_histogram() previously used its default binning (30 bins)
# while the stat_bin() label layer used binwidth=5, so the printed counts did
# not line up with the bars. Both layers now bin at width 5.
# (`..count..` is the legacy after-stat spelling; newer ggplot2 prefers
# after_stat(count), kept as-is for compatibility with the file's version.)
hh_roc %>%
ggplot(aes(x=OCPIP, weight = WGTP)) +
geom_histogram(binwidth=5) +
stat_bin(binwidth=5, geom="text", aes(label=..count..), vjust=-1.0) # Gross owner costs (OCPIP) as % HH income
# Generate categories for home ownership cost burden
# Codes: 1 = <=30%, 2 = 30-50%, 3 = 50-60%, 4 = 60-80%, 5 = 80-100%, 6 = >100%
hh_roc$OCPIP_cat <- cut(hh_roc$OCPIP, breaks = c(0, 30, 50, 60, 80, 100, 10000000), labels = c(1,2,3,4,5,6), right = TRUE)
summary(hh_roc$OCPIP_cat)
tapply(hh_roc$WGTP, list(hh_roc$OCPIP_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$OCPIP_cat), sum))
# <30% income: 77.2%
# 30-50% income: 13.0%
# 50-60% income: 2.8%
# 60-80% income: 2.2%
# 80-100% income: 1.6%
# >100% income: 3.4%
# Graph of OCPIP compared to HH income (point size = household weight)
hh_roc %>%
ggplot() +
geom_point(aes(x=HINCP, y=OCPIP, size=WGTP), shape=21) +
xlim(0,400000) # graph view restricted to HINCP from $0-$400,000
# Note: compared to graph of GRPIP, this one is far more bottom left,
# meaning far fewer owners are housing burdened
#-------------------------------------------------------------------------------
# Household income by AMI
#-------------------------------------------------------------------------------
# Weighted median household income, city of Rochester (w.median from cwhmisc)
w_median_roc <- w.median(hh_roc$HINCP, hh_roc$WGTP)
# Weighted median household income, Monroe County
w_median_mon <- w.median(hh_monroe$HINCP, hh_monroe$WGTP)
# Calculate AMI for Rochester Metro Area
# Look at HUD definition of AMI based on family size
# AMI band cut points: <=30, 30-50, 50-60, 60-80, 80-100, 100-120, >120 pct
ami_band_breaks <- c(0, 0.3, 0.5, 0.6, 0.8, 1.0, 1.2, 10000000)
# Each household's income as a share of the Rochester median
hh_roc$ami_roc <- hh_roc$HINCP / w_median_roc
hh_roc$ami_roc_cat <- cut(hh_roc$ami_roc, breaks = ami_band_breaks, labels = 1:7, right = TRUE)
summary(hh_roc$ami_roc_cat)
with(hh_roc, tapply(WGTP, list(ami_roc_cat), sum))
with(hh_roc, prop.table(tapply(WGTP, list(ami_roc_cat), sum)))
# <30% AMI: 12.9%
# 30-50% AMI: 12.2%
# 50-60% AMI: 4.9%
# 60-80% AMI: 10.0%
# 80-100% AMI: 9.0%
# 100-120% AMI: 7.1%
# >=120% AMI: 43.8%
# Same classification relative to the Monroe County median
hh_roc$ami_mon <- hh_roc$HINCP / w_median_mon
hh_roc$ami_mon_cat <- cut(hh_roc$ami_mon, breaks = ami_band_breaks, labels = 1:7, right = TRUE)
summary(hh_roc$ami_mon_cat)
with(hh_roc, tapply(WGTP, list(ami_mon_cat), sum))
with(hh_roc, prop.table(tapply(WGTP, list(ami_mon_cat), sum)))
# <30% AMI: 25.5%
# 30-50% AMI: 16.6%
# 50-60% AMI: 7.9%
# 60-80% AMI: 11.5%
# 80-100% AMI: 8.9%
# 100-120% AMI: 6.5%
# >=120% AMI: 23.1%
#-------------------------------------------------------------------------------
# Gross Rent
#-------------------------------------------------------------------------------
# GRNTP = monthly gross rent; AGRNTP = annualized gross rent
hh_roc$AGRNTP <- 12 * hh_roc$GRNTP
# Weighted quantiles of monthly and annual gross rent
with(hh_roc, Hmisc::wtd.quantile(GRNTP, weights = WGTP))
with(hh_roc, Hmisc::wtd.quantile(AGRNTP, weights = WGTP))
# Income (as % of AMI) at which a unit is affordable, i.e. rent <= 30% of income:
#   percent AMI = (annual gross rent / 0.3 / median income) * 100
decile_probs <- c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
# Relative to the Rochester median income
hh_roc$aff_inc_roc <- (hh_roc$AGRNTP / 0.3 / w_median_roc) * 100
with(hh_roc, Hmisc::wtd.quantile(aff_inc_roc, probs = decile_probs, weights = WGTP))
# At the Rochester AMI, less than 10% of units are affordable for 30% AMI
# Relative to the Monroe County median income
hh_roc$aff_inc_mon <- (hh_roc$AGRNTP / 0.3 / w_median_mon) * 100
with(hh_roc, Hmisc::wtd.quantile(aff_inc_mon, probs = decile_probs, weights = WGTP))
# At Monroe County AMI, approx. 10% of units are affordable for 30% AMI
# Bucket rental units by the AMI band needed to afford them (Monroe median):
# labels 1-7 = <=30, 30-50, 50-60, 60-80, 80-100, 100-120, >120 percent AMI
aff_breaks <- c(0, 30, 50, 60, 80, 100, 120, 10000000)
hh_roc$aff_inc_cat <- cut(hh_roc$aff_inc_mon, breaks = aff_breaks, labels = 1:7, right = TRUE)
summary(hh_roc$aff_inc_cat)
with(hh_roc, tapply(WGTP, list(aff_inc_cat), sum))
with(hh_roc, prop.table(tapply(WGTP, list(aff_inc_cat), sum)))
# 10.1% of units exclusively fall within <30% AMI affordability
# 24.2% of units exclusively fall within 30-50% AMI affordability
# 19.1% of units exclusively fall within 50-60% AMI affordability
# 29.9% of units exclusively fall within 60-80% AMI affordability
# 9.9% of units exclusively fall within 80-100% AMI affordability
# 3.6% of units exclusively fall within 100-120% AMI affordability
# 3.1 % of units exclusively fall within >=120% AMI affordability
# Histogram of rental units at Monroe County AMI
ggplot(hh_roc, aes(x = aff_inc_mon, weight = WGTP)) +
geom_histogram() +
stat_bin(binwidth=10, geom="text", aes(label=..count..), vjust=-1.0) # AMI needed for units
# Same bucketing relative to the Rochester median
hh_roc$aff_inc_cat_roc <- cut(hh_roc$aff_inc_roc, breaks = aff_breaks, labels = 1:7, right = TRUE)
summary(hh_roc$aff_inc_cat_roc)
with(hh_roc, tapply(WGTP, list(aff_inc_cat_roc), sum))
with(hh_roc, prop.table(tapply(WGTP, list(aff_inc_cat_roc), sum)))
# 3.9% of units exclusively fall within <30% AMI affordability
# 6.0% of units exclusively fall within 30-50% AMI affordability
# 3.8% of units exclusively fall within 50-60% AMI affordability
# 15.8% of units exclusively fall within 60-80% AMI affordability
# 22.7% of units exclusively fall within 80-100% AMI affordability
# 20.6% of units exclusively fall within 100-120% AMI affordability
# 27.2% of units exclusively fall within >=120% AMI affordability
# The category percentages are different when I use Rochester AMI because absolute
# rent prices are included in the calculation, which aren't based on AMI
#-------------------------------------------------------------------------------
# Miscellaneous
#-------------------------------------------------------------------------------
#**************************************
# Standard errors attempt - create proportions dataset
# NOTE(review): `all` and `sub` shadow base::all() / base::sub() -- consider
# renaming if these attempts are ever revived.
all <- hh_roc %>%
select(FES,
starts_with("WGTP"),
starts_with("GRPIP")
)
sub <- hh_rental %>%
select(FES,
starts_with("WGTP"),
starts_with("GRPIP")
)
# NOTE(review): elementwise division of two data frames with different row
# counts (all city HHs vs rental HHs only) -- rows do not correspond to the
# same households, so this "proportions" frame is not meaningful as written.
prop <- sub/all
#**************************************
#**************************************
# Standard errors attempt - calculation of MV variable
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- hh_roc %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of "length of time" variable
pumsd_hh %>% filter(!is.na(MV)) %>%
summarise(survey_mean(MV, na.rm = TRUE))
# mean=3.56, se=0.0131
# mean same as with previous method, so this is correct
# Naive SE for comparison: weighted SD divided by sqrt(total weight)
x <- sqrt(wtd.var(hh_roc$MV, hh_roc$WGTP))
y <- x/sqrt(sum(hh_roc$WGTP))
#*****************************************
#*****************************************
# Standard Errors Attempt - survey package to calculate SE of person-level data, age
# Calculate mean and std. error of age overall and by category
# use "survey" package to set survey design and specify replicate weights
pumsd_all <- all_roc %>%
as_survey_rep(
weights = PWGTP,
repweights = starts_with("PWGTP"),
combined_weights = TRUE
)
# calculate mean and std. error of age
pumsd_all %>%
filter(!is.na(AGEP)) %>%
summarise(
survey_mean(AGEP, na.rm = TRUE)
)
# mean=, se=
# error message: Error in qr.default(weights(design, "analysis"), tol = 1e-05) :
# NA/NaN/Inf in foreign function call (arg 1)
# Another try: jackknife replicate design built directly with survey::svrepdesign,
# selecting the replicate-weight columns by regex
pumsd_all <-
svrepdesign(
weight = ~PWGTP ,
repweights = 'PWGTP[0-9]+' ,
scale = 4 / 80 ,
rscales = rep( 1 , 80 ) ,
mse = TRUE ,
type = 'JK1' ,
data = all_roc
)
#*************************************
|
/rochester_affordable.R
|
no_license
|
astaveski/rochester
|
R
| false
| false
| 91,567
|
r
|
# ===============================================================================
# * FILE: 03_rochester.R
# * PURPOSE: Import and Analyze PUMS data from 2018
# * AUTHORS: Andrea Ringer
# * DATE CREATED: June 9, 2020
# * DATE LAST MODIFIED: July 2, 2020
# ===============================================================================
library(readr)
library(tidyverse)
library(survey)
library(srvyr)
library(Hmisc)
library(cwhmisc)
library(collapse)
# NOTE(review): hard-coded setwd() makes the script non-portable; consider
# running from the project root or using a relative/project-based path.
setwd("/Users/andrearinger/Documents/PUMS Data")
#-------------------------------------------------------------------------------
# Load PUMS Household Data and Select Rochester PUMAs
#-------------------------------------------------------------------------------
# guess_max raises the row count used for column-type guessing so sparsely
# populated columns are not mis-typed by read_csv.
pums_hh <- read_csv("psam_h36_monroe.csv", guess_max = 12000)
pums_all <- read_csv("psam_all_monroe.csv", guess_max = 12000)
# PUMAs 902/903 -- presumably the city of Rochester; confirm against the PUMA map.
# %in% replaces the repeated == / | comparisons (same result, clearer intent).
hh_roc <- pums_hh %>%
filter(PUMA %in% c("902", "903"))
all_roc <- pums_all %>%
filter(PUMA %in% c("902", "903"))
# Keep the full county household file for county-level medians below
hh_monroe <- pums_hh
#-------------------------------------------------------------------------------
# Clean Person and Household Data
#-------------------------------------------------------------------------------
# SERIALNO encodes the survey year in its first 4 characters, followed by the
# housing-unit serial; split it into separate year / id columns.
# BUG FIX: the person-level lines previously referenced `p_roc`, which is never
# defined anywhere in this script -- the person-level table is `all_roc`.
all_roc$year <- substr(as.character(all_roc$SERIALNO), 1, 4)
all_roc$id <- substr(as.character(all_roc$SERIALNO), 5, 25)
hh_roc$year <- substr(as.character(hh_roc$SERIALNO), 1, 4)
hh_roc$id <- substr(as.character(hh_roc$SERIALNO), 5, 25)
#-------------------------------------------------------------------------------
# GRPIP and HINCP: Gross rent as a percentage of HH income; HH income
#-------------------------------------------------------------------------------
# Bucket renter households by rent burden (gross rent as % of income):
# labels 1-6 = <=30, 30-50, 50-60, 60-80, 80-100, >100 percent of income.
grpip_breaks <- c(0, 30, 50, 60, 80, 100, 10000000)
hh_roc$GRPIP_cat <- cut(hh_roc$GRPIP, breaks = grpip_breaks, labels = 1:6, right = TRUE)
summary(hh_roc$GRPIP_cat)
# Weighted household counts and shares per burden category
with(hh_roc, tapply(WGTP, list(GRPIP_cat), sum))
with(hh_roc, prop.table(tapply(WGTP, list(GRPIP_cat), sum)))
# <30% income: 42.8%
# 30-50% income: 24.8%
# 50-60% income: 5.7%
# 60-80% income: 7.9%
# 80-100% income: 5.2%
# >100% income: 13.5%
# Create adjusted household income variable (using HINCP, ADJINC)
# ADJINC is the PUMS income inflation-adjustment factor scaled by 10^6
hh_roc$ADJINC_1 <- hh_roc$ADJINC/1000000
hh_roc$HINCP_adj <- hh_roc$HINCP*hh_roc$ADJINC_1
# Compute Monroe County median income
hh_monroe$ADJINC_1 <- hh_monroe$ADJINC/1000000
# BUG FIX: previously multiplied by hh_roc$ADJINC_1 (the city subset's vector),
# which has a different length than hh_monroe -- R would recycle/misalign the
# adjustment factors across county households. Use the county's own column.
hh_monroe$HINCP_adj <- hh_monroe$HINCP*hh_monroe$ADJINC_1
# Weighted county median of adjusted income (w.median from cwhmisc)
w_median_mon <- w.median(hh_monroe$HINCP_adj, hh_monroe$WGTP)
# Generate Monroe County AMI categories:
# labels 1-4 = <=30, 30-50, 50-80, >80 percent of the county median
hh_roc$ami_mon <- hh_roc$HINCP_adj/w_median_mon
hh_roc$ami_mon_cat <- cut(hh_roc$ami_mon, breaks = c(0, 0.3, 0.5, 0.8, 10000000), labels = c(1,2,3,4), right = TRUE)
tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum))
# NOTE(review): tally(wt = WGTP) returns the total weight of all rows, not a
# percentage -- the name gross_rent_perc is misleading; verify intent.
gross_rent_perc <- select(hh_roc, GRPIP, WGTP) %>% tally(wt=WGTP)
# Weighted histogram of rent burden, with weighted counts over each 1-pt bin
ggplot(hh_roc, aes(x=GRPIP, weight = WGTP)) +
geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0) # Gross rent as % HH income
# Why do so many people spend over 100% of income on rent?
# GRPIP==101 is presumably the top-coded ">100% of income" value -- confirm
# against the PUMS data dictionary.
# For those with GRPIP==101, weighted histogram of household incomes over last 12 months
hh_roc %>%
filter(GRPIP==101) %>%
ggplot(aes(x=HINCP, weight = WGTP)) + geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0)
# For those with GRPIP==101, weighted summary of household incomes over last 12 months
# NOTE(review): summary() has no `wt` argument and `HINCP` here is matched to
# summary's `maxsum` parameter -- this line likely errors ("object 'HINCP' not
# found") or at best ignores the weights; probably meant a weighted summary
# (e.g. Hmisc::wtd.quantile). Verify.
hh_roc %>%
select(WGTP, GRPIP, HINCP) %>%
filter(GRPIP==101) %>%
summary(HINCP, wt=WGTP)
# Graph of income related to rent burden (point size = household weight)
hh_roc %>%
ggplot() +
geom_point(aes(x=HINCP, y=GRPIP, size=WGTP), shape=21) +
xlim(0,100000) # graph view restricted to HINCP from $0-$100,000
# Further analysis: put in 30% line, calculate percent of HHs above line
# Histogram of household income
hh_roc %>%
ggplot(aes(x=HINCP, weight=WGTP)) +
geom_histogram() +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-1.0)
#===============================================================================
# Characteristics of those who are rent burdened - HH data
#===============================================================================
# Renter households only (TEN == 3; excludes homeowner HHs).
# FIX: use the bare column name inside filter() -- the previous
# filter(hh_roc$TEN==3) bypassed the piped data, which silently breaks if the
# pipe input ever changes. Same result here, safer idiom.
hh_rental <- hh_roc %>%
filter(TEN == 3)
# Not rent burdened: <30% of income on rent
rent_bur_no <- hh_roc %>%
filter(GRPIP_cat==1)
# Any rent burden: >=30% of income on rent.
# NOTE(review): GRPIP_cat is a factor; `%in% 2:6` works only because its
# labels are "1".."6" (matched as characters) -- keep labels in sync.
rent_bur <- hh_roc %>%
filter(GRPIP_cat %in% 2:6)
summary(rent_bur$GRPIP_cat)
# Moderate burden: >=30% and <50% of income on rent
rent_bur_30to50 <- hh_roc %>%
filter(GRPIP_cat==2)
summary(rent_bur_30to50$GRPIP_cat)
# Severe burden: >=50% of income on rent
rent_bur_50to101 <- hh_roc %>%
filter(GRPIP_cat %in% 3:6)
summary(rent_bur_50to101$GRPIP_cat)
#-------------------------------------------------------------------------------
# FES: Family Type and Employment Status
#-------------------------------------------------------------------------------
# FES codes:
# 1-4 Married Couple Family
# 1 = Husband and wife in LF; 2 = Husband in LF, wife not in LF
# 3 = Husband not in LF, wife in LF; 4 = Neither husband nor wife in LF
# 5-8 Other Family
# 5 = Male HHer, no wife present, LF; 6 = Male HHer, no wife present, not in LF
# 7 = Female HHer, no husband present, LF; 8 = Female HHer, no husband present, not in LF
# For each burden subset: weighted counts by family type, then shares.
# All rental HHs
with(hh_rental, tapply(WGTP, list(FES), sum))
with(hh_rental, prop.table(tapply(WGTP, list(FES), sum)))
# Any rent burdened households (>=30% income)
with(rent_bur, tapply(WGTP, list(FES), sum))
with(rent_bur, prop.table(tapply(WGTP, list(FES), sum)))
# Non rent burdened households (<30% income)
with(rent_bur_no, tapply(WGTP, list(FES), sum))
with(rent_bur_no, prop.table(tapply(WGTP, list(FES), sum)))
# Rent burdened households (>=30% and <50% income)
with(rent_bur_30to50, tapply(WGTP, list(FES), sum))
with(rent_bur_30to50, prop.table(tapply(WGTP, list(FES), sum)))
# Severely rent burdened households >=50%
with(rent_bur_50to101, tapply(WGTP, list(FES), sum))
with(rent_bur_50to101, prop.table(tapply(WGTP, list(FES), sum)))
#--------------- STANDARD ERRORS - All Rental Family HHs -----------------------
# Jackknife SE via the 80 PUMS replicate weights (WGTP1..WGTP80):
#   SE = sqrt((4/80) * sum((replicate_est - point_est)^2))
# 90%/95% CIs use z = 1.64 / 1.96. Proportion SEs use the Census ratio formula:
#   SE(p) = (1/X) * sqrt(SE_y^2 - (y^2/X^2) * SE_x^2)
# NOTE(review): `all` shadows base::all() and pt.est/se/prop are overwritten
# for each subgroup -- consider extracting a helper function for this pattern.
rep.names <- paste0('WGTP', 1:80)
# All Rental Family HHs
all <- hh_rental %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=623.25
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [21961, 24005]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [21761, 24205]
# Rental HHs: married couple, at least one HHer in LF
x <- hh_rental %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=284.39
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3523, 4637]
# Proportion standard error: married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0114
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.155, 0.2]
# Rental HHs: single male headed in LF
x <- hh_rental %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=216.69
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1179, 2029]
# Proportion standard error: single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00924
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0517, 0.0879]
# Rental HHs: single male headed not in LF
x <- hh_rental %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=137.26
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [587, 1125]
# Proportion standard error: single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00589
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0257, 0.0488]
# Rental HHs: single female headed in LF
x <- hh_rental %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=422.68
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [8890, 10546]
# Proportion standard error: single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0144
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.395, 0.451]
# Rental HHs: single female headed not in LF
x <- hh_rental %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=428.09
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4882, 6560]
# Proportion standard error: single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0174
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.215, 0.283]
# Rental HHs: married couple family not in LF
x <- hh_rental %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=167.22
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [676, 1332]
# Proportion standard error: married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00718
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0296, 0.0578]
#--------------- STANDARD ERRORS - Any Rent-Burdened Family HHs ----------------
# Jackknife SE via the 80 PUMS replicate weights (WGTP1..WGTP80):
#   SE = sqrt((4/80) * sum((replicate_est - point_est)^2))
# 90%/95% CIs use z = 1.64 / 1.96. Proportion SEs use the Census ratio formula:
#   SE(p) = (1/X) * sqrt(SE_y^2 - (y^2/X^2) * SE_x^2)
rep.names <- paste0('WGTP', 1:80)
# Any Rent Burdened Family HHs (>=30% of income on rent)
all <- rent_bur %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=536.06
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [13445, 15203]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [13273, 15375]
# Any Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=138.06
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [856, 1398]
# Prop SE: Any rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00918
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0607, 0.0967]
# Any rent-burdened family HHs: single male headed in LF
x <- rent_bur %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=126.58
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [362, 858]
# Prop SE: Any rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00869
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0255, 0.0596]
# Any rent-burdened family HHs: single male headed not in LF
x <- rent_bur %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=119.48
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [326, 794]
# Prop SE: Any rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00821
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.023, 0.0552]
# Any rent-burdened family HHs: single female headed in LF
x <- rent_bur %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=435.02
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5941, 7647]
# Prop SE: Any rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0246
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.426, 0.523]
# Any rent-burdened family HHs: single female headed not in LF
x <- rent_bur %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=401.75
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3809, 5383]
# Prop SE: Any rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0253
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.271, 0.371]
# Any rent-burdened family HHs: married couple family not in LF
x <- rent_bur %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=127.30
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [387, 887]
# Prop SE: Any rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00873
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0274, 0.0616]
#--------------- STANDARD ERRORS - Non Rent Burdened Family HHs ----------------
# Jackknife SE via the 80 PUMS replicate weights (WGTP1..WGTP80):
#   SE = sqrt((4/80) * sum((replicate_est - point_est)^2))
# 90%/95% CIs use z = 1.64 / 1.96. Proportion SEs use the Census ratio formula:
#   SE(p) = (1/X) * sqrt(SE_y^2 - (y^2/X^2) * SE_x^2)
rep.names <- paste0('WGTP', 1:80)
# Non-Rent Burdened Family HHs
all <- rent_bur_no %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=455.87
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [7569, 9065]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [7423, 9211]
# Non Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_no %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=273.28
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2417, 3489]
# Prop SE: Non Rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0265
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.303, 0.407]
# Non Rent-burdened family HHs: single male headed in LF
x <- rent_bur_no %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=175.46
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [650, 1338]
# Prop SE: Non rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0201
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0802, 0.1588]
# Non Rent-burdened family HHs: single male headed not in LF
x <- rent_bur_no %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=73.79
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [108, 398]
# Prop SE: Non rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00871
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0133, 0.0475]
# Non Rent-burdened family HHs: single female headed in LF
x <- rent_bur_no %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=296.99
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2330, 3494]
# Prop SE: Non rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0301
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.291, 0.409]
# Non Rent-burdened family HHs: single female headed not in LF
x <- rent_bur_no %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=160.19
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [546, 1174]
# Prop SE: Non rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0184
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0673, 0.1395]
# Non Rent-burdened family HHs: married couple family not in LF
x <- rent_bur_no %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=93.71
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [161, 529]
# Prop SE: Non rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.0110
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0199, 0.0631]
#--------------- STANDARD ERRORS - Rent Burdened Family HHs --------------------
# Jackknife SE via the 80 PUMS replicate weights (WGTP1..WGTP80):
#   SE = sqrt((4/80) * sum((replicate_est - point_est)^2))
# 90%/95% CIs use z = 1.64 / 1.96. Proportion SEs use the Census ratio formula:
#   SE(p) = (1/X) * sqrt(SE_y^2 - (y^2/X^2) * SE_x^2)
rep.names <- paste0('WGTP', 1:80)
# Rent Burdened Family HHs (>=30% and <50% income on rent)
all <- rent_bur_30to50 %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=336.43
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [5141, 6245]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [5034, 6352]
# Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_30to50 %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=102.92
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [417, 821]
# Prop SE: Rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0169
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0756, 0.1419]
# Rent-burdened family HHs: single male headed in LF
x <- rent_bur_30to50 %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=104.47
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [221, 631]
# Prop SE: rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0178
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0399, 0.1097]
# Rent-burdened family HHs: single male headed not in LF
x <- rent_bur_30to50 %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=75.45
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [53.1, 348.9]
# Prop SE: rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0131
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.00965, 0.06096]
# Rent-burdened family HHs: single female headed in LF
x <- rent_bur_30to50 %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=300.64
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2431, 3609]
# Prop SE: rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0425
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.447, 0.614]
# Rent-burdened family HHs: single female headed not in LF
x <- rent_bur_30to50 %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=141.70
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [785, 1341]
# Prop SE: rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0223
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.143, 0.23]
# Rent-burdened family HHs: married couple family not in LF
x <- rent_bur_30to50 %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=92.89
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [182, 546]
# Prop SE: rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.0159
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0328, 0.095]
#--------------- STANDARD ERRORS - Severely Rent-Burdened Family HHs -----------
# Family HHs paying >=50% of income in rent (rent_bur_50to101), broken out by
# FES (family type / employment status). SEs use the 80 ACS household
# replicate weights WGTP1-WGTP80: SE = sqrt((4/80)*sum((rep - pt)^2)).
# Derived-proportion SEs use (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2); per ACS PUMS
# accuracy guidance, '+' should replace '-' if the radical is negative
# (that case is not handled here).
rep.names <- paste0('WGTP', 1:80)
# Severely Rent Burdened Family HHs (>=30% of income on rent)
# NOTE(review): label above says >=30%, but rent_bur_50to101 is the >=50%
# subset - the threshold in this comment looks like a copy/paste slip.
all <- rent_bur_50to101 %>% filter(FES %in% 1:8)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=471.60
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [7858, 9404]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [7707, 9555]
# Severely Rent-burdened family HHs: married couple, at least one HHer in LF
x <- rent_bur_50to101 %>% filter(FES %in% 1:3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=109.73
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [293, 723]
# Prop SE: Severely rent-burdened family, married couple, at least one HHer in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0123
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0347, 0.083]
# Severely rent-burdened family HHs: single male headed in LF
x <- rent_bur_50to101 %>% filter(FES==5)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=67.65
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [51.4, 316.6]
# Prop SE: Severely rent-burdened family, single male headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0078
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0061, 0.0365]
# Severely rent-burdened family HHs: single male headed not in LF
x <- rent_bur_50to101 %>% filter(FES==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=101.40
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [160, 558]
# Prop SE: Severely rent-burdened family, single male headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0115
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.019, 0.0642]
# Severely rent-burdened family HHs: single female headed in LF
x <- rent_bur_50to101 %>% filter(FES==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=316.17
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3154, 4394]
# Prop SE: Severely rent-burdened family single female headed in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0278
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.383, 0.492]
# Severely rent-burdened family HHs: single female headed not in LF
x <- rent_bur_50to101 %>% filter(FES==8)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=370.62
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2807, 4259]
# Prop SE: Severely rent-burdened family, single female headed not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0367
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.337, 0.481]
# Severely rent-burdened family HHs: married couple family not in LF
x <- rent_bur_50to101 %>% filter(FES==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=75.07
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [126, 420]
# Prop SE: Severely rent-burdened family, married couple family not in LF / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) #0.00852
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0149, 0.04830]
#-------------------------------------------------------------------------------
# HHT: Household/Family Type
#-------------------------------------------------------------------------------
# HHT codes:
#   1 = Married couple HH
#   2 = Other family HH: Male HHer, no spouse present
#   3 = Other family HH: Female HHer, no spouse present
#   4-7 = Non-family HH:
#     4 = Male HHer, living alone;   5 = Male HHer, not living alone
#     6 = Female HHer, living alone; 7 = Female HHer, not living alone
# Weighted counts and shares of household type, by rent-burden status.
# All rental HHs
with(hh_rental, tapply(WGTP, list(HHT), sum))
with(hh_rental, prop.table(tapply(WGTP, list(HHT), sum)))
# Any rent burdened households (>=30% income)
with(rent_bur, tapply(WGTP, list(HHT), sum))
with(rent_bur, prop.table(tapply(WGTP, list(HHT), sum)))
# Non rent burdened households (<30% income)
with(rent_bur_no, tapply(WGTP, list(HHT), sum))
with(rent_bur_no, prop.table(tapply(WGTP, list(HHT), sum)))
# Rent burdened households (>=30% and <50%)
with(rent_bur_30to50, tapply(WGTP, list(HHT), sum))
with(rent_bur_30to50, prop.table(tapply(WGTP, list(HHT), sum)))
# Severely rent burdened households >=50%
with(rent_bur_50to101, tapply(WGTP, list(HHT), sum))
with(rent_bur_50to101, prop.table(tapply(WGTP, list(HHT), sum)))
#------------------ STANDARD ERRORS - All Rental HHs ---------------------------
# All rental HHs by household type (HHT), with replicate-weight SEs:
# SE = sqrt((4/80) * sum((replicate est - point est)^2)) over WGTP1-WGTP80.
# Derived-proportion SEs use (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2) per ACS PUMS
# accuracy guidance ('+' replaces '-' if the radical is negative; unhandled).
rep.names <- paste0('WGTP', 1:80)
# All Rental HHs
all <- hh_rental %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=772.67
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [52581, 55115]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [52334, 55362]
# Rental HHs: Married couple
x <- hh_rental %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=317.50
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4633, 5877]
# Prop SE: married couple / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00572
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0864, 0.1088]
# Rental HHs: single female headed
x <- hh_rental %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=497.80
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [14463, 16415]
# Prop SE: single female headed / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00828
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.27, 0.303]
# Rental HHs: male living alone
x <- hh_rental %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=507.56
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [11324, 13314]
# Prop SE: male living alone / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00884
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.211, 0.246]
# Rental HHs: female living alone
x <- hh_rental %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=508.73
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [10750, 12744]
# Prop SE: female living alone / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0089
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.201, 0.236]
# Rental HHs: other HH type
x <- hh_rental %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=407.67
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [8289, 9887]
# Prop SE: other HH type / all rental HHs
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00717
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.155, 0.1830]
#------------------ STANDARD ERRORS - Any rent-burdened HHs --------------------
# HHs paying >=30% of income in rent (rent_bur) by household type (HHT).
# Replicate-weight SE: sqrt((4/80)*sum((rep - pt)^2)) over WGTP1-WGTP80;
# derived-proportion SE: (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2) (ACS formula).
rep.names <- paste0('WGTP', 1:80)
# Any rent-burdened HHs (>=30% of income on rent)
all <- rent_bur %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=735.47
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [28727, 31139]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [28491, 31375]
# Any rent-burdened HHs: Married couple
x <- rent_bur %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=179.92
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1441, 2147]
# Prop SE: any rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.00583
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0485, 0.0714]
# Any rent-burdened HHs: single female headed
x <- rent_bur %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=539.32
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [10333, 12447]
# Prop SE: Any rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0154
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.35, 0.411]
# Any rent-burdened HHs: male living alone
x <- rent_bur %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=432.26
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5871, 7565]
# Prop SE: Any rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0134
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.198, 0.251]
# Any rent-burdened HHs: female living alone
x <- rent_bur %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=418.25
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [6136, 7776]
# Prop SE: any rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0128
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.207, 0.257]
# Any rent-burdened HHs: other HH type
x <- rent_bur %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=245.90
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2593, 3557]
# Prop SE: any rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0078
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0874, 0.1181]
#------------------ STANDARD ERRORS - Non rent-burdened HHs --------------------
# HHs paying <30% of income in rent (rent_bur_no) by household type (HHT).
# Replicate-weight SE: sqrt((4/80)*sum((rep - pt)^2)) over WGTP1-WGTP80;
# derived-proportion SE: (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2) (ACS formula).
rep.names <- paste0('WGTP', 1:80)
# Non rent-burdened HHs (<30% of income on rent)
all <- rent_bur_no %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=801.20
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [21119, 23747]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [20863, 24003]
# Non rent-burdened HHs: Married couple
x <- rent_bur_no %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=284.47
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2881, 3997]
# Prop SE: non rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0114
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.131, 0.176]
# Non rent-burdened HHs: single female headed
x <- rent_bur_no %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=327.07
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3131, 4413]
# Prop SE: Non rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0133
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.142, 0.194]
# Non rent-burdened HHs: male living alone
x <- rent_bur_no %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=415.52
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [4206, 5834]
# Prop SE: Non rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0167
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.191, 0.257]
# Non rent-burdened HHs: female living alone
x <- rent_bur_no %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=375.42
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3647, 5119]
# Prop SE: Non rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0152
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.166, 0.225]
# Non rent-burdened HHs: other HH type
x <- rent_bur_no %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=391.06
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [5053, 6585]
# Prop SE: non rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0148
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.23, 0.288]
#------------------ STANDARD ERRORS - Rent-burdened HHs ------------------------
# HHs paying >=30% and <50% of income in rent (rent_bur_30to50) by household
# type (HHT). Replicate-weight SE: sqrt((4/80)*sum((rep - pt)^2)) over the 80
# ACS household replicate weights WGTP1-WGTP80; derived-proportion SE:
# (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2) per ACS PUMS accuracy guidance
# ('+' replaces '-' if the radical is negative; that case is not handled).
# FIX: removed stray trailing '0' after paste0(...) - it was a syntax error.
rep.names <- paste0('WGTP', 1:80)
# Rent-burdened HHs (>=30% and <50% of income on rent)
all <- rent_bur_30to50 %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=548.70
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [12065, 13865]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [11890, 14040]
# Rent-burdened HHs: Married couple
x <- rent_bur_30to50 %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=134.31
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [733, 1259]
# Prop SE: rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0098
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0575, 0.0961]
# Rent-burdened HHs: single female headed
x <- rent_bur_30to50 %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=303.21
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3489, 4677]
# Prop SE: Rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0192
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.277, 0.353]
# Rent-burdened HHs: male living alone
x <- rent_bur_30to50 %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=314.68
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2738, 3972]
# Prop SE: Rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0217
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.216, 0.301]
# Rent-burdened HHs: female living alone
x <- rent_bur_30to50 %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=269.37
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2291, 3347]
# Prop SE: Rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0186
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.181, 0.254]
# Rent-burdened HHs: other HH type
x <- rent_bur_30to50 %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=216.78
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1287, 2137]
# Prop SE: rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0158
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.101, 0.163]
#------------------ STANDARD ERRORS - Severely Rent-burdened HHs ---------------
# HHs paying >=50% of income in rent (rent_bur_50to101) by household type
# (HHT). Replicate-weight SE: sqrt((4/80)*sum((rep - pt)^2)) over WGTP1-WGTP80;
# derived-proportion SE: (1/X)*sqrt(SE(x)^2 - p^2*SE(X)^2) per ACS PUMS
# accuracy guidance ('+' replaces '-' if the radical is negative; unhandled).
# FIX: removed stray trailing '0' after paste0(...) - it was a syntax error.
rep.names <- paste0('WGTP', 1:80)
# Severely rent-burdened HHs (>=50% income on rent)
all <- rent_bur_50to101 %>% filter(HHT %in% 1:7)
pt.est_all <- sum(all$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(all[[n]]))
se_all <- sqrt((4/80) * sum((rep.ests - pt.est_all)^2)) # SE=579.98
ci90 <- c(pt.est_all-(1.64*se_all), pt.est_all+(1.64*se_all)) # [16017, 17919]
ci95 <- c(pt.est_all-(1.96*se_all), pt.est_all+(1.96*se_all)) # [15831, 18105]
# Severely rent-burdened HHs: Married couple
x <- rent_bur_50to101 %>% filter(HHT==1)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=130.55
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [542, 1054]
# Prop SE: Severely rent-burdened married couple / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0075
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.0323, 0.0618]
# Severely rent-burdened HHs: single female headed
x <- rent_bur_50to101 %>% filter(HHT==3)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=472.02
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [6382, 8232]
# Prop SE: Severely rent-burdened single female headed / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0236
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.384, 0.477]
# Severely rent-burdened HHs: male living alone
x <- rent_bur_50to101 %>% filter(HHT==4)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=340.45
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [2696, 4030]
# Prop SE: Severely rent-burdened male living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0189
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.161, 0.235]
# Severely rent-burdened HHs: female living alone
x <- rent_bur_50to101 %>% filter(HHT==6)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=289.44
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [3570, 4704]
# Prop SE: Severely rent-burdened female living alone / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0149
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.215, 0.273]
# Severely rent-burdened HHs: other HH type
x <- rent_bur_50to101 %>% filter(HHT==2 | HHT==5 | HHT==7)
pt.est <- sum(x$WGTP)
rep.ests <- sapply(rep.names, function(n) sum(x[[n]]))
se <- sqrt((4/80) * sum((rep.ests - pt.est)^2)) # SE=175.04
ci90 <- c(pt.est-(1.64*se), pt.est+(1.64*se))
ci95 <- c(pt.est-(1.96*se), pt.est+(1.96*se)) # [1020, 1706]
# Prop SE: Severely rent-burdened other HH type / all
se_prop <- (1/pt.est_all)*(sqrt(se^2-((pt.est^2/pt.est_all^2)*se_all^2))) # 0.0099
prop <- (pt.est/pt.est_all)
ci90_prop <- c(prop-(1.64*se_prop), prop+(1.64*se_prop))
# FIX: previous comment read [0.061, 0.010] (lower > upper); with prop ~0.080
# and se_prop ~0.0099 the 95% CI is approximately [0.061, 0.100].
ci95_prop <- c(prop-(1.96*se_prop), prop+(1.96*se_prop)) # [0.061, 0.100]
#------------------------------------------------------------------------------
# HHL: Household Language
#------------------------------------------------------------------------------
# (1) English only, (2) Spanish, (3) Other Indo-European languages
# (4) Asian and Pacific Island languages, (5) Other languages
# Weighted counts and shares of household language, by rent-burden status.
# All Rental HHs
tapply(hh_rental$WGTP, list(hh_rental$HHL), sum)
prop.table(tapply(hh_rental$WGTP, list(hh_rental$HHL), sum))
# Largest categories: 1 (79%), 2 (15%), (3%)
# Any rent burdened households (>=30% income)
tapply(rent_bur$WGTP, list(rent_bur$HHL), sum)
prop.table(tapply(rent_bur$WGTP, list(rent_bur$HHL), sum))
# Largest categories: 1 (79%), 2 (17%), 3 (2%)
# Non rent burdened households (<30% income)
tapply(rent_bur_no$WGTP, list(rent_bur_no$HHL), sum)
prop.table(tapply(rent_bur_no$WGTP, list(rent_bur_no$HHL), sum))
# Largest categories: 1 (81%), 2 (11%), 3 (4%)
# Rent burdened households (>=30% and <50%)
tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$HHL), sum)
prop.table(tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$HHL), sum))
# Largest categories: 1 (81%), 2 (15%), 3 (2%)
# Severely rent burdened households >=50%
tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$HHL), sum)
prop.table(tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$HHL), sum))
# Largest categories: 1 (76%), 2 (19%), 3 (3%)
# I find it interesting that:
# Moving from rent burdened to severely rent burdened, the proportion of
# households that are English speaking decreases, and the proportion that
# are Spanish speaking increases
#------------------------------------------------------------------------------
# MV: Rent Burden by Length of Time in Unit
#------------------------------------------------------------------------------
# 1 = 12 mos or less; 2 = 13 to 23 mos; 3 = 2-4 years; 4 = 5-9 years
# 5 = 10-19 years; 6 = 20-29 years; 7 = 30+ years
# Weighted counts and shares of tenure length, by rent-burden status.
# All Rental HHs
tapply(hh_rental$WGTP, list(hh_rental$MV), sum)
prop.table(tapply(hh_rental$WGTP, list(hh_rental$MV), sum))
# Any rent burdened households (>=30% income)
tapply(rent_bur$WGTP, list(rent_bur$MV), sum)
prop.table(tapply(rent_bur$WGTP, list(rent_bur$MV), sum))
# Non rent burdened households (<30% income)
tapply(rent_bur_no$WGTP, list(rent_bur_no$MV), sum)
prop.table(tapply(rent_bur_no$WGTP, list(rent_bur_no$MV), sum))
# Rent burdened households (>=30% and <50%)
tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$MV), sum)
prop.table(tapply(rent_bur_30to50$WGTP, list(rent_bur_30to50$MV), sum))
# Severely rent burdened households >=50%
tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$MV), sum)
prop.table(tapply(rent_bur_50to101$WGTP, list(rent_bur_50to101$MV), sum))
# Calculate average and standard error for length of time in rental unit
time_in_unit <- select(hh_roc, MV, WGTP) %>% tally(wt=WGTP)
# FIX: 'na.rm==TRUE' was a comparison expression, not the named argument,
# so NA values of MV were not being removed from the weighted mean.
weighted.mean(all_roc$MV, all_roc$WGTP_HH, na.rm = TRUE) # 3.56 (mean between 2-4 years)
w.median(all_roc$MV, all_roc$WGTP_HH) # 3 (median between 2-4 years)
#------------------------------------------------------------------------------
# MV: Length of Time in Unit by Income
#------------------------------------------------------------------------------
# Use AMI categories created previously (ami_mon_cat)
# 1=0-30%, 2=30-50%, 3=50-60%, 4=60-80%, 5=80-100%, 6=100-120%, 7=120%+
# Restrict to households at or below 80% AMI (categories 1-4).
d <- hh_rental %>% filter(ami_mon_cat %in% 1:4)
#-------------------------- All Rental HHs
z <- d
w <- z$WGTP
v <- z$ami_mon_cat
# Weighted counts and shares by AMI category
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $36,361.46
# NOTE(review): 'var' and 'sd' shadow the base stats functions of the same
# name for the rest of the session - consider renaming.
var <- wtd.var(z$HINCP_adj, w) # from HMISC package
sd <- sqrt(var) # $41,857.30
# use "survey" package to set survey design and specify replicate weights
# NOTE(review): starts_with("WGTP") also matches the main weight column WGTP
# itself, not only WGTP1-WGTP80 - confirm srvyr handles/excludes it here.
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=36361, se=454
# Repeat the weighted income summary for each length-of-tenure group (MV).
# Each subsection: weighted counts/shares by AMI category, weighted mean and
# SD of adjusted HH income, then replicate-weight mean/SE via srvyr.
# ------------------------- HHs in unit 12 mos or less
z <- d %>% filter(MV==1)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $36,626.06
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $33,459.45
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=36626, se=601
# -------------------------- HHs in unit 13-23 mos
z <- d %>% filter(MV==2)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $43,721.19
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $49,303.48
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=43721, se=1594
# -------------------------- HHs in unit 2-4 years
z <- d %>% filter(MV==3)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $35,487.76
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $35,914.70
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=35488, se=817
# --------------------------- HHs in unit 5-9 years
z <- d %>% filter(MV==4)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $35,991.89
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $60,767.78
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=35992, se=1389
# ---------------------------- HHs in unit 10+ years
z <- d %>% filter(MV %in% 5:7)
w <- z$WGTP
v <- z$ami_mon_cat
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
weighted.mean(z$HINCP_adj, w) # $32,305.18
var <- wtd.var(z$HINCP_adj, w)
sd <- sqrt(var) # $32,915.49
# NOTE(review): this naive SRS formula understates the SE relative to the
# replicate-weight estimate below (378 vs 901) and is computed only for this
# subsection - probably superseded by the survey-package calculation.
stderror <- sd/sqrt(sum(w)) # 378
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- z %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of HINCP_adj
# NOTE(review): 32035 below vs 32305.18 above - one of the two recorded
# values is likely a transcription typo; re-run to confirm.
pumsd_hh %>% filter(!is.na(HINCP_adj)) %>%
summarise(survey_mean(HINCP_adj, na.rm = TRUE)) # mean=32035, se=901
#------------------------------------------------------------------------------
# MV: Length of Time in Rental Unit by Presence of Children (HUPAC)
#------------------------------------------------------------------------------
# HUPAC
# 1=Children under 6 only; 2=Children 6-17 only; 3=Children <6 & 6-17; 4=No children
# Weighted counts and shares of child presence, by length of tenure (MV).
# NOTE: this reuses 'd' but redefines it first, so it is independent of the
# AMI-restricted 'd' from the previous section.
d <- hh_rental %>% filter(HUPAC %in% 1:4)
#-------------------------- All Rental HHs
z <- d
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 12 mos or less
z <- d %>% filter(MV==1)
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 13-23 mos
z <- d %>% filter(MV==2)
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 2-4 years
z <- d %>% filter(MV==3)
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 5-9 years
z <- d %>% filter(MV==4)
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 10+ years
z <- d %>% filter(MV %in% 5:7)
w <- z$WGTP
v <- z$HUPAC
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
#===============================================================================
# Characteristics of those who are rent burdened - merged data
#===============================================================================
# Generate categories for rent burden in merged data
# Intervals are left-open/right-closed (right=TRUE): label 1 = (0,30],
# label 2 = (30,50], etc.
# NOTE(review): with right=TRUE a household at exactly GRPIP==30 falls in
# label 1 ("non-burdened"), while labels elsewhere in this script say
# ">=30% = burdened"; also GRPIP==0 falls outside (0,30] and becomes NA -
# confirm these boundary cases are intended.
all_roc$GRPIP_cat <- cut(all_roc$GRPIP, breaks = c(0, 30, 50, 60, 80, 100, 10000000), labels = c(1,2,3,4,5,6), right = TRUE)
summary(all_roc$GRPIP_cat)
prop.table(tapply(all_roc$WGTP_HH, list(all_roc$GRPIP_cat), sum))
# Sanity check: the proportions are the same as when I used just the HH data,
# which means the HH weights I created in the merged dataset are correct
# (I created the correct weight variable in Stata. The variable is "WGTP_HH")
# Generate categories for age in merged data
all_roc$age_cat <- cut(all_roc$AGEP, breaks = c(0, 20, 30, 50, 70, 10000000), labels = c(1,2,3,4,5), right = TRUE)
# Generate categories for race in merged data
# RACE = RAC1P when HISP==1 (presumably PUMS "not Hispanic" - confirm),
# otherwise 10 = Hispanic (any race).
all_roc$RACE = ifelse(all_roc$HISP == 01, all_roc$RAC1P, 10)
# Generate categories for employment status in merged data
# ESR (employment status recode)
# 1=civilian employed, at work; 2=civilian employed, with a job but not at work
# 3=unemployed; 4=Armed forces, at work; 5=Armed forces, with a job but not at work
# 6=Not in labor force
all_roc$EMP = ifelse(all_roc$ESR %in% 1:2 | all_roc$ESR %in% 4:5, 1, all_roc$ESR) #EMP=1,3,6
all_roc$EMP = ifelse(all_roc$EMP == 6, 4, all_roc$EMP) #EMP: 1=employed, 3=unemployed, 4=out of LF
# Generate part-time and full-time for EMP
# NOTE(review): if WKHP is NA for an employed record, ifelse propagates NA
# into EMP here - confirm employed records always have non-missing WKHP.
all_roc$EMP = ifelse(all_roc$EMP==1 & all_roc$WKHP<40, 2, all_roc$EMP)
summary(all_roc$EMP)
#EMP: 1=full-time employed, 2=part-time employed, 3=unemployed, 4=out of labor force
# Generate rent burdened category variables
rent_all <- all_roc %>% filter(TEN==3) # All rental HHs
rent_bur_all <- all_roc %>% filter(GRPIP_cat %in% 2:6) # >=30% income on rent
rent_bur_non <- all_roc %>% filter(GRPIP_cat==1) # <30% income on rent
rent_bur_slight <- all_roc %>% filter(GRPIP_cat==2) # >=30% and <50% income on rent
rent_bur_severe <- all_roc %>% filter(GRPIP_cat %in% 3:6) # >=50% income on rent
#-------------------------------------------------------------------------------
# Length of time in rental unit by age (MV, age_cat)
#-------------------------------------------------------------------------------
# 1 = 12 mos or less; 2 = 13 to 23 mos; 3 = 2-4 years; 4 = 5-9 years
# 5 = 10-19 years; 6 = 20-29 years; 7 = 30+ years
# Restrict dataset to head of household (HOH)
# Use SPORDER to collapse data and create one obs. per HH of head of household
rent_hoh <- rent_all %>%
arrange(SERIALNO, SPORDER) %>%
filter(SPORDER==1)
# Generate age categories for <30, 30-50, 50-70, 70+
rent_hoh$age_cat_2 <- cut(rent_hoh$AGEP, breaks = c(0, 30, 50, 70, 10000000), labels = c(1,2,3,4), right = TRUE)
# All Rental HOHs
z <- rent_hoh
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
# HOHs in unit 12 mos or less
z <- rent_hoh %>% filter(MV==1)
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
# HOHs in unit 13-23 mos
z <- rent_hoh %>% filter(MV==2)
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
# HOHs in unit 2-4 years
z <- rent_hoh %>% filter(MV==3)
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
# HOHs in unit 5-9 years
z <- rent_hoh %>% filter(MV==4)
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
# HOHs in unit 10+ years
z <- rent_hoh %>% filter(MV %in% 5:7)
tapply(z$PWGTP, list(z$age_cat_2), sum)
prop.table(tapply(z$PWGTP, list(z$age_cat_2), sum))
#-------------------------------------------------------------------------------
# Length of time in rental unit by race (MV, RACE)
#-------------------------------------------------------------------------------
# RACE: 1=White, 2=Black, 10=Hispanic
# All Rental HOHs
z <- rent_hoh
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 12 mos or less
z <- rent_hoh %>% filter(MV==1)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 13-23 mos
z <- rent_hoh %>% filter(MV==2)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 2-4 years
z <- rent_hoh %>% filter(MV==3)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 5-9 years
z <- rent_hoh %>% filter(MV==4)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
# HOHs in unit 10+ years
z <- rent_hoh %>% filter(MV %in% 5:7)
tapply(z$PWGTP, list(z$RACE), sum)
prop.table(tapply(z$PWGTP, list(z$RACE), sum))
#-------------------------------------------------------------------------------
# Length of time in rental unit by employment status (MV; EMP)
#-------------------------------------------------------------------------------
# EMP: 1=full-time employed, 2=part-time employed, 3=unemployed, 4=out of labor force
# Restrict to head of households only (HOH)
d <- rent_hoh %>% filter(EMP %in% 1:4)
#-------------------------- All Rental HHs
z <- d
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 12 mos or less
z <- d %>% filter(MV==1)
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 13-23 mos
z <- d %>% filter(MV==2)
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 2-4 years
z <- d %>% filter(MV==3)
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 5-9 years
z <- d %>% filter(MV==4)
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
# ------------------------- HHs in unit 10+ years
z <- d %>% filter(MV %in% 5:7)
w <- z$PWGTP
v <- z$EMP
tapply(w, list(v), sum)
prop.table(tapply(w, list(v), sum))
#-------------------------------------------------------------------------------
# AGEP: Age of Single-Renter HHs
#-------------------------------------------------------------------------------
# What is the average age of male and female single rental HHs? (see HHT section above)
# HHT==4 / HHT==6 are assumed to be male / female nonfamily householders
# living alone -- TODO confirm against the PUMS HHT codes; TEN==3 = renter.
hh_single <- all_roc %>% filter((HHT==4 | HHT==6) & TEN==3)
hh_single_f <- all_roc %>% filter(HHT==6 & TEN==3)
hh_single_m <- all_roc %>% filter(HHT==4 & TEN ==3)
# Single HHs rent-burden categories
rent_bur_all_s <- rent_bur_all %>% filter(HHT==4 | HHT==6)
rent_bur_non_s <- rent_bur_non %>% filter(HHT==4 | HHT==6)
rent_bur_slight_s <- rent_bur_slight %>% filter(HHT==4 | HHT==6)
rent_bur_severe_s <- rent_bur_severe %>% filter(HHT==4 | HHT==6)
# Female single HHs rent-burden categories
rent_bur_all_sf <- rent_bur_all %>% filter(HHT==6)
rent_bur_non_sf <- rent_bur_non %>% filter(HHT==6)
rent_bur_slight_sf <- rent_bur_slight %>% filter(HHT==6)
rent_bur_severe_sf <- rent_bur_severe %>% filter(HHT==6)
# Male single HHs rent-burden categories
rent_bur_all_sm <- rent_bur_all %>% filter(HHT==4)
rent_bur_non_sm <- rent_bur_non %>% filter(HHT==4)
rent_bur_slight_sm <- rent_bur_slight %>% filter(HHT==4)
rent_bur_severe_sm <- rent_bur_severe %>% filter(HHT==4)
# Average age of single HHs, by gender
# (the first two lines compare HH weights vs person weights as a check)
weighted.mean(hh_single$AGEP, hh_single$WGTP_HH) # all: 48.8 years old
weighted.mean(hh_single$AGEP, hh_single$PWGTP) # all: 48.8 years old
weighted.mean(hh_single_f$AGEP, hh_single_f$PWGTP) # female: 50.1 years old
weighted.mean(hh_single_m$AGEP, hh_single_m$PWGTP) # male: 47.6 years old
# Average age of rent-burdened single HHs
# NOTE(review): rent_bur_single / rent_bur_single_f / rent_bur_single_m are not
# created in this section (the subsets above use the *_s / *_sf / *_sm names);
# confirm they are defined earlier in the script or these three lines will error.
weighted.mean(rent_bur_single$AGEP, rent_bur_single$PWGTP) # all: 51.1 years old
weighted.mean(rent_bur_single_f$AGEP, rent_bur_single_f$PWGTP) # female: 51.9 years old
weighted.mean(rent_bur_single_m$AGEP, rent_bur_single_m$PWGTP) # male: 50.3 years old
#----------------------------- Single renter HHs
tapply(hh_single$PWGTP, list(hh_single$age_cat), sum)
prop.table(tapply(hh_single$PWGTP, list(hh_single$age_cat), sum))
# Any rent-burdened single renter HHs
tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$age_cat), sum)
prop.table(tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$age_cat), sum))
# Non rent-burdened single renter HHs
tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$age_cat), sum)
prop.table(tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$age_cat), sum))
# Rent-burdened single renter HHs
tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$age_cat), sum)
prop.table(tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$age_cat), sum))
# Severely rent-burdened single renter HHs
tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$age_cat), sum)
prop.table(tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$age_cat), sum))
#-------------------------------- Female single renter HHs
tapply(hh_single_f$PWGTP, list(hh_single_f$age_cat), sum)
prop.table(tapply(hh_single_f$PWGTP, list(hh_single_f$age_cat), sum))
# Any rent-burdened female single renter HHs
tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$age_cat), sum)
prop.table(tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$age_cat), sum))
# Non rent-burdened female single renter HHs
tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$age_cat), sum)
prop.table(tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$age_cat), sum))
# Rent-burdened female single renter HHs
tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$age_cat), sum)
prop.table(tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$age_cat), sum))
# Severely rent-burdened female single renter HHs
tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$age_cat), sum)
prop.table(tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$age_cat), sum))
#-------------------------------- Male single renter HHs
tapply(hh_single_m$PWGTP, list(hh_single_m$age_cat), sum)
prop.table(tapply(hh_single_m$PWGTP, list(hh_single_m$age_cat), sum))
# Any rent-burdened male single renter HHs
tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$age_cat), sum)
prop.table(tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$age_cat), sum))
# Non rent-burdened male single renter HHs
tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$age_cat), sum)
prop.table(tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$age_cat), sum))
# Rent-burdened male single renter HHs
tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$age_cat), sum)
prop.table(tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$age_cat), sum))
# Severely rent-burdened male single renter HHs
tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$age_cat), sum)
prop.table(tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$age_cat), sum))
#-------------------------------------------------------------------------------
# RACE: Race of All Renter HHs
#-------------------------------------------------------------------------------
# RACE variable
# 1 = White alone; 2 = Black alone; 3 = American Indian alone; 4 = Alaska Native alone
# 5 = American Indian & Alaskan Native; 6 = Asian alone; 7 = Native Hawaiian / Pacific Islander alone
# 8 = Some other race alone; 9 = Two or more races; 10 = Hispanic
# (Categories for race generated in previous section in merged data)
# Race proportions in Rochester population
prop.table(tapply(all_roc$PWGTP, list(all_roc$RACE), sum))
# 36.6% White, 38.3% Black, 18.4% Hispanic
# For now I'll look at the population. I need to figure out how to collapse at the
# HH level after creating the RACE variable, to do the HH analysis (will be more accurate)
# Race of renter household population
tapply(rent_all$PWGTP, list(rent_all$RACE), sum)
prop.table(tapply(rent_all$PWGTP, list(rent_all$RACE), sum))
# Race of all rent burdened population (>=30% income)
tapply(rent_bur_all$PWGTP, list(rent_bur_all$RACE), sum)
prop.table(tapply(rent_bur_all$PWGTP, list(rent_bur_all$RACE), sum))
# Race of non rent-burdened population (<30% income)
tapply(rent_bur_non$PWGTP, list(rent_bur_non$RACE), sum)
prop.table(tapply(rent_bur_non$PWGTP, list(rent_bur_non$RACE), sum))
# Race of slightly rent burdened population (>=30% and <50% income)
tapply(rent_bur_slight$PWGTP, list(rent_bur_slight$RACE), sum)
prop.table(tapply(rent_bur_slight$PWGTP, list(rent_bur_slight$RACE), sum))
# Race of severely rent burdened population (>=50% income)
tapply(rent_bur_severe$PWGTP, list(rent_bur_severe$RACE), sum)
prop.table(tapply(rent_bur_severe$PWGTP, list(rent_bur_severe$RACE), sum))
# Race of single renter HHs (see section HHT above)
prop.table(tapply(hh_single$PWGTP, list(hh_single$RACE), sum))
# Race of rent-burdened single renter HHs
prop.table(tapply(rent_bur_single$PWGTP, list(rent_bur_single$RACE), sum))
#-------------------------------------------------------------------------------
# MAR: Marital Status of Single-Renter HHs
#-------------------------------------------------------------------------------
# Marital status of single-renter HHs
# 1 = Married, 2 = Widowed, 3 = Divorced, 4 = Separated, 5 = Never married
#----------------------------- Single renter HHs
tapply(hh_single$PWGTP, list(hh_single$MAR), sum)
prop.table(tapply(hh_single$PWGTP, list(hh_single$MAR), sum))
# Any rent-burdened single renter HHs
tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$MAR), sum)
prop.table(tapply(rent_bur_all_s$PWGTP, list(rent_bur_all_s$MAR), sum))
# Non rent-burdened single renter HHs
tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$MAR), sum)
prop.table(tapply(rent_bur_non_s$PWGTP, list(rent_bur_non_s$MAR), sum))
# Rent-burdened single renter HHs
tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$MAR), sum)
prop.table(tapply(rent_bur_slight_s$PWGTP, list(rent_bur_slight_s$MAR), sum))
# Severely rent-burdened single renter HHs
tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$MAR), sum)
prop.table(tapply(rent_bur_severe_s$PWGTP, list(rent_bur_severe_s$MAR), sum))
#-------------------------------- Female single renter HHs
tapply(hh_single_f$PWGTP, list(hh_single_f$MAR), sum)
prop.table(tapply(hh_single_f$PWGTP, list(hh_single_f$MAR), sum))
# Any rent-burdened female single renter HHs
tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$MAR), sum)
prop.table(tapply(rent_bur_all_sf$PWGTP, list(rent_bur_all_sf$MAR), sum))
# Non rent-burdened female single renter HHs
tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$MAR), sum)
prop.table(tapply(rent_bur_non_sf$PWGTP, list(rent_bur_non_sf$MAR), sum))
# Rent-burdened female single renter HHs
tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$MAR), sum)
prop.table(tapply(rent_bur_slight_sf$PWGTP, list(rent_bur_slight_sf$MAR), sum))
# Severely rent-burdened female single renter HHs
tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$MAR), sum)
prop.table(tapply(rent_bur_severe_sf$PWGTP, list(rent_bur_severe_sf$MAR), sum))
#-------------------------------- Male single renter HHs
tapply(hh_single_m$PWGTP, list(hh_single_m$MAR), sum)
prop.table(tapply(hh_single_m$PWGTP, list(hh_single_m$MAR), sum))
# Any rent-burdened male single renter HHs
tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$MAR), sum)
prop.table(tapply(rent_bur_all_sm$PWGTP, list(rent_bur_all_sm$MAR), sum))
# Non rent-burdened male single renter HHs
tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$MAR), sum)
prop.table(tapply(rent_bur_non_sm$PWGTP, list(rent_bur_non_sm$MAR), sum))
# Rent-burdened male single renter HHs
tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$MAR), sum)
prop.table(tapply(rent_bur_slight_sm$PWGTP, list(rent_bur_slight_sm$MAR), sum))
# Severely rent-burdened male single renter HHs
tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$MAR), sum)
prop.table(tapply(rent_bur_severe_sm$PWGTP, list(rent_bur_severe_sm$MAR), sum))
#-------------------------------------------------------------------------------
# Marital Status of Single-Renter HHs Ages 50-70
#-------------------------------------------------------------------------------
# MAR: 1 = Married, 2 = Widowed, 3 = Divorced, 4 = Separated, 5 = Never married
# age_cat==4 corresponds to ages 50-70 (see age_cat definition above).
# NOTE(review): single_50to70_f / single_50to70_m and the *_sffifty / *_smfifty
# subsets used below are not created anywhere in this visible section --
# presumably defined earlier in the script; verify before running.
#----------------------------- 50-70 Single renter HHs
z <- hh_single %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened 50-70 single renter HHs
z <- rent_bur_all_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened 50-70 single renter HHs
z <- rent_bur_non_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened 50-70 single renter HHs
z <- rent_bur_slight_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened 50-70 single renter HHs
z <- rent_bur_severe_s %>% filter(age_cat==4)
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#-------------------------------- Female single renter HHs
z <- single_50to70_f
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened female 50-70 single renter HHs
z <- rent_bur_all_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened female 50-70 single renter HHs
z <- rent_bur_non_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened female 50-70 single renter HHs
z <- rent_bur_slight_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened female 50-70 single renter HHs
z <- rent_bur_severe_sffifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#-------------------------------- Male single renter HHs
z <- single_50to70_m
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Any rent-burdened male 50-70 single renter HHs
z <- rent_bur_all_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Non rent-burdened male 50-70 single renter HHs
z <- rent_bur_non_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Rent-burdened male 50-70 single renter HHs
z <- rent_bur_slight_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
# Severely rent-burdened male 50-70 single renter HHs
z <- rent_bur_severe_smfifty
tapply(z$PWGTP, list(z$MAR), sum)
prop.table(tapply(z$PWGTP, list(z$MAR), sum))
#----------------- Occupation of Female-Headed in Labor Force Family HHs ------------
# FES==7 is assumed to be "female householder, no husband present, in labor
# force" -- TODO confirm against the PUMS FES codes.
female_lf <- rent_all %>% filter(FES==7) # Rental HH families lead by single female in LF
# Restrict data to HH head only
# Try function from "collapse" package (collap / ffirst keep the first row
# per SERIALNO; author was unsure this works -- see note below)
hh_female_lf_2 <- collap(female_lf, ~ SERIALNO, ffirst) # keep only first obs of SERIALNO
hh_test <- collap(all_roc, ~ SERIALNO, ffirst) # Test collapse with "all_roc" to see if I get # of obs in "hh_roc" data
# Not sure that this works correctly
# Use SPORDER to collapse data and create one obs. per HH
# (SPORDER==1 is the householder in PUMS person records)
female_lf <- arrange(female_lf, SERIALNO, SPORDER)
hh_female_lf <- female_lf %>% filter(SPORDER==1)
# Occupation code: INDP, NAICSP
tapply(hh_female_lf$PWGTP, list(hh_female_lf$INDP), sum)
prop.table(tapply(hh_female_lf$PWGTP, list(hh_female_lf$INDP), sum))
# Create occupation categories (based on INDP codes, see data dictionary)
hh_female_lf$ind_cat <- cut(hh_female_lf$INDP, breaks = c(0, 300, 500, 700, 1000, 4000, 4600, 6000, 6400, 6800, 7200, 7800, 7900, 8300, 8500, 8700, 9300, 9600, 9900, 10000000),
labels = c("AGR", "EXT", "UTL", "CON", "MFG", "WHL", "RET", "TRN", "INF", "FIN", "PRF", "EDU", "MED", "SCA", "ENT", "SRV", "ADM", "MIL", "UEM"), right = TRUE)
# Create subsets of female single-headed HHs based on rent burden.
# NOTE(review): the category choices here differ from earlier sections --
# "bur" uses GRPIP_cat 2:5 (earlier sections used 2:6) and "severe" is cat 6
# only (>100% of income; earlier "severe" was 3:6, i.e. >=50%). Cat 6 is thus
# excluded from hh_female_lf_bur entirely -- confirm this is intentional.
hh_female_lf_bur <- hh_female_lf %>% filter(GRPIP_cat %in% 2:5)
hh_female_lf_bur_slight <- hh_female_lf %>% filter(GRPIP_cat==2)
hh_female_lf_bur_high <- hh_female_lf %>% filter(GRPIP_cat %in% 3:5)
hh_female_lf_bur_severe <- hh_female_lf %>% filter(GRPIP_cat==6)
# All single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf$PWGTP, list(hh_female_lf$ind_cat), sum)
# Rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur$PWGTP, list(hh_female_lf_bur$ind_cat), sum)
# Slightly rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_slight$PWGTP, list(hh_female_lf_bur_slight$ind_cat), sum)
# Highly rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_high$PWGTP, list(hh_female_lf_bur_high$ind_cat), sum)
# Severely rent-burdened single-female-headed-HHs in LF occupation categories
tapply(hh_female_lf_bur_severe$PWGTP, list(hh_female_lf_bur_severe$ind_cat), sum)
#----------------- Rent Burden of Different Occupations -------------------------
# Create occupation categories (based on INDP codes, see data dictionary)
all_roc$ind_cat <- cut(all_roc$INDP, breaks = c(0, 300, 500, 700, 1000, 4000, 4600, 6000, 6400, 6800, 7200, 7800, 7900, 8300, 8500, 8700, 9300, 9600, 9900, 10000000),
labels = c("AGR", "EXT", "UTL", "CON", "MFG", "WHL", "RET", "TRN", "INF", "FIN", "PRF", "EDU", "MED", "SCA", "ENT", "SRV", "ADM", "MIL", "UEM"), right = TRUE)
# Rent-burden distribution (GRPIP_cat) within selected industry categories,
# person-weighted. Percentages in trailing comments are the author's results.
# Manufacturing
mfg <- all_roc %>% filter(ind_cat=="MFG")
tapply(mfg$PWGTP, list(mfg$GRPIP_cat), sum)
prop.table(tapply(mfg$PWGTP, list(mfg$GRPIP_cat), sum)) # 36.8% are rent burdened
# Retail
ret <- all_roc %>% filter(ind_cat=="RET")
tapply(ret$PWGTP, list(ret$GRPIP_cat), sum)
prop.table(tapply(ret$PWGTP, list(ret$GRPIP_cat), sum)) # 51.7% are rent burdened
# Professional
prf <- all_roc %>% filter(ind_cat=="PRF")
tapply(prf$PWGTP, list(prf$GRPIP_cat), sum)
prop.table(tapply(prf$PWGTP, list(prf$GRPIP_cat), sum)) # 47.2% are rent burdened
# Medical
med <- all_roc %>% filter(ind_cat=="MED")
tapply(med$PWGTP, list(med$GRPIP_cat), sum)
prop.table(tapply(med$PWGTP, list(med$GRPIP_cat), sum)) # 40.5% are rent burdened
# Social services and care
sca <- all_roc %>% filter(ind_cat=="SCA")
tapply(sca$PWGTP, list(sca$GRPIP_cat), sum)
prop.table(tapply(sca$PWGTP, list(sca$GRPIP_cat), sum)) # 47.1% are rent burdened
# Standard Error Example
# ACS/PUMS replicate-weight variance: SE = sqrt((4/80) * sum over the 80
# replicate weights WGTP1..WGTP80 of (replicate estimate - full estimate)^2.
# VACS==1 is assumed to be "vacant, for rent" -- TODO confirm in dictionary.
own <- filter(hh_roc, VACS==1)
pt.est <- sum(own$WGTP)                                   # full-sample point estimate
rep.names <- paste0('WGTP', 1:80)                         # replicate weight columns
rep.ests <- sapply(rep.names, function(n) sum(own[[n]]))  # 80 replicate estimates
sqrt((4/80) * sum((rep.ests - pt.est)^2))                 # standard error
#-------------------------------------------------------------------------------
# OCPIP and HINCP: Gross owner costs as % of HH income; HH income
#-------------------------------------------------------------------------------
# Histogram of owner costs as % of HH income
hh_roc %>%
ggplot(aes(x=OCPIP, weight = WGTP)) +
geom_histogram() +
stat_bin(binwidth=5, geom="text", aes(label=..count..), vjust=-1.0) # Gross rent as % HH income
# Generate categories for home ownership cost burden
hh_roc$OCPIP_cat <- cut(hh_roc$OCPIP, breaks = c(0, 30, 50, 60, 80, 100, 10000000), labels = c(1,2,3,4,5,6), right = TRUE)
summary(hh_roc$OCPIP_cat)
tapply(hh_roc$WGTP, list(hh_roc$OCPIP_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$OCPIP_cat), sum))
# <30% income: 77.2%
# 30-50% income: 13.0%
# 50-60% income: 2.8%
# 60-80% income: 2.2%
# 80-100% income: 1.6%
# >100% income: 3.4%
# Graph of OCPIP compared to HH income
hh_roc %>%
ggplot() +
geom_point(aes(x=HINCP, y=OCPIP, size=WGTP), shape=21) +
xlim(0,400000) # graph view restricted to HINCP from $0-$75,000
# Note: compared to graph of GRPIP, this one is far more bottom left,
# meaning far fewer owners are housing burdened
#-------------------------------------------------------------------------------
# Household income by AMI
#-------------------------------------------------------------------------------
# Compute Rochester median income
w_median_roc <- w.median(hh_roc$HINCP, hh_roc$WGTP) # command from cwhmisc package
# Compute Monroe County median income
w_median_mon <- w.median(hh_monroe$HINCP, hh_monroe$WGTP)
# Calculate AMI for Rochester Metro Area
# Look at HUD definition of AMI based on family size
# Generate AMI based on Rochester median income
hh_roc$ami_roc = hh_roc$HINCP/w_median_roc
# Generate Rochester AMI categories
hh_roc$ami_roc_cat <- cut(hh_roc$ami_roc, breaks = c(0, 0.3, 0.5, 0.6, 0.8, 1.0, 1.2, 10000000), labels = c(1,2,3,4,5,6,7), right = TRUE)
summary(hh_roc$ami_roc_cat)
tapply(hh_roc$WGTP, list(hh_roc$ami_roc_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$ami_roc_cat), sum))
# <30% AMI: 12.9%
# 30-50% AMI: 12.2%
# 50-60% AMI: 4.9%
# 60-80% AMI: 10.0%
# 80-100% AMI: 9.0%
# 100-120% AMI: 7.1%
# >=120% AMI: 43.8%
# Generate AMI based on Monroe County median income
hh_roc$ami_mon = hh_roc$HINCP/w_median_mon
# Generate Monroe County AMI categories
hh_roc$ami_mon_cat <- cut(hh_roc$ami_mon, breaks = c(0, 0.3, 0.5, 0.6, 0.8, 1.0, 1.2, 10000000), labels = c(1,2,3,4,5,6,7), right = TRUE)
summary(hh_roc$ami_mon_cat)
tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$ami_mon_cat), sum))
# <30% AMI: 25.5%
# 30-50% AMI: 16.6%
# 50-60% AMI: 7.9%
# 60-80% AMI: 11.5%
# 80-100% AMI: 8.9%
# 100-120% AMI: 6.5%
# >=120% AMI: 23.1%
#-------------------------------------------------------------------------------
# Gross Rent
#-------------------------------------------------------------------------------
# GRNTP = monthly gross rent. Create AGRNTP = annual gross rent
hh_roc$AGRNTP = hh_roc$GRNTP*12
with(hh_roc, Hmisc::wtd.quantile(GRNTP, weights=WGTP))
with(hh_roc, Hmisc::wtd.quantile(AGRNTP, weights=WGTP))
# Calculate income for which the unit is affordable (<=30% HH income)
# Formula: percent AMI = (Gross rent / 0.3 / AMI) * 100
# Rochester AMI
hh_roc$aff_inc_roc = (hh_roc$AGRNTP / 0.3 / w_median_roc)*100
with(hh_roc, Hmisc::wtd.quantile(aff_inc_roc, probs = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1), weights=WGTP))
# At the Rochester AMI, less than 10% of units are affordable for 30% AMI
# Monroe County AMI
hh_roc$aff_inc_mon = (hh_roc$AGRNTP / 0.3 / w_median_mon)*100
with(hh_roc, Hmisc::wtd.quantile(aff_inc_mon, probs = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1), weights=WGTP))
# At Monroe County AMI, approx. 10% of units are affordable for 30% AMI
# Generate Monroe County AMI affordability categories for rental units
hh_roc$aff_inc_cat <- cut(hh_roc$aff_inc_mon, breaks = c(0, 30, 50, 60, 80, 100, 120, 10000000), labels = c(1,2,3,4,5,6,7), right = TRUE)
summary(hh_roc$aff_inc_cat)
tapply(hh_roc$WGTP, list(hh_roc$aff_inc_cat), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$aff_inc_cat), sum))
# 10.1% of units exclusively fall within <30% AMI affordability
# 24.2% of units exclusively fall within 30-50% AMI affordability
# 19.1% of units exclusively fall within 50-60% AMI affordability
# 29.9% of units exclusively fall within 60-80% AMI affordability
# 9.9% of units exclusively fall within 80-100% AMI affordability
# 3.6% of units exclusively fall within 100-120% AMI affordability
# 3.1 % of units exclusively fall within >=120% AMI affordability
# Histogram of rental units at Monroe County AMI
hh_roc %>%
ggplot(aes(x=aff_inc_mon, weight = WGTP)) +
geom_histogram() +
stat_bin(binwidth=10, geom="text", aes(label=..count..), vjust=-1.0) # AMI needed for units
# Generate Rochester AMI affordability categories for rental units
hh_roc$aff_inc_cat_roc <- cut(hh_roc$aff_inc_roc, breaks = c(0, 30, 50, 60, 80, 100, 120, 10000000), labels = c(1,2,3,4,5,6,7), right = TRUE)
summary(hh_roc$aff_inc_cat_roc)
tapply(hh_roc$WGTP, list(hh_roc$aff_inc_cat_roc), sum)
prop.table(tapply(hh_roc$WGTP, list(hh_roc$aff_inc_cat_roc), sum))
# 3.9% of units exclusively fall within <30% AMI affordability
# 6.0% of units exclusively fall within 30-50% AMI affordability
# 3.8% of units exclusively fall within 50-60% AMI affordability
# 15.8% of units exclusively fall within 60-80% AMI affordability
# 22.7% of units exclusively fall within 80-100% AMI affordability
# 20.6% of units exclusively fall within 100-120% AMI affordability
# 27.2% of units exclusively fall within >=120% AMI affordability
# The category percentages are different when I use Rochester AMI because absolute
# rent prices are included in the calculation, which aren't based on AMI
#-------------------------------------------------------------------------------
# Miscellaneous
#-------------------------------------------------------------------------------
#**************************************
# Standard errors attempt - create proportions dataset
all <- hh_roc %>%
select(FES,
starts_with("WGTP"),
starts_with("GRPIP")
)
sub <- hh_rental %>%
select(FES,
starts_with("WGTP"),
starts_with("GRPIP")
)
prop <- sub/all
#**************************************
#**************************************
# Standard errors attempt - calculation of MV variable
# use "survey" package to set survey design and specify replicate weights
pumsd_hh <- hh_roc %>%
as_survey_rep(weights = WGTP, repweights = starts_with("WGTP"), combined_weights = TRUE)
# calculate mean and std. error of "length of time" variable
pumsd_hh %>% filter(!is.na(MV)) %>%
summarise(survey_mean(MV, na.rm = TRUE))
# mean=3.56, se=0.0131
# mean same as with previous method, so this is correct
x <- sqrt(wtd.var(hh_roc$MV, hh_roc$WGTP))
y <- x/sqrt(sum(hh_roc$WGTP))
#*****************************************
#*****************************************
# Standard Errors Attempt - survey package to calculate SE of person-level data, age
# Calculate mean and std. error of age overall and by category
# use "survey" package to set survey design and specify replicate weights
pumsd_all <- all_roc %>%
as_survey_rep(
weights = PWGTP,
repweights = starts_with("PWGTP"),
combined_weights = TRUE
)
# calculate mean and std. error of age
pumsd_all %>%
filter(!is.na(AGEP)) %>%
summarise(
survey_mean(AGEP, na.rm = TRUE)
)
# mean=, se=
# error message: Error in qr.default(weights(design, "analysis"), tol = 1e-05) :
# NA/NaN/Inf in foreign function call (arg 1)
# Another try
pumsd_all <-
svrepdesign(
weight = ~PWGTP ,
repweights = 'PWGTP[0-9]+' ,
scale = 4 / 80 ,
rscales = rep( 1 , 80 ) ,
mse = TRUE ,
type = 'JK1' ,
data = all_roc
)
#*************************************
|
library(data.table)
library(dplyr)
## Read supporting metadata.
## NOTE: paths below are absolute; setwd() is kept because the final
## write.table() call writes "tidydata.txt" relative to this directory.
directory <- setwd("D:/R learning/dataclean/UCI HAR Dataset")
## Variable names for the 561 features, and the activity code/label lookup
## (stray double comma removed from the original read.table call)
featureNames <- read.table("D:/R learning/dataclean/UCI HAR Dataset/features.txt", header = FALSE)
activityLabels <- read.table("D:/R learning/dataclean/UCI HAR Dataset/activity_labels.txt", header = FALSE)
## Read training data
subjectTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
activityTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/y_train.txt", header = FALSE)
featuresTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/X_train.txt", header = FALSE)
## Read test data
subjectTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
activityTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/y_test.txt", header = FALSE)
featuresTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/X_test.txt", header = FALSE)
## Step 1: merge the training and the test sets to create one data set
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)
## Look at the properties of the above datasets
str(subject)
str(features)
str(activity)
## Set descriptive names on the pieces before binding columns
names(subject) <- c("subject")
names(activity) <- c("activity")
names(features) <- featureNames$V2
## Merge columns to get the data frame Data for all data
Data <- cbind(subject, activity, features)
## Step 2: extract only the mean() and std() measurements for each signal
subFeaturesNames <- featureNames$V2[grep("mean\\(\\)|std\\(\\)", featureNames$V2)]
selectedNames <- c(as.character(subFeaturesNames), "subject", "activity")
Data1 <- subset(Data, select = selectedNames)
## Step 3: use descriptive activity names to name the activities
names(activityLabels) <- c("activity", "levels")
## Renamed from `merge` to `merged`: the original shadowed base::merge()
merged <- merge(Data1, activityLabels, by = "activity")
## Step 4: label the data set with descriptive variable names
# prefix t -> time, prefix f -> frequency, Acc -> Accelerometer,
# Gyro -> Gyroscope, Mag -> Magnitude, BodyBody -> Body
names(merged) <- gsub("^t", "time", names(merged))
names(merged) <- gsub("^f", "frequency", names(merged))
names(merged) <- gsub("Acc", "Accelerometer", names(merged))
names(merged) <- gsub("Gyro", "Gyroscope", names(merged))
names(merged) <- gsub("Mag", "Magnitude", names(merged))
names(merged) <- gsub("BodyBody", "Body", names(merged))
## Step 5: second data set with the average of each variable per activity
## and subject. (aggregate() is base R; the unused library(plyr) load was
## removed -- it masked dplyr verbs and nothing here used it.)
Data2 <- aggregate(. ~ subject + activity, merged, mean)
Data2 <- Data2[order(Data2$subject, Data2$activity), ]
write.table(Data2, file = "tidydata.txt", row.names = FALSE)  # was row.name= (partial match)
## Produce codebook -- install knitr only when missing rather than on every run
if (!requireNamespace("knitr", quietly = TRUE)) install.packages("knitr")
library(knitr)
knit2html("codebook.Rmd")
|
/run_analysis.R
|
no_license
|
aparnabasumukherjee/Data-cleaning-Assignment1
|
R
| false
| false
| 3,037
|
r
|
library(data.table)
library(dplyr)
## Read supporting metadata.
## NOTE: paths below are absolute; setwd() is kept because the final
## write.table() call writes "tidydata.txt" relative to this directory.
directory <- setwd("D:/R learning/dataclean/UCI HAR Dataset")
## Variable names for the 561 features, and the activity code/label lookup
## (stray double comma removed from the original read.table call)
featureNames <- read.table("D:/R learning/dataclean/UCI HAR Dataset/features.txt", header = FALSE)
activityLabels <- read.table("D:/R learning/dataclean/UCI HAR Dataset/activity_labels.txt", header = FALSE)
## Read training data
subjectTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
activityTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/y_train.txt", header = FALSE)
featuresTrain <- read.table("D:/R learning/dataclean/UCI HAR Dataset/train/X_train.txt", header = FALSE)
## Read test data
subjectTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
activityTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/y_test.txt", header = FALSE)
featuresTest <- read.table("D:/R learning/dataclean/UCI HAR Dataset/test/X_test.txt", header = FALSE)
## Step 1: merge the training and the test sets to create one data set
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)
## Look at the properties of the above datasets
str(subject)
str(features)
str(activity)
## Set descriptive names on the pieces before binding columns
names(subject) <- c("subject")
names(activity) <- c("activity")
names(features) <- featureNames$V2
## Merge columns to get the data frame Data for all data
Data <- cbind(subject, activity, features)
## Step 2: extract only the mean() and std() measurements for each signal
subFeaturesNames <- featureNames$V2[grep("mean\\(\\)|std\\(\\)", featureNames$V2)]
selectedNames <- c(as.character(subFeaturesNames), "subject", "activity")
Data1 <- subset(Data, select = selectedNames)
## Step 3: use descriptive activity names to name the activities
names(activityLabels) <- c("activity", "levels")
## Renamed from `merge` to `merged`: the original shadowed base::merge()
merged <- merge(Data1, activityLabels, by = "activity")
## Step 4: label the data set with descriptive variable names
# prefix t -> time, prefix f -> frequency, Acc -> Accelerometer,
# Gyro -> Gyroscope, Mag -> Magnitude, BodyBody -> Body
names(merged) <- gsub("^t", "time", names(merged))
names(merged) <- gsub("^f", "frequency", names(merged))
names(merged) <- gsub("Acc", "Accelerometer", names(merged))
names(merged) <- gsub("Gyro", "Gyroscope", names(merged))
names(merged) <- gsub("Mag", "Magnitude", names(merged))
names(merged) <- gsub("BodyBody", "Body", names(merged))
## Step 5: second data set with the average of each variable per activity
## and subject. (aggregate() is base R; the unused library(plyr) load was
## removed -- it masked dplyr verbs and nothing here used it.)
Data2 <- aggregate(. ~ subject + activity, merged, mean)
Data2 <- Data2[order(Data2$subject, Data2$activity), ]
write.table(Data2, file = "tidydata.txt", row.names = FALSE)  # was row.name= (partial match)
## Produce codebook -- install knitr only when missing rather than on every run
if (!requireNamespace("knitr", quietly = TRUE)) install.packages("knitr")
library(knitr)
knit2html("codebook.Rmd")
|
library(dplyr)
library(data.table)
library(stringi)
if(!exists("addSentenceMarks", mode="function")||
!exists("cleanSentence", mode="function")) source("utils.R")
# Lazily load the n-gram frequency tables used by the predictor.
# On first call this loads "onePercentFreq.RData" (which must define
# unigramSW.freq .. fgramSW.freq) and publishes them as data.tables in
# the global list `freqMap`, keyed "unigram".."fgram".  Every call also
# resets the global back-off counter `wrongs` to 0.
# Returns the (possibly freshly built) freqMap list.
# NOTE(review): assumes the .RData file is in the working directory.
loadFreqs <- function(){
if(!exists("freqMap")){
#print("Loading Data")
load("onePercentFreq.RData")
freqMap <<- list()
freqMap[["unigram"]] <<- as.data.table(unigramSW.freq)
freqMap[["bigram"]] <<- as.data.table(bigramSW.freq)
freqMap[["trigram"]] <<- as.data.table(trigramSW.freq)
freqMap[["fgram"]] <<- as.data.table(fgramSW.freq)
}
wrongs <<- 0
freqMap
}
# Build the tables at source time so the functions below can use them.
loadFreqs()
# Return the rows of `freqs` whose n-gram text begins with `literal`.
#
# freqs:   table with a character `word` column (and a `freq` column).
# literal: regular-expression fragment, anchored here at the start.
getMostFrequentStart <- function(freqs, literal) {
  anchored <- paste0("^", literal)
  freqs[grepl(anchored, freqs$word), ]
}
# Return the rows of `freqs` whose n-gram text ends with `literal`.
#
# freqs:   table with a character `word` column (and a `freq` column).
# literal: regular-expression fragment, anchored here at the end.
getMostFrequentEnd <- function(freqs, literal) {
  anchored <- paste0(literal, "$")
  freqs[grepl(anchored, freqs$word), ]
}
# Return the rows of `freqs` whose n-gram text matches `literal`
# anywhere (unanchored regular-expression match).
getMostFrequent <- function(freqs, literal) {
  freqs[grepl(literal, freqs$word), ]
}
#This gives the relative frequency between an n-gram and its n-1gram based model
# It divides the frequency of the ngram by the frequency of the n-1gram
#
# suffixFreqs: (n+1)-gram frequency table; prefixFreqs: n-gram table.
# literal: the context words already typed; partial: the word fragment
# currently being typed (may be ""); amount: number of suggestions.
# Returns a table with columns word (last word only) and relFreq,
# sorted by relFreq descending.
# NOTE(review): the <<- assignments publish intermediates to the global
# environment and look like debugging leftovers -- confirm before
# converting them to locals.
createRelativeFreq <- function(suffixFreqs,prefixFreqs,literal,partial="",amount=3){
# \b-anchor the context so it only matches whole words
prefixRegex <<- paste(paste("\\b",literal,sep=""),"\\b",sep="")
suffixRegex <<- ""
#print(paste("literal used for the prefix ",prefixRegex))
prefixes <<- getMostFrequentEnd(prefixFreqs,prefixRegex)
if(partial != ""){
suffixRegex <<- paste(prefixRegex,partial,collapse=" ")
}else{
suffixRegex <<- prefixRegex
}
#print(paste("literal used for the suffix ",suffixRegex))
possibilities <<- getMostFrequentStart(suffixFreqs,suffixRegex)
# If the prefix was never observed, normalise by the total frequency of
# all continuations instead of the (missing) prefix count.
if(length(prefixes$freq)==0 && length(possibilities$freq)> 0){
previousFreq <- sum(possibilities$freq) #add it to the df
}else{
previousFreq <- head(prefixes,1)$freq
}
possibilities$relFreq <- possibilities$freq/previousFreq
possibilities <- arrange(possibilities,desc(relFreq))
possibilities <- head(select(possibilities,word,relFreq),amount)
# Keep only the predicted (last) word of each matched n-gram.
possibilities$word <- stri_extract_last_words(possibilities$word)
possibilities
}
# Unigram fallback: among words starting with `search`, rank by raw
# frequency and keep the top three.
# words.df: unigram table with `word` and `freq` columns.
getSimpleFreq <- function(words.df,search){
simpleResults <- getMostFrequentStart(words.df,search)
simpleResults <- head(arrange(simpleResults, desc(freq)),3)
simpleResults
}
#Use the regular expression symbol \\W to match non-word characters,
#using + to indicate one or more in a row, along with gregexpr to find
#all matches in a string. Words are the number of word separators plus 1
#
# Core predictor: given the raw (cleaned) input string, return up to
# three next-word suggestions.  If the input ends in whitespace the last
# word is treated as complete; otherwise the trailing fragment is used
# as a partial match.  Backs off recursively to shorter contexts when no
# suggestion is found, incrementing the global `wrongs` counter.
# NOTE(review): relies on trim/trim.leading from the sourced utils.R and
# on the global freqMap -- not self-contained.
createSuggestions <- function(literal){
completeLiteral <-""
if(is.na(literal)){
literal <- " "
}
addOccurence <- FALSE
if (grepl("[[:blank:]]$",literal)){# expects a full-word recommendation
#print("complete word")
wordCount <- sapply(gregexpr("\\W+", literal), length)
completeLiteral <- trim(literal)
partialFinalWord <- ""
addOccurence <- TRUE
}else{# drop the last word and predict the word currently being typed
partialFinalWord <- stri_extract_last_words(literal)
#print(paste("currently writing ",partialFinalWord,sep=""))
#remove last word being typed
completeLiteral <- gsub("\\s*\\w*$","",literal)
wordCount <- sapply(gregexpr("\\W+", completeLiteral), length)
completeLiteral <- trim.leading(completeLiteral)
}
#print(paste("prefix to use",completeLiteral))
#print(paste("partial being used",partialFinalWord))
#reduce literal
#print(paste("Word count of",wordCount))
ngrams <- getNgrams(wordCount)
##This condition help us manage base case of backoff recursion
if(wordCount == 1 && completeLiteral==""){
suggestionList <- getSimpleFreq(ngrams[[1]],partialFinalWord)
}else{
suggestionList <- createRelativeFreq(suffixFreqs = ngrams[[2]],
prefixFreqs = ngrams[[1]],
completeLiteral,
partialFinalWord)
}
if(addOccurence){
#add to corresponding ngram
#print("adding Ocurrence")
addNGramOcurrence(ngrams[[1]],trim(literal),wordCount)
}
if(length(suggestionList$word) == 0){
#backingOff with recursion: retry with the first word removed
wrongs <<- wrongs+1
#print(paste("Backing off to ",wordCount-1))
createSuggestions(gsub("^\\s*\\w*","",literal))
}else{
suggestionList
}
}
# Fetch the pair of frequency tables needed for a context of
# `amountOfWords` words: [[1]] the n-gram table of that order and
# [[2]] the (n+1)-gram table used to score continuations.
# Side effect: stores the higher order in the global `ngram`, exactly
# as the original implementation did.
getNgrams <- function(amountOfWords) {
  ngram <<- amountOfWords + 1
  list(freqMap[[amountOfWords]], freqMap[[ngram]])
}
# Increment (by `amount`) the stored frequency of the n-gram `input` in
# the data.table `frame`, then publish the updated table back into the
# global freqMap under the slot named by getListName(mapIndex).
# Uses data.table's in-place `:=`, so `frame` itself is modified.
# NOTE(review): the local `index` is computed but never used afterwards
# -- possibly it was meant to feed getListName(); confirm intent.
addNGramOcurrence <- function(frame,input,mapIndex,amount = 1){
index <- 0
if(mapIndex == 0){
index <- 1
}else{
index <- mapIndex
}
# Match `input` as whole words at the end of the stored n-gram text.
regexString <- paste(paste("\\b",input,sep=""),"\\b$",sep="")
newGram <- frame[which(grepl(regexString,word)),freq := freq+amount]
modifiedGram <- list()
modifiedGram[[getListName(mapIndex)]] <- newGram
freqMap <<- modifyList(freqMap,modifiedGram)
}
# Map an n-gram order to the name of its slot in `freqMap`.
# Orders 0 and 1 both map to "unigram"; anything above 3 falls through
# to the four-gram table.
getListName <- function(index) {
  if (index == 0 || index == 1) return("unigram")
  if (index == 2) return("bigram")
  if (index == 3) return("trigram")
  "fgram"
}
# Public entry point: clean the raw user input (cleanSentence comes from
# the sourced utils.R) and return up to three suggested next words.
getNextWord <- function(textInput){
createSuggestions(cleanSentence(textInput))
}
# Number of times the back-off recursion had to drop to a shorter
# context since the last loadFreqs() call (global counter `wrongs`).
getUnknowns <- function(){
wrongs
}
#addedDT <- data.table(word=c("qqqq"),freq=c(1))
#dtt <- as.data.table(freqMap[[1]])
#dtt <- rbindlist(list(dtt,addedDT))
|
/predictionModel.R
|
no_license
|
villalobos23/CapProyect
|
R
| false
| false
| 5,338
|
r
|
library(dplyr)
library(data.table)
library(stringi)
if(!exists("addSentenceMarks", mode="function")||
!exists("cleanSentence", mode="function")) source("utils.R")
loadFreqs <- function(){
if(!exists("freqMap")){
#print("Loading Data")
load("onePercentFreq.RData")
freqMap <<- list()
freqMap[["unigram"]] <<- as.data.table(unigramSW.freq)
freqMap[["bigram"]] <<- as.data.table(bigramSW.freq)
freqMap[["trigram"]] <<- as.data.table(trigramSW.freq)
freqMap[["fgram"]] <<- as.data.table(fgramSW.freq)
}
wrongs <<- 0
freqMap
}
loadFreqs()
# Return the rows of `freqs` whose n-gram text begins with `literal`.
#
# freqs:   table with a character `word` column (and a `freq` column).
# literal: regular-expression fragment, anchored here at the start.
getMostFrequentStart <- function(freqs, literal) {
  anchored <- paste0("^", literal)
  freqs[grepl(anchored, freqs$word), ]
}
# Return the rows of `freqs` whose n-gram text ends with `literal`.
#
# freqs:   table with a character `word` column (and a `freq` column).
# literal: regular-expression fragment, anchored here at the end.
getMostFrequentEnd <- function(freqs, literal) {
  anchored <- paste0(literal, "$")
  freqs[grepl(anchored, freqs$word), ]
}
# Return the rows of `freqs` whose n-gram text matches `literal`
# anywhere (unanchored regular-expression match).
getMostFrequent <- function(freqs, literal) {
  freqs[grepl(literal, freqs$word), ]
}
#This gives the relative frequency between an n-gram and its n-1gram based model
# It divides the frequency of the ngram by the frequency of the n-1gram
createRelativeFreq <- function(suffixFreqs,prefixFreqs,literal,partial="",amount=3){
prefixRegex <<- paste(paste("\\b",literal,sep=""),"\\b",sep="")
suffixRegex <<- ""
#print(paste("literal used for the prefix ",prefixRegex))
prefixes <<- getMostFrequentEnd(prefixFreqs,prefixRegex)
if(partial != ""){
suffixRegex <<- paste(prefixRegex,partial,collapse=" ")
}else{
suffixRegex <<- prefixRegex
}
#print(paste("literal used for the suffix ",suffixRegex))
possibilities <<- getMostFrequentStart(suffixFreqs,suffixRegex)
if(length(prefixes$freq)==0 && length(possibilities$freq)> 0){
previousFreq <- sum(possibilities$freq) #add it to the df
}else{
previousFreq <- head(prefixes,1)$freq
}
possibilities$relFreq <- possibilities$freq/previousFreq
possibilities <- arrange(possibilities,desc(relFreq))
possibilities <- head(select(possibilities,word,relFreq),amount)
possibilities$word <- stri_extract_last_words(possibilities$word)
possibilities
}
getSimpleFreq <- function(words.df,search){
simpleResults <- getMostFrequentStart(words.df,search)
simpleResults <- head(arrange(simpleResults, desc(freq)),3)
simpleResults
}
#Use the regular expression symbol \\W to match non-word characters,
#using + to indicate one or more in a row, along with gregexpr to find
#all matches in a string. Words are the number of word separators plus 1
createSuggestions <- function(literal){
completeLiteral <-""
if(is.na(literal)){
literal <- " "
}
addOccurence <- FALSE
if (grepl("[[:blank:]]$",literal)){#Espera recomendacion completa
#print("complete word")
wordCount <- sapply(gregexpr("\\W+", literal), length)
completeLiteral <- trim(literal)
partialFinalWord <- ""
addOccurence <- TRUE
}else{#remover ultima palabra y predecir esa palabra que se esta escribiendo
partialFinalWord <- stri_extract_last_words(literal)
#print(paste("currently writing ",partialFinalWord,sep=""))
#remove last word being typed
completeLiteral <- gsub("\\s*\\w*$","",literal)
wordCount <- sapply(gregexpr("\\W+", completeLiteral), length)
completeLiteral <- trim.leading(completeLiteral)
}
#print(paste("prefix to use",completeLiteral))
#print(paste("partial being used",partialFinalWord))
#reduce literal
#print(paste("Word count of",wordCount))
ngrams <- getNgrams(wordCount)
##This condition help us manage base case of backoff recursion
if(wordCount == 1 && completeLiteral==""){
suggestionList <- getSimpleFreq(ngrams[[1]],partialFinalWord)
}else{
suggestionList <- createRelativeFreq(suffixFreqs = ngrams[[2]],
prefixFreqs = ngrams[[1]],
completeLiteral,
partialFinalWord)
}
if(addOccurence){
#add to corresponding ngram
#print("adding Ocurrence")
addNGramOcurrence(ngrams[[1]],trim(literal),wordCount)
}
if(length(suggestionList$word) == 0){
#backingOff with recursion
wrongs <<- wrongs+1
#print(paste("Backing off to ",wordCount-1))
createSuggestions(gsub("^\\s*\\w*","",literal))
}else{
suggestionList
}
}
getNgrams <- function(amountOfWords){
result <- list()
ngram <<- amountOfWords + 1
result[[1]] <- freqMap[[amountOfWords]]
result[[2]] <- freqMap[[ngram]]
result
}
addNGramOcurrence <- function(frame,input,mapIndex,amount = 1){
index <- 0
if(mapIndex == 0){
index <- 1
}else{
index <- mapIndex
}
regexString <- paste(paste("\\b",input,sep=""),"\\b$",sep="")
newGram <- frame[which(grepl(regexString,word)),freq := freq+amount]
modifiedGram <- list()
modifiedGram[[getListName(mapIndex)]] <- newGram
freqMap <<- modifyList(freqMap,modifiedGram)
}
# Map an n-gram order to the name of its slot in `freqMap`.
# Orders 0 and 1 both map to "unigram"; anything above 3 falls through
# to the four-gram table.
getListName <- function(index) {
  if (index == 0 || index == 1) return("unigram")
  if (index == 2) return("bigram")
  if (index == 3) return("trigram")
  "fgram"
}
getNextWord <- function(textInput){
createSuggestions(cleanSentence(textInput))
}
getUnknowns <- function(){
wrongs
}
#addedDT <- data.table(word=c("qqqq"),freq=c(1))
#dtt <- as.data.table(freqMap[[1]])
#dtt <- rbindlist(list(dtt,addedDT))
|
# ===============================
# About: Time series similarities methods
# Dependences: utils.r, rqa.r
# Author: Lucas Pagliosa
# Data of creation: 12/02/16
# Last revision 12/02/15
# ==============================
source("~/Canon/R/utils.r")
sourceFiles("~/Canon/R/RQA.r")
loadPackages("dtw")
# Raw dynamic-time-warping distance between two series (thin wrapper
# around dtw::dtw; `...` is accepted for interface symmetry with the
# other distance functions but is not forwarded).
dtwr <- function(ts1, ts2, ...)
{
return(dtw(ts1, ts2)$distance)
}
# Normalized DTW distance (dtw::dtw's normalizedDistance field), which
# accounts for the length of the warping path.
dtwn <- function(ts1, ts2, ...)
{
return(dtw(ts1, ts2)$normalizedDistance)
}
# Ratio of raw DTW distance to Euclidean distance between the series.
# NOTE(review): euclidean() comes from the sourced utils.r.
dtwd <- function(ts1, ts2, ...)
{
return(dtwr(ts1, ts2) / euclidean(ts1, ts2))
}
# Mean Euclidean distance from each warping-path point
# (predict[i], test[i]) to the corresponding point on the main diagonal
# of a size x size alignment matrix.  A perfectly diagonal path (i.e. a
# perfect alignment) yields 0.
distanceToDiagonal <- function(predict, test, size)
{
  diag.x <- seq(1, size, length = length(predict))
  diag.y <- seq(1, size, length = length(test))
  offsets <- sqrt((predict - diag.x)^2 + (test - diag.y)^2)
  sum(offsets) / length(test)
}
# MDDL: Mean Distance to the Diagonal Line of the DTW warping path
# between `test` and `predict`.  Optionally plots/saves the alignment
# (savePDF/myPlot come from the sourced utils.r) and, with retDTW=TRUE,
# also returns the raw and normalized DTW distances.
mddl <- function(predict, test, ..., plot = FALSE, shaded = FALSE, save = F,
fileName = "mddl", xlab = "", ylab = "", retDTW = F)
{
# Local `dtw` shadows the dtw() function after this line; the call on
# the right-hand side still resolves to the package function.
dtw = dtw(test, predict)
mddlPlot <- function()
{
plot(dtw, xlab = xlab, ylab = ylab, ...)
# Dashed reference diagonal of a perfect alignment.
lines(x = seq(1, length(predict), length = length(predict)),
y = seq(1,length(test),length = length(predict)), lty = 2)
if(shaded)
{
# Draw a segment from each path point to its diagonal counterpart.
x = seq(1, dtw$N, length = length(dtw$index1))
y = seq(1, dtw$N, length = length(dtw$index2))
for(i in 1:length(dtw$index1))
lines(c(dtw$index1[i], x[i]), c(dtw$index2[i], y[i]))
}
}
if (save)
savePDF(mddlPlot(), fileName)
if (plot)
myPlot(mddlPlot())
dist = distanceToDiagonal(dtw$index1, dtw$index2, length(test))
if (!retDTW)
return(dist)
return(list(MDDL = dist, DTW = dtw$distance, DTWN = dtw$normalizedDistance))
}
# Console demo: compare several similarity measures (Euclidean, raw DTW,
# DTW/ED ratio, normalized DTW, MDDL, eigenvalue distance of embedded
# covariances, and RQA maxline) on a sine wave corrupted by increasing
# Gaussian noise and on its flat mean line, printing each to stdout.
# NOTE(review): euclidean/eig/embedd/rqa/smwp come from the sourced
# utils.r / RQA.r; results vary run to run because rnorm() is unseeded.
distanceComparation <- function(nop = 200)
{
sine = sin(2*-pi*seq(0, 2.5, len = nop))
noise0 = sine + rnorm(nop, 0, 0.1)
noise1 = sine + rnorm(nop, 0, 0.4)
noise2 = sine + rnorm(nop, 0, 0.7)
noise3 = sine + rnorm(nop, 0, 0.9)
mean = rep(mean(sine), nop)
# Overlay all series for visual reference.
plot(noise3, col = 5, type = "l")
lines(noise2, col = 6)
lines(noise1, col = 4)
lines(noise0, col = 2)
lines(sine, col = 3)
lines(mean)
# Euclidean distances
e1 = euclidean(sine, noise0)
e2 = euclidean(sine, noise1)
e3 = euclidean(sine, noise2)
e4 = euclidean(sine, noise3)
e5 = euclidean(sine, mean)
cat("ED(sine, noise0): ", e1, "\n")
cat("ED(sine, noise1): ", e2, "\n")
cat("ED(sine, noise2): ", e3, "\n")
cat("ED(sine, noise3): ", e4, "\n")
cat("ED(sine, mean): ", e5, "\n")
cat("=====================\n")
# Raw DTW distances
d1 = dtw(sine, noise0)$distance
d2 = dtw(sine, noise1)$distance
d3 = dtw(sine, noise2)$distance
d4 = dtw(sine, noise3)$distance
d5 = dtw(sine, mean)$distance
cat("dtw(sine, noise0): ", d1, "\n")
cat("dtw(sine, noise1): ", d2, "\n")
cat("dtw(sine, noise2): ", d3, "\n")
cat("dtw(sine, noise3): ", d4, "\n")
cat("dtw(sine, mean): ", d5, "\n")
cat("=====================\n")
# DTW-to-Euclidean ratios
cat("dtw-d(sine, noise0): ", d1/e1, "\n")
cat("dtw-d(sine, noise1): ", d2/e2, "\n")
cat("dtw-d(sine, noise2): ", d3/e3, "\n")
cat("dtw-d(sine, noise3): ", d4/e4, "\n")
cat("dtw-d(sine, mean): ", d5/e5, "\n")
cat("=====================\n")
# Normalized DTW
cat("dtwn(sine, noise0): ", dtwn(sine, noise0), "\n")
cat("dtwn(sine, noise1): ", dtwn(sine, noise1), "\n")
cat("dtwn(sine, noise2): ", dtwn(sine, noise2), "\n")
cat("dtwn(sine, noise3): ", dtwn(sine, noise3), "\n")
cat("dtwn(sine, mean): ", dtwn(sine, mean), "\n")
cat("=====================\n")
# MDDL
cat("mddl(sine, noise0): ", mddl(sine, noise0), "\n")
cat("mddl(sine, noise1): ", mddl(sine, noise1), "\n")
cat("mddl(sine, noise2): ", mddl(sine, noise2), "\n")
cat("mddl(sine, noise3): ", mddl(sine, noise3), "\n")
cat("mddl(sine, mean): ", mddl(sine, mean), "\n")
cat("=====================\n")
# Attractor-based comparison: eigenvalues of the 2-D embedded covariance
es = embedd(sine, 2, 1)
e1 = embedd(noise0, 2, 1)
e2 = embedd(noise1, 2, 1)
e3 = embedd(noise2, 2, 1)
e4 = embedd(noise3, 2, 1)
e5 = embedd(mean, 2, 1)
eis = eig(cov(es))
ei1 = eig(cov(e1))
ei2 = eig(cov(e2))
ei3 = eig(cov(e3))
ei4 = eig(cov(e4))
ei5 = eig(cov(e5))
cat("aed(sine, noise1): ", euclidean(eis, ei1), "\n")
cat("aed(sine, noise2): ", euclidean(eis, ei2), "\n")
cat("aed(sine, noise3): ", euclidean(eis, ei3), "\n")
cat("aed(sine, noise3): ", euclidean(eis, ei4), "\n")
cat("aed(sine, mean): ", euclidean(eis, ei5), "\n")
cat("=====================\n")
# Over embedding: RQA with dimension om, delay od, threshold fraction f
om = 6
od = 1
f = 0.2
es = embedd(sine, om, od)
e1 = embedd(noise0, om, od)
e2 = embedd(noise1, om, od)
e3 = embedd(noise2, om, od)
e4 = embedd(noise3, om, od)
e5 = embedd(mean, om, od)
cat("rqa(sine, noise1): ", 1 / smwp(rqa(es, e1, f))$maxline, "\n")
cat("rqa(sine, noise2): ", 1 / smwp(rqa(es, e2, f))$maxline, "\n")
cat("rqa(sine, noise3): ", 1 / smwp(rqa(es, e3, f))$maxline, "\n")
cat("rqa(sine, noise3): ", 1 / smwp(rqa(es, e4, f))$maxline, "\n")
cat("rqa(sine, mean): ", 1 / smwp(rqa(es, e5, f))$maxline, "\n")
}
# Empirical argument that MDDL is a useful measure: over numberOfTries
# random trials, compare ED, normalized DTW, DTW/ED and MDDL between a
# sine wave and a noisy copy (and, printed only, its flat mean line).
# Returns a 4x3 matrix of (min, mean, max) per measure, rows in the
# order ED, DTWN, DTWD, MDDL.
# NOTE(review): zeros/printf/savePDF/myPlot come from the sourced
# utils.r; rnorm() is unseeded, so results vary between runs.
showMDDLIsGood <- function(nop = 500, plot = F, save = F, legend = F, numberOfTries = 30)
{
ret = zeros(numberOfTries, 4)
for (i in 1:numberOfTries)
{
sine = sin(2*-pi*seq(0, 2.5, len = nop))
noise = sine + rnorm(nop, 0, 0.72)
mean = rep(mean(sine), nop)
mddlPlot <- function()
{
plot(noise, type = "l", xlab = "Time (t)", ylab = "Variable (x)", col = 2)
lines(sine)
lines(mean, col = 4)
if (legend)
legend(1, max(noise), c("Sine","noise 0.72", "mean"), lty = c(1, 4, 2));
}
if (save)
savePDF(mddlPlot(), fileName = "MDDL-comparation")
if (plot)
myPlot(mddlPlot())
# Column 1: Euclidean distance
edi = euclidean(sine, noise)
ret[i, 1] = edi
printf("ed(sine, noise): %f\n", edi)
printf("ed(sine, mean): %f\n", euclidean(sine, mean))
printf("==============\n")
# Column 2: normalized DTW
dtwni = dtwn(sine, noise)
ret[i, 2] = dtwni
printf("dtwn(sine, noise): %f\n", dtwni)
printf("dtwn(sine, mean): %f\n", dtwn(sine, mean))
printf("==============\n")
# Column 3: DTW / Euclidean ratio
dtwdi = dtwd(sine, noise)
ret[i, 3] = dtwdi
printf("dtwd(sine, noise): %f\n", dtwdi)
printf("dtwd(sine, mean): %f\n", dtwd(sine, mean))
printf("==============\n")
# Column 4: MDDL (the first two calls exist for their plot side effects)
mddl(sine, noise, plot = plot, xlab = "Sine", ylab = "Sine + N(0, 0.72)",
save = save, fileName = "MDDL-match")
mddl(sine, noise, plot = plot, xlab = "Sine", ylab = "Sine + N(0, 0.72)",
shaded = T, save = save, fileName = "MDDL-diagonal")
mddli = mddl(sine, noise)
ret[i, 4] = mddli
printf("mddl(sine, noise): %f\n", mddli)
printf("mddl(sine, mean): %f\n", mddl(sine, mean))
printf("==============\n")
}
# Summarise each measure as (min, mean, max) across the trials.
perf = zeros(4, 3)
for (i in 1:4)
{
vec = ret[,i]
perf[i,] = c(min(vec), mean(vec), max(vec))
}
return(perf)
}
|
/similaritiesFunctions.r
|
no_license
|
pagliosa/chaos
|
R
| false
| false
| 6,739
|
r
|
# ===============================
# About: Time series similarities methods
# Dependences: utils.r, rqa.r
# Author: Lucas Pagliosa
# Data of creation: 12/02/16
# Last revision 12/02/15
# ==============================
source("~/Canon/R/utils.r")
sourceFiles("~/Canon/R/RQA.r")
loadPackages("dtw")
dtwr <- function(ts1, ts2, ...)
{
return(dtw(ts1, ts2)$distance)
}
dtwn <- function(ts1, ts2, ...)
{
return(dtw(ts1, ts2)$normalizedDistance)
}
dtwd <- function(ts1, ts2, ...)
{
return(dtwr(ts1, ts2) / euclidean(ts1, ts2))
}
# Mean Euclidean distance from each warping-path point
# (predict[i], test[i]) to the corresponding point on the main diagonal
# of a size x size alignment matrix.  A perfectly diagonal path (i.e. a
# perfect alignment) yields 0.
distanceToDiagonal <- function(predict, test, size)
{
  diag.x <- seq(1, size, length = length(predict))
  diag.y <- seq(1, size, length = length(test))
  offsets <- sqrt((predict - diag.x)^2 + (test - diag.y)^2)
  sum(offsets) / length(test)
}
mddl <- function(predict, test, ..., plot = FALSE, shaded = FALSE, save = F,
fileName = "mddl", xlab = "", ylab = "", retDTW = F)
{
dtw = dtw(test, predict)
mddlPlot <- function()
{
plot(dtw, xlab = xlab, ylab = ylab, ...)
lines(x = seq(1, length(predict), length = length(predict)),
y = seq(1,length(test),length = length(predict)), lty = 2)
if(shaded)
{
x = seq(1, dtw$N, length = length(dtw$index1))
y = seq(1, dtw$N, length = length(dtw$index2))
for(i in 1:length(dtw$index1))
lines(c(dtw$index1[i], x[i]), c(dtw$index2[i], y[i]))
}
}
if (save)
savePDF(mddlPlot(), fileName)
if (plot)
myPlot(mddlPlot())
dist = distanceToDiagonal(dtw$index1, dtw$index2, length(test))
if (!retDTW)
return(dist)
return(list(MDDL = dist, DTW = dtw$distance, DTWN = dtw$normalizedDistance))
}
distanceComparation <- function(nop = 200)
{
sine = sin(2*-pi*seq(0, 2.5, len = nop))
noise0 = sine + rnorm(nop, 0, 0.1)
noise1 = sine + rnorm(nop, 0, 0.4)
noise2 = sine + rnorm(nop, 0, 0.7)
noise3 = sine + rnorm(nop, 0, 0.9)
mean = rep(mean(sine), nop)
plot(noise3, col = 5, type = "l")
lines(noise2, col = 6)
lines(noise1, col = 4)
lines(noise0, col = 2)
lines(sine, col = 3)
lines(mean)
e1 = euclidean(sine, noise0)
e2 = euclidean(sine, noise1)
e3 = euclidean(sine, noise2)
e4 = euclidean(sine, noise3)
e5 = euclidean(sine, mean)
cat("ED(sine, noise0): ", e1, "\n")
cat("ED(sine, noise1): ", e2, "\n")
cat("ED(sine, noise2): ", e3, "\n")
cat("ED(sine, noise3): ", e4, "\n")
cat("ED(sine, mean): ", e5, "\n")
cat("=====================\n")
d1 = dtw(sine, noise0)$distance
d2 = dtw(sine, noise1)$distance
d3 = dtw(sine, noise2)$distance
d4 = dtw(sine, noise3)$distance
d5 = dtw(sine, mean)$distance
cat("dtw(sine, noise0): ", d1, "\n")
cat("dtw(sine, noise1): ", d2, "\n")
cat("dtw(sine, noise2): ", d3, "\n")
cat("dtw(sine, noise3): ", d4, "\n")
cat("dtw(sine, mean): ", d5, "\n")
cat("=====================\n")
cat("dtw-d(sine, noise0): ", d1/e1, "\n")
cat("dtw-d(sine, noise1): ", d2/e2, "\n")
cat("dtw-d(sine, noise2): ", d3/e3, "\n")
cat("dtw-d(sine, noise3): ", d4/e4, "\n")
cat("dtw-d(sine, mean): ", d5/e5, "\n")
cat("=====================\n")
cat("dtwn(sine, noise0): ", dtwn(sine, noise0), "\n")
cat("dtwn(sine, noise1): ", dtwn(sine, noise1), "\n")
cat("dtwn(sine, noise2): ", dtwn(sine, noise2), "\n")
cat("dtwn(sine, noise3): ", dtwn(sine, noise3), "\n")
cat("dtwn(sine, mean): ", dtwn(sine, mean), "\n")
cat("=====================\n")
cat("mddl(sine, noise0): ", mddl(sine, noise0), "\n")
cat("mddl(sine, noise1): ", mddl(sine, noise1), "\n")
cat("mddl(sine, noise2): ", mddl(sine, noise2), "\n")
cat("mddl(sine, noise3): ", mddl(sine, noise3), "\n")
cat("mddl(sine, mean): ", mddl(sine, mean), "\n")
cat("=====================\n")
es = embedd(sine, 2, 1)
e1 = embedd(noise0, 2, 1)
e2 = embedd(noise1, 2, 1)
e3 = embedd(noise2, 2, 1)
e4 = embedd(noise3, 2, 1)
e5 = embedd(mean, 2, 1)
eis = eig(cov(es))
ei1 = eig(cov(e1))
ei2 = eig(cov(e2))
ei3 = eig(cov(e3))
ei4 = eig(cov(e4))
ei5 = eig(cov(e5))
cat("aed(sine, noise1): ", euclidean(eis, ei1), "\n")
cat("aed(sine, noise2): ", euclidean(eis, ei2), "\n")
cat("aed(sine, noise3): ", euclidean(eis, ei3), "\n")
cat("aed(sine, noise3): ", euclidean(eis, ei4), "\n")
cat("aed(sine, mean): ", euclidean(eis, ei5), "\n")
cat("=====================\n")
# Over embedding
om = 6
od = 1
f = 0.2
es = embedd(sine, om, od)
e1 = embedd(noise0, om, od)
e2 = embedd(noise1, om, od)
e3 = embedd(noise2, om, od)
e4 = embedd(noise3, om, od)
e5 = embedd(mean, om, od)
cat("rqa(sine, noise1): ", 1 / smwp(rqa(es, e1, f))$maxline, "\n")
cat("rqa(sine, noise2): ", 1 / smwp(rqa(es, e2, f))$maxline, "\n")
cat("rqa(sine, noise3): ", 1 / smwp(rqa(es, e3, f))$maxline, "\n")
cat("rqa(sine, noise3): ", 1 / smwp(rqa(es, e4, f))$maxline, "\n")
cat("rqa(sine, mean): ", 1 / smwp(rqa(es, e5, f))$maxline, "\n")
}
showMDDLIsGood <- function(nop = 500, plot = F, save = F, legend = F, numberOfTries = 30)
{
ret = zeros(numberOfTries, 4)
for (i in 1:numberOfTries)
{
sine = sin(2*-pi*seq(0, 2.5, len = nop))
noise = sine + rnorm(nop, 0, 0.72)
mean = rep(mean(sine), nop)
mddlPlot <- function()
{
plot(noise, type = "l", xlab = "Time (t)", ylab = "Variable (x)", col = 2)
lines(sine)
lines(mean, col = 4)
if (legend)
legend(1, max(noise), c("Sine","noise 0.72", "mean"), lty = c(1, 4, 2));
}
if (save)
savePDF(mddlPlot(), fileName = "MDDL-comparation")
if (plot)
myPlot(mddlPlot())
edi = euclidean(sine, noise)
ret[i, 1] = edi
printf("ed(sine, noise): %f\n", edi)
printf("ed(sine, mean): %f\n", euclidean(sine, mean))
printf("==============\n")
dtwni = dtwn(sine, noise)
ret[i, 2] = dtwni
printf("dtwn(sine, noise): %f\n", dtwni)
printf("dtwn(sine, mean): %f\n", dtwn(sine, mean))
printf("==============\n")
dtwdi = dtwd(sine, noise)
ret[i, 3] = dtwdi
printf("dtwd(sine, noise): %f\n", dtwdi)
printf("dtwd(sine, mean): %f\n", dtwd(sine, mean))
printf("==============\n")
mddl(sine, noise, plot = plot, xlab = "Sine", ylab = "Sine + N(0, 0.72)",
save = save, fileName = "MDDL-match")
mddl(sine, noise, plot = plot, xlab = "Sine", ylab = "Sine + N(0, 0.72)",
shaded = T, save = save, fileName = "MDDL-diagonal")
mddli = mddl(sine, noise)
ret[i, 4] = mddli
printf("mddl(sine, noise): %f\n", mddli)
printf("mddl(sine, mean): %f\n", mddl(sine, mean))
printf("==============\n")
}
perf = zeros(4, 3)
for (i in 1:4)
{
vec = ret[,i]
perf[i,] = c(min(vec), mean(vec), max(vec))
}
return(perf)
}
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_emissions_L161.nonghg_en_ssp_R_S_T_Y
#'
#' Produce future non-GHG emissions factors by SSP scenario.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L161.SSP15_EF}, \code{L161.SSP2_EF}, \code{L161.SSP34_EF}. The corresponding file in the
#' original data system was \code{L161.nonghg_en_ssp_R_S_T_Y.R} (emissions level1).
#' @details Scales future GAINS, Greenhouse Gas - Air Pollution Interactions and Synergies model, non-GHG emissions factors to L111/L114 base year emissions factors,
#' then applies future emissions factors to some GCAM years based on SSP-specific rules.
#' @importFrom assertthat assert_that
#' @importFrom dplyr bind_rows distinct filter if_else group_by lag left_join mutate order_by select summarise
#' @importFrom tidyr complete gather nesting replace_na
#' @author RLH July 2017
module_emissions_L161.nonghg_en_ssp_R_S_T_Y <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "emissions/A_regions",
FILE = "emissions/mappings/GCAM_sector_tech",
FILE = "emissions/mappings/GCAM_sector_tech_Revised",
FILE = "emissions/mappings/gains_to_gcam_sector",
FILE = "emissions/GAINS_activities",
FILE = "emissions/GAINS_emissions",
"L102.pcgdp_thous90USD_Scen_R_Y",
"L111.nonghg_tgej_R_en_S_F_Yh",
"L114.bcoc_tgej_R_en_S_F_2000",
FILE = "emissions/A61_emfact_rules"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L161.SSP15_EF",
"L161.SSP2_EF",
"L161.SSP34_EF"))
} else if(command == driver.MAKE) {
# Silence package checks
ACT <- CLE <- CLE_2010 <- CLE_2020 <- CLE_2030 <- CLE_base <- GAINS_region <-
GCAM_region_ID <- GCAM_tag <- IDYEARS <- IIASA_sector <- MFR <- MFR_2030 <-
Non.CO2 <- POLL <- SLE <- SLE_2030 <- SSP_group <- TIMER_REGION <- agg_sector <-
base_value <- emfact <- marker_region_CLE_2020 <- marker_region_CLE_2030 <-
marker_region_SLE_2030 <- min_CLE_2030_low <- min_CLE_2030_weak_reg <-
min_SLE_2030_strong_reg <- policy <- prev <- region_grouping <- scaler <-
scenario <- stub.technology <- subsector <- supplysector <- value <-
variable <- varyear <- varyearpol <- year <- marker_value <- min_value <-
min_value <- multiplier <- . <- `2000` <- NULL # silence package check notes
all_data <- list(...)[[1]]
# Load required inputs
A_regions <- get_data(all_data, "emissions/A_regions")
GCAM_sector_tech <- get_data(all_data, "emissions/mappings/GCAM_sector_tech")
if (energy.TRAN_UCD_MODE == "rev.mode"){
GCAM_sector_tech <- get_data(all_data, "emissions/mappings/GCAM_sector_tech_Revised")
}
GAINS_sector <- get_data(all_data, "emissions/mappings/gains_to_gcam_sector")
GAINS_activities <- get_data(all_data, "emissions/GAINS_activities")
GAINS_emissions <- get_data(all_data, "emissions/GAINS_emissions") %>%
# NOTE: these are three different scenarios
# CLE = current legislation, SLE = stringent legislation, MFR = maximum feasible reductions
gather(scenario, value, CLE, MFR, SLE)
L102.pcgdp_thous90USD_Scen_R_Y <- get_data(all_data, "L102.pcgdp_thous90USD_Scen_R_Y")
L111.nonghg_tgej_R_en_S_F_Yh <- get_data(all_data, "L111.nonghg_tgej_R_en_S_F_Yh")
L114.bcoc_tgej_R_en_S_F_2000 <- get_data(all_data, "L114.bcoc_tgej_R_en_S_F_2000") %>%
gather(year, value, `2000`) %>%
mutate(year = as.integer(year))
A61_emfact_rules <- get_data(all_data, "emissions/A61_emfact_rules")
# ===================================================
# Aggregate GAINS emissions data by GCAM sector
GAINS_emissions_agg <- GAINS_emissions %>%
# Change pollutant names
mutate(POLL = replace(POLL, POLL == "NOX", "NOx"),
POLL = replace(POLL, POLL == "VOC", "NMVOC")) %>%
# Use left_join because NAs in GAINS_sector
left_join(GAINS_sector, by = c("TIMER_SECTOR" = "IIASA_Sector")) %>%
group_by(TIMER_REGION, agg_sector = GCAM_tag, POLL, IDYEARS, scenario) %>%
summarise(value = sum(value)) %>%
na.omit %>%
ungroup
# Aggregate GAINS activity data by GCAM sector
GAINS_activities_agg <- GAINS_activities %>%
# Use left_join because NAs in GAINS_sector
left_join(GAINS_sector, by = c("TIMER_SECTOR" = "IIASA_Sector")) %>%
group_by(TIMER_REGION, agg_sector = GCAM_tag, IDYEARS) %>%
summarise(ACT = sum(ACT)) %>%
na.omit %>%
ungroup
# Compute emissions factors by dividing GAINS activity by GAINS emissions
GAINS_emfact <- GAINS_emissions_agg %>%
left_join_error_no_match(GAINS_activities_agg, by = c("TIMER_REGION", "agg_sector", "IDYEARS")) %>%
mutate(emfact = value / ACT) %>%
select(TIMER_REGION, agg_sector, POLL, IDYEARS, scenario, emfact) %>%
group_by(TIMER_REGION, agg_sector, POLL, IDYEARS) %>%
# Using CLE scenario for base value
mutate(CLE_base = emfact[scenario == "CLE"]) %>%
ungroup() %>%
# Replace SLE & MFR base year (2005) emissions factors with CLE emissions factors.
# They don't all start from the same value.
mutate(emfact = replace(emfact, IDYEARS == emissions.GAINS_BASE_YEAR, CLE_base[IDYEARS == emissions.GAINS_BASE_YEAR])) %>%
select(-CLE_base)
# Compute emissions factor scaler.
# These scalers are relative to the previous time period's numbers.
GAINS_emfact_scaler <- GAINS_emfact %>%
group_by(TIMER_REGION, agg_sector, POLL, scenario) %>%
# Create column of previous time period value
mutate(prev = lag(emfact, n = 1L, order_by = IDYEARS)) %>%
ungroup() %>%
filter(IDYEARS > emissions.GAINS_BASE_YEAR) %>%
# Divide current value by previous value, not allowing value greater than 1 (emissions factors cannot increase with time)
mutate(scaler = emfact / prev,
scaler = replace(scaler, scaler > 1, 1)) %>%
group_by(TIMER_REGION, agg_sector, POLL, scenario) %>%
mutate(scaler = cumprod(scaler)) %>%
ungroup() %>%
select(GAINS_region = TIMER_REGION, IIASA_sector = agg_sector, Non.CO2 = POLL, scenario, year = IDYEARS, scaler)
# Determine region groupings
pcgdp <- L102.pcgdp_thous90USD_Scen_R_Y %>%
# We are trying to filter to 2010. This code (taking the last historical year) was necessary to
# pass the timeshift, but is really not what we want to be doing, since the years in this code
# are fairly set in stone right now
filter(scenario == "SSP4", year == HISTORICAL_YEARS[length(HISTORICAL_YEARS)]) %>%
mutate(value = value * gdp_deflator(HISTORICAL_YEARS[length(HISTORICAL_YEARS)], 1990),
region_grouping = if_else(value >= emissions.LOW_PCGDP, "highmed", "low"))
# Compute future emissions factors for GAINS scenarios
emfact_scaled <- L111.nonghg_tgej_R_en_S_F_Yh %>%
filter(year == emissions.GAINS_BASE_YEAR) %>%
# Add in BC/OC emissions factors, assumed that 2005 emissions factors are identical to 2000
bind_rows(L114.bcoc_tgej_R_en_S_F_2000 %>%
mutate(year = emissions.GAINS_BASE_YEAR)) %>%
# Add GAINS regions and sectors
left_join_error_no_match(A_regions %>% select(GCAM_region_ID, GAINS_region), by = "GCAM_region_ID") %>%
left_join(GCAM_sector_tech %>% select(supplysector, subsector, stub.technology, IIASA_sector),
by = c("supplysector", "subsector", "stub.technology")) %>%
# Remove non-IIASA sectors and technologies with 0 emissions factor in base year. No reason to read in future zeroes.
filter(!is.na(IIASA_sector), value != 0) %>%
rename(base_year = year, base_value = value) %>%
left_join(GAINS_emfact_scaler, by = c("GAINS_region", "IIASA_sector", "Non.CO2")) %>%
# Scale L111/L114 emissions factors to GAINS scalers
mutate(emfact = base_value * scaler) %>%
left_join_error_no_match(pcgdp %>% select(GCAM_region_ID, region_grouping), by = "GCAM_region_ID") %>%
na.omit() %>%
select(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology, GAINS_region, IIASA_sector,
scenario, year, emfact, region_grouping)
# Create list of countries with strong regulation based on elec_coal SO2 emissions factor
coal_so2 <- tibble(GCAM_region_ID = A_regions$GCAM_region_ID) %>%
left_join(
emfact_scaled %>%
filter(year == emissions.GAINS_YEARS[length(emissions.GAINS_YEARS)],
IIASA_sector == "elec_coal", Non.CO2 == "SO2", scenario == "CLE"),
by = "GCAM_region_ID") %>%
mutate(policy = if_else(emfact <= emissions.COAL_SO2_THRESHOLD, "strong_reg", "weak_reg"),
policy = replace(policy, region_grouping == "low", "low")) %>%
# If region is missing a value, assume it is a weak_reg
replace_na(list(policy = "weak_reg")) %>%
select(GCAM_region_ID, policy)
# Group SSPs by whether we process them the same
SSP_groups <- tibble(SSP_group = c("1&5", "2", "3&4"))
# Add the rules for each region, gas, technology
EF_rules <- emfact_scaled %>%
# This is to have same starting point as old code, which begins with NA omitted 2010 values
filter(year == emissions.GAINS_YEARS[1]) %>%
na.omit %>%
select(-year, -emfact, -scenario) %>%
distinct() %>%
# Repeat for future years and SSP groups
repeat_add_columns(tibble(year = c(2010, 2030, 2050, 2100))) %>%
repeat_add_columns(SSP_groups) %>%
# Join with policy type, but only for SSP group 2
left_join_error_no_match(coal_so2, by = "GCAM_region_ID") %>%
mutate(policy = replace(policy, SSP_group != "2", NA)) %>%
# Join with rules-use left_join b/c there are NA values in A61_emfact_rules
left_join(A61_emfact_rules, by = c("region_grouping", "year", "SSP_group", "policy"))
# Create a tibble with just marker region values
# Marker region is Western Europe (13) - some values will be set to its emissions factors in future
marker_region_df <- emfact_scaled %>%
filter(GCAM_region_ID == gcam.WESTERN_EUROPE_CODE) %>%
select(-GCAM_region_ID, -GAINS_region, -region_grouping) %>%
rename(marker_value = emfact)
# Combine all emissions factors
EF_all <- emfact_scaled %>%
# This is to have same starting point as old code, which begins with NA omitted 2010 values
filter(year == emissions.GAINS_YEARS[1]) %>%
na.omit %>%
select(-year, -emfact) %>%
left_join(emfact_scaled, by = c("GCAM_region_ID", "Non.CO2", "supplysector", "subsector", "stub.technology",
"GAINS_region", "IIASA_sector", "scenario", "region_grouping")) %>%
# Add in policy
left_join_error_no_match(coal_so2, by = "GCAM_region_ID") %>%
# Use complete to fill out any region/technology/gas combos that do not have emissions factors for all years
complete(year, nesting(GCAM_region_ID, Non.CO2, supplysector, subsector,
stub.technology, GAINS_region, IIASA_sector, scenario, region_grouping, policy)) %>%
# Calculate minimum by technology/gas
group_by(Non.CO2, supplysector, subsector, stub.technology, IIASA_sector, scenario, year, policy, region_grouping) %>%
mutate(min_value = min(emfact, na.rm = TRUE)) %>%
ungroup %>%
# Add marker region values
left_join(marker_region_df, by = c("Non.CO2", "supplysector", "subsector", "stub.technology", "IIASA_sector", "scenario", "year")) %>%
gather(variable, value, emfact, marker_value, min_value) %>%
# Add binary columns indicating if value is marker value or minimum value
mutate(marker_region = if_else(variable == "marker_value", 1, 0),
min = if_else(variable == "min_value", 1, 0)) %>%
rename(multiplier_year = year, multiplier_scenario = scenario) %>%
select(-GAINS_region, -region_grouping, -variable, -policy)
SSP_EF <- EF_rules %>%
# Join rules with values
left_join(EF_all, by = c("GCAM_region_ID", "Non.CO2", "supplysector", "subsector", "stub.technology", "IIASA_sector",
"multiplier_scenario", "multiplier_year", "marker_region", "min")) %>%
# Multiply non-NA values by multipliers
mutate(value = replace(value, !is.na(value), multiplier[!is.na(value)] * value[!is.na(value)])) %>%
select(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology,
agg_sector = IIASA_sector, year, value, SSP_group) %>%
group_by(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology,
agg_sector, SSP_group) %>%
# Set NA values to previous (non-NA) value
mutate(value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
# Emission factors cannot increase-if any increases, change it to value from previous time step
prev = lag(value, n = 1L, order_by = year),
value = if_else(value > prev & !is.na(prev), prev, value),
prev = lag(value, n = 1L, order_by = year),
value = if_else(value > prev & !is.na(prev), prev, value),
prev = lag(value, n = 1L, order_by = year),
value = if_else(value > prev & !is.na(prev), prev, value)) %>%
select(-prev) %>%
ungroup
# Split data by SSP grouping
out_df <- SSP_EF %>%
split(.$SSP_group) %>%
lapply(function(df) {
select(df, -SSP_group)
})
# Produce outputs
out_df[["1&5"]] %>%
add_title("Emissions factors for SSP 1 and SSP 5") %>%
add_units("Tg/EJ") %>%
add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
add_legacy_name("L161.SSP15_EF") %>%
add_precursors("emissions/A_regions",
"emissions/mappings/GCAM_sector_tech",
"emissions/mappings/GCAM_sector_tech_Revised",
"emissions/mappings/gains_to_gcam_sector",
"emissions/GAINS_activities",
"emissions/GAINS_emissions",
"L102.pcgdp_thous90USD_Scen_R_Y",
"L111.nonghg_tgej_R_en_S_F_Yh",
"L114.bcoc_tgej_R_en_S_F_2000",
"emissions/A61_emfact_rules") ->
L161.SSP15_EF
out_df[["2"]] %>%
add_title("Emissions factors for SSP 2") %>%
add_units("Tg/EJ") %>%
add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
add_legacy_name("L161.SSP2_EF") %>%
add_precursors("emissions/A_regions",
"emissions/mappings/GCAM_sector_tech",
"emissions/mappings/GCAM_sector_tech_Revised",
"emissions/mappings/gains_to_gcam_sector",
"emissions/GAINS_activities",
"emissions/GAINS_emissions",
"L102.pcgdp_thous90USD_Scen_R_Y",
"L111.nonghg_tgej_R_en_S_F_Yh",
"L114.bcoc_tgej_R_en_S_F_2000",
"emissions/A61_emfact_rules") ->
L161.SSP2_EF
out_df[["3&4"]] %>%
add_title("Emissions factors for SSP 3 and SSP 4") %>%
add_units("Tg/EJ") %>%
add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
add_legacy_name("L161.SSP34_EF") %>%
add_precursors("emissions/A_regions",
"emissions/mappings/GCAM_sector_tech",
"emissions/mappings/GCAM_sector_tech_Revised",
"emissions/mappings/gains_to_gcam_sector",
"emissions/GAINS_activities",
"emissions/GAINS_emissions",
"L102.pcgdp_thous90USD_Scen_R_Y",
"L111.nonghg_tgej_R_en_S_F_Yh",
"L114.bcoc_tgej_R_en_S_F_2000",
"emissions/A61_emfact_rules") ->
L161.SSP34_EF
return_data(L161.SSP15_EF, L161.SSP2_EF, L161.SSP34_EF)
} else {
stop("Unknown command")
}
}
|
/R/zchunk_L161.nonghg_en_ssp_R_S_T_Y.R
|
permissive
|
Liyang-Guo/gcamdata
|
R
| false
| false
| 16,742
|
r
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_emissions_L161.nonghg_en_ssp_R_S_T_Y
#'
#' Produce future non-GHG emissions factors by SSP scenario.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L161.SSP15_EF}, \code{L161.SSP2_EF}, \code{L161.SSP34_EF}. The corresponding file in the
#' original data system was \code{L161.nonghg_en_ssp_R_S_T_Y.R} (emissions level1).
#' @details Scales future GAINS, Greenhouse Gas - Air Pollution Interactions and Synergies model, non-GHG emissions factors to L111/L114 base year emissions factors,
#' then applies future emissions factors to some GCAM years based on SSP-specific rules.
#' @importFrom assertthat assert_that
#' @importFrom dplyr bind_rows distinct filter if_else group_by lag left_join mutate order_by select summarise
#' @importFrom tidyr complete gather nesting replace_na
#' @author RLH July 2017
# gcamdata "chunk" entry point: the driver calls this with DECLARE_INPUTS,
# DECLARE_OUTPUTS, or MAKE. The MAKE branch scales future GAINS emissions
# factors to the L111/L114 base-year factors and applies SSP-specific rules.
module_emissions_L161.nonghg_en_ssp_R_S_T_Y <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "emissions/A_regions",
             FILE = "emissions/mappings/GCAM_sector_tech",
             FILE = "emissions/mappings/GCAM_sector_tech_Revised",
             FILE = "emissions/mappings/gains_to_gcam_sector",
             FILE = "emissions/GAINS_activities",
             FILE = "emissions/GAINS_emissions",
             "L102.pcgdp_thous90USD_Scen_R_Y",
             "L111.nonghg_tgej_R_en_S_F_Yh",
             "L114.bcoc_tgej_R_en_S_F_2000",
             FILE = "emissions/A61_emfact_rules"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L161.SSP15_EF",
             "L161.SSP2_EF",
             "L161.SSP34_EF"))
  } else if(command == driver.MAKE) {
    # Silence package checks
    # (NSE column names used in dplyr/tidyr calls below; bound to NULL so
    # R CMD check does not flag them as undefined globals.)
    ACT <- CLE <- CLE_2010 <- CLE_2020 <- CLE_2030 <- CLE_base <- GAINS_region <-
      GCAM_region_ID <- GCAM_tag <- IDYEARS <- IIASA_sector <- MFR <- MFR_2030 <-
      Non.CO2 <- POLL <- SLE <- SLE_2030 <- SSP_group <- TIMER_REGION <- agg_sector <-
      base_value <- emfact <- marker_region_CLE_2020 <- marker_region_CLE_2030 <-
      marker_region_SLE_2030 <- min_CLE_2030_low <- min_CLE_2030_weak_reg <-
      min_SLE_2030_strong_reg <- policy <- prev <- region_grouping <- scaler <-
      scenario <- stub.technology <- subsector <- supplysector <- value <-
      variable <- varyear <- varyearpol <- year <- marker_value <- min_value <-
      min_value <- multiplier <- . <- `2000` <- NULL # silence package check notes

    all_data <- list(...)[[1]]

    # Load required inputs
    A_regions <- get_data(all_data, "emissions/A_regions")
    GCAM_sector_tech <- get_data(all_data, "emissions/mappings/GCAM_sector_tech")
    # Swap in the revised transportation mapping when the UCD "revised mode"
    # flag is set (both mapping files are declared as inputs above).
    if (energy.TRAN_UCD_MODE == "rev.mode"){
      GCAM_sector_tech <- get_data(all_data, "emissions/mappings/GCAM_sector_tech_Revised")
    }
    GAINS_sector <- get_data(all_data, "emissions/mappings/gains_to_gcam_sector")
    GAINS_activities <- get_data(all_data, "emissions/GAINS_activities")
    GAINS_emissions <- get_data(all_data, "emissions/GAINS_emissions") %>%
      # NOTE: these are three different scenarios
      # CLE = current legislation, SLE = stringent legislation, MFR = maximum feasible reductions
      gather(scenario, value, CLE, MFR, SLE)
    L102.pcgdp_thous90USD_Scen_R_Y <- get_data(all_data, "L102.pcgdp_thous90USD_Scen_R_Y")
    L111.nonghg_tgej_R_en_S_F_Yh <- get_data(all_data, "L111.nonghg_tgej_R_en_S_F_Yh")
    L114.bcoc_tgej_R_en_S_F_2000 <- get_data(all_data, "L114.bcoc_tgej_R_en_S_F_2000") %>%
      gather(year, value, `2000`) %>%
      mutate(year = as.integer(year))
    A61_emfact_rules <- get_data(all_data, "emissions/A61_emfact_rules")
    # ===================================================
    # Aggregate GAINS emissions data by GCAM sector
    GAINS_emissions_agg <- GAINS_emissions %>%
      # Change pollutant names
      mutate(POLL = replace(POLL, POLL == "NOX", "NOx"),
             POLL = replace(POLL, POLL == "VOC", "NMVOC")) %>%
      # Use left_join because NAs in GAINS_sector
      left_join(GAINS_sector, by = c("TIMER_SECTOR" = "IIASA_Sector")) %>%
      group_by(TIMER_REGION, agg_sector = GCAM_tag, POLL, IDYEARS, scenario) %>%
      summarise(value = sum(value)) %>%
      na.omit %>%
      ungroup
    # Aggregate GAINS activity data by GCAM sector
    GAINS_activities_agg <- GAINS_activities %>%
      # Use left_join because NAs in GAINS_sector
      left_join(GAINS_sector, by = c("TIMER_SECTOR" = "IIASA_Sector")) %>%
      group_by(TIMER_REGION, agg_sector = GCAM_tag, IDYEARS) %>%
      summarise(ACT = sum(ACT)) %>%
      na.omit %>%
      ungroup
    # Compute emissions factors by dividing GAINS activity by GAINS emissions
    GAINS_emfact <- GAINS_emissions_agg %>%
      left_join_error_no_match(GAINS_activities_agg, by = c("TIMER_REGION", "agg_sector", "IDYEARS")) %>%
      mutate(emfact = value / ACT) %>%
      select(TIMER_REGION, agg_sector, POLL, IDYEARS, scenario, emfact) %>%
      group_by(TIMER_REGION, agg_sector, POLL, IDYEARS) %>%
      # Using CLE scenario for base value
      mutate(CLE_base = emfact[scenario == "CLE"]) %>%
      ungroup() %>%
      # Replace SLE & MFR base year (2005) emissions factors with CLE emissions factors.
      # They don't all start from the same value.
      mutate(emfact = replace(emfact, IDYEARS == emissions.GAINS_BASE_YEAR, CLE_base[IDYEARS == emissions.GAINS_BASE_YEAR])) %>%
      select(-CLE_base)
    # Compute emissions factor scaler.
    # These scalers are relative to the previous time period's numbers.
    GAINS_emfact_scaler <- GAINS_emfact %>%
      group_by(TIMER_REGION, agg_sector, POLL, scenario) %>%
      # Create column of previous time period value
      mutate(prev = lag(emfact, n = 1L, order_by = IDYEARS)) %>%
      ungroup() %>%
      filter(IDYEARS > emissions.GAINS_BASE_YEAR) %>%
      # Divide current value by previous value, not allowing value greater than 1 (emissions factors cannot increase with time)
      mutate(scaler = emfact / prev,
             scaler = replace(scaler, scaler > 1, 1)) %>%
      group_by(TIMER_REGION, agg_sector, POLL, scenario) %>%
      # Chain the per-period ratios into cumulative scalers relative to the base year.
      mutate(scaler = cumprod(scaler)) %>%
      ungroup() %>%
      select(GAINS_region = TIMER_REGION, IIASA_sector = agg_sector, Non.CO2 = POLL, scenario, year = IDYEARS, scaler)
    # Determine region groupings
    pcgdp <- L102.pcgdp_thous90USD_Scen_R_Y %>%
      # We are trying to filter to 2010. This code (taking the last historical year) was necessary to
      # pass the timeshift, but is really not what we want to be doing, since the years in this code
      # are fairly set in stone right now
      filter(scenario == "SSP4", year == HISTORICAL_YEARS[length(HISTORICAL_YEARS)]) %>%
      mutate(value = value * gdp_deflator(HISTORICAL_YEARS[length(HISTORICAL_YEARS)], 1990),
             region_grouping = if_else(value >= emissions.LOW_PCGDP, "highmed", "low"))
    # Compute future emissions factors for GAINS scenarios
    emfact_scaled <- L111.nonghg_tgej_R_en_S_F_Yh %>%
      filter(year == emissions.GAINS_BASE_YEAR) %>%
      # Add in BC/OC emissions factors, assumed that 2005 emissions factors are identical to 2000
      bind_rows(L114.bcoc_tgej_R_en_S_F_2000 %>%
                  mutate(year = emissions.GAINS_BASE_YEAR)) %>%
      # Add GAINS regions and sectors
      left_join_error_no_match(A_regions %>% select(GCAM_region_ID, GAINS_region), by = "GCAM_region_ID") %>%
      left_join(GCAM_sector_tech %>% select(supplysector, subsector, stub.technology, IIASA_sector),
                by = c("supplysector", "subsector", "stub.technology")) %>%
      # Remove non-IIASA sectors and technologies with 0 emissions factor in base year. No reason to read in future zeroes.
      filter(!is.na(IIASA_sector), value != 0) %>%
      rename(base_year = year, base_value = value) %>%
      left_join(GAINS_emfact_scaler, by = c("GAINS_region", "IIASA_sector", "Non.CO2")) %>%
      # Scale L111/L114 emissions factors to GAINS scalers
      mutate(emfact = base_value * scaler) %>%
      left_join_error_no_match(pcgdp %>% select(GCAM_region_ID, region_grouping), by = "GCAM_region_ID") %>%
      na.omit() %>%
      select(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology, GAINS_region, IIASA_sector,
             scenario, year, emfact, region_grouping)
    # Create list of countries with strong regulation based on elec_coal SO2 emissions factor
    coal_so2 <- tibble(GCAM_region_ID = A_regions$GCAM_region_ID) %>%
      left_join(
        emfact_scaled %>%
          filter(year == emissions.GAINS_YEARS[length(emissions.GAINS_YEARS)],
                 IIASA_sector == "elec_coal", Non.CO2 == "SO2", scenario == "CLE"),
        by = "GCAM_region_ID") %>%
      mutate(policy = if_else(emfact <= emissions.COAL_SO2_THRESHOLD, "strong_reg", "weak_reg"),
             policy = replace(policy, region_grouping == "low", "low")) %>%
      # If region is missing a value, assume it is a weak_reg
      replace_na(list(policy = "weak_reg")) %>%
      select(GCAM_region_ID, policy)
    # Group SSPs by whether we process them the same
    SSP_groups <- tibble(SSP_group = c("1&5", "2", "3&4"))
    # Add the rules for each region, gas, technology
    EF_rules <- emfact_scaled %>%
      # This is to have same starting point as old code, which begins with NA omitted 2010 values
      filter(year == emissions.GAINS_YEARS[1]) %>%
      na.omit %>%
      select(-year, -emfact, -scenario) %>%
      distinct() %>%
      # Repeat for future years and SSP groups
      repeat_add_columns(tibble(year = c(2010, 2030, 2050, 2100))) %>%
      repeat_add_columns(SSP_groups) %>%
      # Join with policy type, but only for SSP group 2
      left_join_error_no_match(coal_so2, by = "GCAM_region_ID") %>%
      mutate(policy = replace(policy, SSP_group != "2", NA)) %>%
      # Join with rules-use left_join b/c there are NA values in A61_emfact_rules
      left_join(A61_emfact_rules, by = c("region_grouping", "year", "SSP_group", "policy"))
    # Create a tibble with just marker region values
    # Marker region is Western Europe (13) - some values will be set to its emissions factors in future
    marker_region_df <- emfact_scaled %>%
      filter(GCAM_region_ID == gcam.WESTERN_EUROPE_CODE) %>%
      select(-GCAM_region_ID, -GAINS_region, -region_grouping) %>%
      rename(marker_value = emfact)
    # Combine all emissions factors
    EF_all <- emfact_scaled %>%
      # This is to have same starting point as old code, which begins with NA omitted 2010 values
      filter(year == emissions.GAINS_YEARS[1]) %>%
      na.omit %>%
      select(-year, -emfact) %>%
      left_join(emfact_scaled, by = c("GCAM_region_ID", "Non.CO2", "supplysector", "subsector", "stub.technology",
                                      "GAINS_region", "IIASA_sector", "scenario", "region_grouping")) %>%
      # Add in policy
      left_join_error_no_match(coal_so2, by = "GCAM_region_ID") %>%
      # Use complete to fill out any region/technology/gas combos that do not have emissions factors for all years
      complete(year, nesting(GCAM_region_ID, Non.CO2, supplysector, subsector,
                             stub.technology, GAINS_region, IIASA_sector, scenario, region_grouping, policy)) %>%
      # Calculate minimum by technology/gas
      group_by(Non.CO2, supplysector, subsector, stub.technology, IIASA_sector, scenario, year, policy, region_grouping) %>%
      mutate(min_value = min(emfact, na.rm = TRUE)) %>%
      ungroup %>%
      # Add marker region values
      left_join(marker_region_df, by = c("Non.CO2", "supplysector", "subsector", "stub.technology", "IIASA_sector", "scenario", "year")) %>%
      gather(variable, value, emfact, marker_value, min_value) %>%
      # Add binary columns indicating if value is marker value or minimum value
      mutate(marker_region = if_else(variable == "marker_value", 1, 0),
             min = if_else(variable == "min_value", 1, 0)) %>%
      rename(multiplier_year = year, multiplier_scenario = scenario) %>%
      select(-GAINS_region, -region_grouping, -variable, -policy)
    SSP_EF <- EF_rules %>%
      # Join rules with values
      left_join(EF_all, by = c("GCAM_region_ID", "Non.CO2", "supplysector", "subsector", "stub.technology", "IIASA_sector",
                               "multiplier_scenario", "multiplier_year", "marker_region", "min")) %>%
      # Multiply non-NA values by multipliers
      mutate(value = replace(value, !is.na(value), multiplier[!is.na(value)] * value[!is.na(value)])) %>%
      select(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology,
             agg_sector = IIASA_sector, year, value, SSP_group) %>%
      group_by(GCAM_region_ID, Non.CO2, supplysector, subsector, stub.technology,
               agg_sector, SSP_group) %>%
      # Set NA values to previous (non-NA) value
      # NOTE(review): the fill and the no-increase clamp are each applied three
      # times because lag() is not recursive; three passes appear intended to
      # cover the four rule years (2010/2030/2050/2100) -- confirm if years change.
      mutate(value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
             value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
             value = if_else(is.na(value), lag(value, n = 1L, order_by = year), value),
             # Emission factors cannot increase-if any increases, change it to value from previous time step
             prev = lag(value, n = 1L, order_by = year),
             value = if_else(value > prev & !is.na(prev), prev, value),
             prev = lag(value, n = 1L, order_by = year),
             value = if_else(value > prev & !is.na(prev), prev, value),
             prev = lag(value, n = 1L, order_by = year),
             value = if_else(value > prev & !is.na(prev), prev, value)) %>%
      select(-prev) %>%
      ungroup
    # Split data by SSP grouping
    out_df <- SSP_EF %>%
      split(.$SSP_group) %>%
      lapply(function(df) {
        select(df, -SSP_group)
      })
    # Produce outputs
    out_df[["1&5"]] %>%
      add_title("Emissions factors for SSP 1 and SSP 5") %>%
      add_units("Tg/EJ") %>%
      add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
      add_legacy_name("L161.SSP15_EF") %>%
      add_precursors("emissions/A_regions",
                     "emissions/mappings/GCAM_sector_tech",
                     "emissions/mappings/GCAM_sector_tech_Revised",
                     "emissions/mappings/gains_to_gcam_sector",
                     "emissions/GAINS_activities",
                     "emissions/GAINS_emissions",
                     "L102.pcgdp_thous90USD_Scen_R_Y",
                     "L111.nonghg_tgej_R_en_S_F_Yh",
                     "L114.bcoc_tgej_R_en_S_F_2000",
                     "emissions/A61_emfact_rules") ->
      L161.SSP15_EF
    out_df[["2"]] %>%
      add_title("Emissions factors for SSP 2") %>%
      add_units("Tg/EJ") %>%
      add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
      add_legacy_name("L161.SSP2_EF") %>%
      add_precursors("emissions/A_regions",
                     "emissions/mappings/GCAM_sector_tech",
                     "emissions/mappings/GCAM_sector_tech_Revised",
                     "emissions/mappings/gains_to_gcam_sector",
                     "emissions/GAINS_activities",
                     "emissions/GAINS_emissions",
                     "L102.pcgdp_thous90USD_Scen_R_Y",
                     "L111.nonghg_tgej_R_en_S_F_Yh",
                     "L114.bcoc_tgej_R_en_S_F_2000",
                     "emissions/A61_emfact_rules") ->
      L161.SSP2_EF
    out_df[["3&4"]] %>%
      add_title("Emissions factors for SSP 3 and SSP 4") %>%
      add_units("Tg/EJ") %>%
      add_comments("Scales future GAINS emissions factors to L111/L114 base year emissions factors, then applies to GCAM future years") %>%
      add_legacy_name("L161.SSP34_EF") %>%
      add_precursors("emissions/A_regions",
                     "emissions/mappings/GCAM_sector_tech",
                     "emissions/mappings/GCAM_sector_tech_Revised",
                     "emissions/mappings/gains_to_gcam_sector",
                     "emissions/GAINS_activities",
                     "emissions/GAINS_emissions",
                     "L102.pcgdp_thous90USD_Scen_R_Y",
                     "L111.nonghg_tgej_R_en_S_F_Yh",
                     "L114.bcoc_tgej_R_en_S_F_2000",
                     "emissions/A61_emfact_rules") ->
      L161.SSP34_EF
    return_data(L161.SSP15_EF, L161.SSP2_EF, L161.SSP34_EF)
  } else {
    stop("Unknown command")
  }
}
|
# Return (as a single character string) the HTML fragment for the "Layers"
# control panel of the linkR HTML viewer: one opacity slider + label per
# drawable layer, plus a collapsible "Coordinate box" group with sub-layers.
# The markup is emitted verbatim; sliders invoke the page's layer_visibility()
# JS handler and the expand/collapse arrows invoke show_hide_control().
# The string literal below is runtime data and must not be reformatted.
writeLinkageLayers <- function(){
r <- '\t\t\t<div id="control_layers_container" class="control_layers_container" >
<div id="control_layers_header" class="control_layers_header" >
<a id="control_layers_click" class="control_layers_click" onClick="javascript:show_hide_control(this);" >&#9660;</a>
<a class="control_layers_text">Layers</a>
</div>
<div class="control_layer_subheader" >
<div class="control_layer_subheader_label" >Opacity</div>
<div style="margin:0px 0px 0px 55px;" >Layer names</div>
</div>
<div id="control_layers_main" class="control_layers_main" >
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joints</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Points</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joint wire frame</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Point wire frame</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joint constraints</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Link coordinate systems</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_layer_vis" onClick="javascript:show_hide_control(this);" >&#9658;</a>
<a class="control_layer_text">Coordinate box</a>
</div>
<div class="control_layer_main" style="display:none;" >
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=0 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Bounding panel fill</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=40 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Bounding panel outline</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=10 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Grid</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=50 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Ticks</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Tick labels</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Axis labels</a>
</div>
</div>
</div>
</div>
</div>
</div>
'
# Return the assembled HTML fragment (single string, including trailing newline).
r
}
|
/R/writeLinkageLayers.R
|
no_license
|
aaronolsen/linkR
|
R
| false
| false
| 5,829
|
r
|
# Return (as a single character string) the HTML fragment for the "Layers"
# control panel of the linkR HTML viewer: one opacity slider + label per
# drawable layer, plus a collapsible "Coordinate box" group with sub-layers.
# The markup is emitted verbatim; sliders invoke the page's layer_visibility()
# JS handler and the expand/collapse arrows invoke show_hide_control().
# The string literal below is runtime data and must not be reformatted.
writeLinkageLayers <- function(){
r <- '\t\t\t<div id="control_layers_container" class="control_layers_container" >
<div id="control_layers_header" class="control_layers_header" >
<a id="control_layers_click" class="control_layers_click" onClick="javascript:show_hide_control(this);" >&#9660;</a>
<a class="control_layers_text">Layers</a>
</div>
<div class="control_layer_subheader" >
<div class="control_layer_subheader_label" >Opacity</div>
<div style="margin:0px 0px 0px 55px;" >Layer names</div>
</div>
<div id="control_layers_main" class="control_layers_main" >
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joints</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Points</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joint wire frame</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Point wire frame</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Joint constraints</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_text_nochild">Link coordinate systems</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level1"></a>
<a class="control_layer_layer_vis" onClick="javascript:show_hide_control(this);" >&#9658;</a>
<a class="control_layer_text">Coordinate box</a>
</div>
<div class="control_layer_main" style="display:none;" >
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=0 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Bounding panel fill</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=40 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Bounding panel outline</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=10 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Grid</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=50 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Ticks</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Tick labels</a>
</div>
</div>
<div class="control_layer_container" >
<div class="control_layer_header" >
<div class="control_layer_shape_vis_range" ><input type="range" class="control_layer_shape_vis_range" oninput="layer_visibility(this);" value=100 /></div>
<a class="control_layer_level2"></a>
<a class="control_layer_text_nochild">Axis labels</a>
</div>
</div>
</div>
</div>
</div>
</div>
'
# Return the assembled HTML fragment (single string, including trailing newline).
r
}
|
#POL and REl Replication Files
# Emily K. Gade
# 20 May 2020
rm(list=ls())
library(foreign)
library(wordcloud)
library(betareg)
library(stargazer)
library(xtable)
library(ggplot2)
library(tidyverse)
library(reshape2)
library(scales)
library(pglm)
setwd("~/Desktop/Emory/pub_projects/Published/EKGJonJohn_SenateInsecruityPaper/")
selectdata2<-read.csv("polAndReligDataSEnate.csv", stringsAsFactors = F, header = T)
pdata <- pdata.frame(selectdata2, index=c("URLs", "year"))
##########################
#for POL and RELI Paper - final models:
##########################
## Table 1
realWorld_Relig <-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$femadec + pdata$terroristattack +
pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
+ pdata$upforElection, data = pdata)
realWorld_Anx <-betareg(pdata$frequency_anxiouty~ pdata$frequency_anxiouty_lag1 + pdata$femadec + pdata$terroristattack +
pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
+ pdata$upforElection, data = pdata)
relig_allcontrols_axiety<-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
+ pdata$upforElection, data = pdata)
### print table 1
stargazer(realWorld_Anx, realWorld_Relig, relig_allcontrols_axiety)
########
#Word Clouds for each list
#####
#load frequency data
# One CSV of word frequencies per dictionary (Chapp, LIWC religion, DHS,
# Islam, optimism, anxiety word lists).
freqs2chapp<-read.csv("freqs_chapp.csv", header = T, stringsAsFactors = F)
freqs2LWIC<-read.csv("freqs_LWIC_rel.csv", header = T, stringsAsFactors = F)
freqs2dhs<-read.csv("freqs_DHS.csv", header = T, stringsAsFactors = F)
freqs2islam<-read.csv("freqs_islam.csv", header = T, stringsAsFactors = F)
freqs2opt<-read.csv("freqs_opt.csv", header = T, stringsAsFactors = F)
freqs2anx<-read.csv("freqs2anx.csv", header = T, stringsAsFactors = F)
### making clouds
# Uncomment the pdf()/par()/dev.off() lines to write all six clouds into a
# single 2x3 PDF page instead of plotting interactively.
#pdf(file="wordClouds_senate_relig_20May.pdf", paper="letter",width = 7,height = 5)
#par(mfrow=c(2,3))
wordcloud(freqs2chapp$word, freqs2chapp$freq, scale=c(4, .25),
          random.order = FALSE, random.color = FALSE,
          colors= c("indianred1","indianred2","indianred3","indianred"))
wordcloud(freqs2LWIC$word, freqs2LWIC$freq, scale=c(4, .4),
          random.order = FALSE, random.color = FALSE,
          colors= c("lightsteelblue1","lightsteelblue2","lightsteelblue3","lightsteelblue"))
wordcloud(freqs2dhs$word, freqs2dhs$freq, scale=c(4, .3),
          random.order = FALSE, random.color = FALSE,
          colors= c("goldenrod","goldenrod1","goldenrod2","goldenrod3"))
wordcloud(freqs2islam$word, freqs2islam$freq, scale=c(4, .5),
          random.order = FALSE, random.color = FALSE,
          colors= c("tomato","tomato1","tomato2","tomato3"))
wordcloud(freqs2anx$word, freqs2anx$freq, scale=c(4, .3),
          random.order = FALSE, random.color = FALSE,
          colors= c("seagreen1","seagreen2","seagreen3","seagreen4"))
wordcloud(freqs2opt$word, freqs2opt$freq, scale=c(4, .5),
          random.order = FALSE, random.color = FALSE,
          colors= c("cadetblue1","cadetblue","cadetblue3","cadetblue"))
#dev.off()
########
# descriptive stats
######
IVDV<-selectdata2
# One row per senator: sum affiliation/party indicators over the panel,
# then collapse any positive sum to 1 (ever-affiliated flag).
barplots_senator<- aggregate(list(IVDV$unafilated, IVDV$other, IVDV$jew, IVDV$mormon, IVDV$catholic, IVDV$protestantdemonation, IVDV$Republican),
                             by=list(IVDV$URLs), FUN = sum)
names(barplots_senator)<-c("URLS", "Unafiliated", "other", "jewish", "mormon", "catholic", "protestant", "republican")
# NOTE(review): this comparison is applied to the whole data frame,
# including the URLS identifier column — confirm it is not corrupted.
barplots_senator[barplots_senator>0]<-1
# 2x2 tables of each affiliation vs. party; row 2 keeps the counts for
# senators with the affiliation, split Dem (col 1) / Rep (col 2).
Unaffil<-table(barplots_senator$Unafiliated, barplots_senator$republican)
Catholic<-table(barplots_senator$catholic, barplots_senator$republican)
Jewish<-table(barplots_senator$jewish, barplots_senator$republican)
Other<-table(barplots_senator$other, barplots_senator$republican)
Protestant<-table(barplots_senator$protestant, barplots_senator$republican)
Mormon<-table(barplots_senator$mormon, barplots_senator$republican)
Unaffil<-Unaffil[2,]
Catholic<-Catholic[2,]
Jewish<-Jewish[2,]
Other<-Other[2,]
Mormon<-Mormon[2,]
Protestant<-Protestant[2,]
religtab<-data.frame(rbind(Unaffil, Catholic, Jewish, Other, Mormon, Protestant))
names(religtab)<-c("Dems", "Repub")
religtab$relig <- row.names(religtab)
# Long format for a stacked bar chart of religion counts by party.
mdfr <- melt(religtab, id.vars = "relig")
names(mdfr)<-c("Religion", "Party", "Count")
p <- ggplot(mdfr, aes(Religion, Count, fill = Party)) +
  geom_col()
p + scale_fill_manual(values=c( "blue", "red")) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                                                        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# NOTE(review): the line below is not chained to the plot with "+", so it
# creates a scale object and discards it — likely a missing "+" above.
scale_y_continuous(labels = percent)
#####
# Descriptive-statistics table (range, mean, SD) for the covariates.
Vars<- c("% Very Religious (State)", "% Very Conservative (State)", "% Evangelical (State)",
         "Conservatism (Senator)", "Jewish Faith (Senator)", "Mormon Faith (Senator)", "Female (Senator)",
         "Up For Election", "Republican (Senator)")
Range<-list(
  range(IVDV$veryReligous), range(IVDV$veryconservative),
  range(na.omit(IVDV$evang_state)),
  range(IVDV$dw1), range(IVDV$jew), range(IVDV$mormon),
  range(IVDV$Female), range(IVDV$upforElection), range(IVDV$Republican))
Mean <- c(
  mean(IVDV$veryReligous), mean(IVDV$veryconservative),
  mean(na.omit(IVDV$evang_state)),
  mean(IVDV$dw1), mean(IVDV$jew), mean(IVDV$mormon),
  mean(IVDV$Female), mean(IVDV$upforElection), mean(IVDV$Republican))
SD<- c(
  sd(IVDV$veryReligous), sd(IVDV$veryconservative), sd(na.omit(IVDV$evang_state)),
  sd(IVDV$dw1), sd(IVDV$jew), sd(IVDV$mormon), sd(IVDV$Female), sd(IVDV$upforElection), sd(IVDV$Republican))
# Split each range into its min (a) and max (b) columns.
a<-unlist(lapply(Range, `[[`, 1))
b<-unlist(lapply(Range, `[[`, 2))
descr_stats<-data.frame(Vars, a, b, Mean, SD)
xtable(descr_stats, digits = 2)
#################
# Robustness Checks
#################
######
#Bivariate relationships
########
biv_anx <-betareg(pdata$frequency_religLWIC~pdata$frequency_anxiouty)
biv_dhs <-betareg(pdata$frequency_religLWIC~pdata$frequency_dhs)
stargazer(biv_anx, biv_dhs)
######
# with Chapp Words
####
# Same specification as the main model, but with the Chapp religion
# dictionary as the outcome.
relig_allcontrols_Chapp<-betareg(pdata$frequency_chapp ~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism +pdata$frequency_dhs + pdata$frequency_opt + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_allcontrols_Chapp)
## no islam
# LIWC religion outcome with Islam-related terms removed from the list.
relig_allcontrols_NoIslam<-betareg(pdata$frequency_reli_Lwicnoislam~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_allcontrols_NoIslam)
### with other types of text measures
# NOTE(review): pdata$veryReligous appears twice in this formula; the
# duplicate term is redundant.
relig_alltext<-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$frequency_dhs + pdata$frequency_opt + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$veryReligous + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_alltext)
|
/PolAndReligionRepFiles.R
|
no_license
|
ekgade/PoliticsAndRelgion_RepFiles
|
R
| false
| false
| 7,837
|
r
|
#POL and REl Replication Files
# Emily K. Gade
# 20 May 2020
# Replication script: loads a senator-by-year panel of press-release word
# frequencies and fits beta regressions (outcomes are proportions in (0, 1)).
rm(list=ls())
library(foreign)
library(wordcloud)
library(betareg)
library(stargazer)
library(xtable)
library(ggplot2)
library(tidyverse)
library(reshape2)
library(scales)
library(pglm)
setwd("~/Desktop/Emory/pub_projects/Published/EKGJonJohn_SenateInsecruityPaper/")
# Senator-year panel: word-frequency outcomes plus senator/state covariates.
selectdata2<-read.csv("polAndReligDataSEnate.csv", stringsAsFactors = F, header = T)
# Panel index: senator (URLs) by year.  NOTE(review): pdata.frame() is
# presumably provided by plm via the pglm dependency loaded above — confirm.
pdata <- pdata.frame(selectdata2, index=c("URLs", "year"))
##########################
#for POL and RELI Paper - final models:
##########################
## Table 1
# Beta regressions for Table 1.  Each model includes a one-year lag of its
# dependent variable plus event covariates (FEMA declarations, terrorism)
# and senator/state characteristics.
# NOTE(review): formulas use pdata$-prefixed terms AND data = pdata, so the
# data argument is redundant (but harmless).
realWorld_Relig <-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$femadec + pdata$terroristattack +
                    pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
realWorld_Anx <-betareg(pdata$frequency_anxiouty~ pdata$frequency_anxiouty_lag1 + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
relig_allcontrols_axiety<-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
### print table 1
stargazer(realWorld_Anx, realWorld_Relig, relig_allcontrols_axiety)
########
#Word Clouds for each list
#####
#load frequency data
# One CSV of word frequencies per dictionary (Chapp, LIWC religion, DHS,
# Islam, optimism, anxiety word lists).
freqs2chapp<-read.csv("freqs_chapp.csv", header = T, stringsAsFactors = F)
freqs2LWIC<-read.csv("freqs_LWIC_rel.csv", header = T, stringsAsFactors = F)
freqs2dhs<-read.csv("freqs_DHS.csv", header = T, stringsAsFactors = F)
freqs2islam<-read.csv("freqs_islam.csv", header = T, stringsAsFactors = F)
freqs2opt<-read.csv("freqs_opt.csv", header = T, stringsAsFactors = F)
freqs2anx<-read.csv("freqs2anx.csv", header = T, stringsAsFactors = F)
### making clouds
# Uncomment the pdf()/par()/dev.off() lines to write all six clouds into a
# single 2x3 PDF page instead of plotting interactively.
#pdf(file="wordClouds_senate_relig_20May.pdf", paper="letter",width = 7,height = 5)
#par(mfrow=c(2,3))
wordcloud(freqs2chapp$word, freqs2chapp$freq, scale=c(4, .25),
          random.order = FALSE, random.color = FALSE,
          colors= c("indianred1","indianred2","indianred3","indianred"))
wordcloud(freqs2LWIC$word, freqs2LWIC$freq, scale=c(4, .4),
          random.order = FALSE, random.color = FALSE,
          colors= c("lightsteelblue1","lightsteelblue2","lightsteelblue3","lightsteelblue"))
wordcloud(freqs2dhs$word, freqs2dhs$freq, scale=c(4, .3),
          random.order = FALSE, random.color = FALSE,
          colors= c("goldenrod","goldenrod1","goldenrod2","goldenrod3"))
wordcloud(freqs2islam$word, freqs2islam$freq, scale=c(4, .5),
          random.order = FALSE, random.color = FALSE,
          colors= c("tomato","tomato1","tomato2","tomato3"))
wordcloud(freqs2anx$word, freqs2anx$freq, scale=c(4, .3),
          random.order = FALSE, random.color = FALSE,
          colors= c("seagreen1","seagreen2","seagreen3","seagreen4"))
wordcloud(freqs2opt$word, freqs2opt$freq, scale=c(4, .5),
          random.order = FALSE, random.color = FALSE,
          colors= c("cadetblue1","cadetblue","cadetblue3","cadetblue"))
#dev.off()
########
# descriptive stats
######
IVDV<-selectdata2
# One row per senator: sum affiliation/party indicators over the panel,
# then collapse any positive sum to 1 (ever-affiliated flag).
barplots_senator<- aggregate(list(IVDV$unafilated, IVDV$other, IVDV$jew, IVDV$mormon, IVDV$catholic, IVDV$protestantdemonation, IVDV$Republican),
                             by=list(IVDV$URLs), FUN = sum)
names(barplots_senator)<-c("URLS", "Unafiliated", "other", "jewish", "mormon", "catholic", "protestant", "republican")
# NOTE(review): this comparison is applied to the whole data frame,
# including the URLS identifier column — confirm it is not corrupted.
barplots_senator[barplots_senator>0]<-1
# 2x2 tables of each affiliation vs. party; row 2 keeps the counts for
# senators with the affiliation, split Dem (col 1) / Rep (col 2).
Unaffil<-table(barplots_senator$Unafiliated, barplots_senator$republican)
Catholic<-table(barplots_senator$catholic, barplots_senator$republican)
Jewish<-table(barplots_senator$jewish, barplots_senator$republican)
Other<-table(barplots_senator$other, barplots_senator$republican)
Protestant<-table(barplots_senator$protestant, barplots_senator$republican)
Mormon<-table(barplots_senator$mormon, barplots_senator$republican)
Unaffil<-Unaffil[2,]
Catholic<-Catholic[2,]
Jewish<-Jewish[2,]
Other<-Other[2,]
Mormon<-Mormon[2,]
Protestant<-Protestant[2,]
religtab<-data.frame(rbind(Unaffil, Catholic, Jewish, Other, Mormon, Protestant))
names(religtab)<-c("Dems", "Repub")
religtab$relig <- row.names(religtab)
# Long format for a stacked bar chart of religion counts by party.
mdfr <- melt(religtab, id.vars = "relig")
names(mdfr)<-c("Religion", "Party", "Count")
p <- ggplot(mdfr, aes(Religion, Count, fill = Party)) +
  geom_col()
p + scale_fill_manual(values=c( "blue", "red")) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                                                        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# NOTE(review): the line below is not chained to the plot with "+", so it
# creates a scale object and discards it — likely a missing "+" above.
scale_y_continuous(labels = percent)
#####
# Descriptive-statistics table (range, mean, SD) for the covariates.
Vars<- c("% Very Religious (State)", "% Very Conservative (State)", "% Evangelical (State)",
         "Conservatism (Senator)", "Jewish Faith (Senator)", "Mormon Faith (Senator)", "Female (Senator)",
         "Up For Election", "Republican (Senator)")
Range<-list(
  range(IVDV$veryReligous), range(IVDV$veryconservative),
  range(na.omit(IVDV$evang_state)),
  range(IVDV$dw1), range(IVDV$jew), range(IVDV$mormon),
  range(IVDV$Female), range(IVDV$upforElection), range(IVDV$Republican))
Mean <- c(
  mean(IVDV$veryReligous), mean(IVDV$veryconservative),
  mean(na.omit(IVDV$evang_state)),
  mean(IVDV$dw1), mean(IVDV$jew), mean(IVDV$mormon),
  mean(IVDV$Female), mean(IVDV$upforElection), mean(IVDV$Republican))
SD<- c(
  sd(IVDV$veryReligous), sd(IVDV$veryconservative), sd(na.omit(IVDV$evang_state)),
  sd(IVDV$dw1), sd(IVDV$jew), sd(IVDV$mormon), sd(IVDV$Female), sd(IVDV$upforElection), sd(IVDV$Republican))
# Split each range into its min (a) and max (b) columns.
a<-unlist(lapply(Range, `[[`, 1))
b<-unlist(lapply(Range, `[[`, 2))
descr_stats<-data.frame(Vars, a, b, Mean, SD)
xtable(descr_stats, digits = 2)
#################
# Robustness Checks
#################
######
#Bivariate relationships
########
biv_anx <-betareg(pdata$frequency_religLWIC~pdata$frequency_anxiouty)
biv_dhs <-betareg(pdata$frequency_religLWIC~pdata$frequency_dhs)
stargazer(biv_anx, biv_dhs)
######
# with Chapp Words
####
# Same specification as the main model, but with the Chapp religion
# dictionary as the outcome.
relig_allcontrols_Chapp<-betareg(pdata$frequency_chapp ~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism +pdata$frequency_dhs + pdata$frequency_opt + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_allcontrols_Chapp)
## no islam
# LIWC religion outcome with Islam-related terms removed from the list.
relig_allcontrols_NoIslam<-betareg(pdata$frequency_reli_Lwicnoislam~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_allcontrols_NoIslam)
### with other types of text measures
# NOTE(review): pdata$veryReligous appears twice in this formula; the
# duplicate term is redundant.
relig_alltext<-betareg(pdata$frequency_religLWIC~ pdata$frequency_religLWIC_lag1 + pdata$frequency_anxiouty + pdata$frequency_dhs + pdata$frequency_opt + pdata$femadec + pdata$terroristattack +
                    pdata$globalterrorism + pdata$veryReligous + pdata$dw1 + pdata$Female + pdata$veryconservative + pdata$veryReligous
                  + pdata$upforElection, data = pdata)
stargazer(relig_alltext)
|
#!/usr/bin/env R
# RNA-seq differential-expression step of a snakemake pipeline: counts reads
# over protein-coding exons and runs DESeq2 on the resulting matrix.
r = getOption("repos")
r["CRAN"] = "http://cran.us.r-project.org"
options(repos = r)
# Install packages
# Install-on-demand: Bioconductor packages go through BiocManager, plain
# CRAN packages through install.packages().
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
if (!requireNamespace("GenomicFeatures", quietly = TRUE))
  BiocManager::install("GenomicFeatures")
if (!requireNamespace("Rsamtools", quietly = TRUE))
  BiocManager::install("Rsamtools")
if (!requireNamespace("GenomicAlignments", quietly = TRUE))
  BiocManager::install("GenomicAlignments")
if (!requireNamespace("BiocParallel", quietly = TRUE))
  BiocManager::install("BiocParallel")
if (!requireNamespace("rtracklayer", quietly = TRUE))
  BiocManager::install("rtracklayer")
if (!requireNamespace("DESeq2", quietly = TRUE))
  BiocManager::install("DESeq2")
if (!requireNamespace("gplots", quietly = TRUE))
  install.packages("gplots")
# Import packages
suppressMessages(library("Rsamtools"))
suppressMessages(library("GenomicFeatures"))
suppressMessages(library("GenomicAlignments"))
suppressMessages(library("BiocParallel"))
suppressMessages(library("DESeq2"))
suppressMessages(library("gplots"))
suppressMessages(library("rtracklayer"))
# Set up features for SummarizeOverlaps
# Keep protein-coding exons on the listed chromosome arms (Drosophila-style
# names) and group exons by gene so counts are per gene.
gff0 <- import(snakemake@input[["gtf"]])
idx <- mcols(gff0)$source == "protein_coding" & mcols(gff0)$type == "exon" & (seqnames(gff0) %in% c("X", "2L", "2R", "3L", "3R", "4", "YHet"))
gff <- gff0[idx]
genes <- split(gff, mcols(gff)$gene_id)
# Import table with columns FileName SRRNumber GEOAccession Status SampleName
cat("- Importing samples table\n")
samples_table <- read.table(file=snakemake@input[["samples_table"]], sep="\t", header=TRUE)
# Create txdb object
# cat("- Creating txdb object\n")
# txdb <- makeTxDbFromGFF(file=snakemake@input[["gtf"]], format="gtf", organism="Drosophila melanogaster")
# organize exons by gene
# cat("- Organizing exons by gene\n")
# eByg <- exonsBy(txdb, by="gene")
# Import bam files
cat("- Importing bam files\n")
cat(snakemake@input[["bam_files"]])
# bam_names <- as.character(samples_table$FileName)
bam_files <- BamFileList(snakemake@input[["bam_files"]])
# SummarizeOverlaps
# Count single-end reads per gene, unstranded union mode, in parallel.
cat("- Running SummarizeOverlaps\n")
register(MulticoreParam(multicoreWorkers()))
se <- summarizeOverlaps(features=genes, reads=bam_files, mode="Union", singleEnd=TRUE, ignore.strand=FALSE)
# Format se
# Attach sample metadata to the SummarizedExperiment.
# NOTE(review): an "Accession" column is used below, but the comment above
# lists the columns as FileName/SRRNumber/GEOAccession/Status/SampleName —
# confirm the actual column name in the samples table.
colData(se) <- DataFrame(samples_table)
colnames(se) <- samples_table$Accession
# rowData(se) <- names(rowRanges(se))
# Alternative to the above method
# colnames(rowRanges(se)) <- "id" #the colname is changed to "FBtr"
#---------------------------------
# Differential expression analysis
#---------------------------------
cat("- Performing DGE\n")
# Build DESeq Data set
dds <- DESeqDataSet(se, design= ~ Environment)
# Regularized log ratio
rld <- rlog(dds)
# Measure euclidean distance
# Samples are columns, so transpose before computing between-sample distances.
d <- dist(t(assay(rld)))
# Agglomerate using complete distance
hc <- hclust(d)
# Plot dendogram
dend = as.dendrogram(hc)
png(snakemake@output[["dendogram"]])
plot(dend)
dev.off()
# PCA
png(snakemake@output[["pca"]])
plotPCA(rld, intgroup = "Environment")
dev.off()
# Generate results table
dds <- DESeq(dds)
resD <- results(dds, alpha=0.05)
# Save DE genes in csv file
# NOTE(review): the cutoff of the top 395 genes is hard-coded — presumably
# the number of significant genes in the original run; confirm before reuse.
resDSort <- resD[order(resD$padj),]
topDESeq2 <- resDSort[1:395,]
write.csv(topDESeq2, file=snakemake@output[["dge_table"]])
# MA plot
png(file=snakemake@output[["ma_plot"]])
plotMA(resD, ylim=c(-7,7))
dev.off()
cat("- DGE done\n")
# Persist the full workspace for downstream inspection.
save.image(file=snakemake@output[["r_image"]])
|
/scripts/exp-analysis.R
|
no_license
|
alejoaa/biof-term-project
|
R
| false
| false
| 3,498
|
r
|
#!/usr/bin/env R
# RNA-seq differential-expression step of a snakemake pipeline: counts reads
# over protein-coding exons and runs DESeq2 on the resulting matrix.
r = getOption("repos")
r["CRAN"] = "http://cran.us.r-project.org"
options(repos = r)
# Install packages
# Install-on-demand: Bioconductor packages go through BiocManager, plain
# CRAN packages through install.packages().
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
if (!requireNamespace("GenomicFeatures", quietly = TRUE))
  BiocManager::install("GenomicFeatures")
if (!requireNamespace("Rsamtools", quietly = TRUE))
  BiocManager::install("Rsamtools")
if (!requireNamespace("GenomicAlignments", quietly = TRUE))
  BiocManager::install("GenomicAlignments")
if (!requireNamespace("BiocParallel", quietly = TRUE))
  BiocManager::install("BiocParallel")
if (!requireNamespace("rtracklayer", quietly = TRUE))
  BiocManager::install("rtracklayer")
if (!requireNamespace("DESeq2", quietly = TRUE))
  BiocManager::install("DESeq2")
if (!requireNamespace("gplots", quietly = TRUE))
  install.packages("gplots")
# Import packages
suppressMessages(library("Rsamtools"))
suppressMessages(library("GenomicFeatures"))
suppressMessages(library("GenomicAlignments"))
suppressMessages(library("BiocParallel"))
suppressMessages(library("DESeq2"))
suppressMessages(library("gplots"))
suppressMessages(library("rtracklayer"))
# Set up features for SummarizeOverlaps
# Keep protein-coding exons on the listed chromosome arms (Drosophila-style
# names) and group exons by gene so counts are per gene.
gff0 <- import(snakemake@input[["gtf"]])
idx <- mcols(gff0)$source == "protein_coding" & mcols(gff0)$type == "exon" & (seqnames(gff0) %in% c("X", "2L", "2R", "3L", "3R", "4", "YHet"))
gff <- gff0[idx]
genes <- split(gff, mcols(gff)$gene_id)
# Import table with columns FileName SRRNumber GEOAccession Status SampleName
cat("- Importing samples table\n")
samples_table <- read.table(file=snakemake@input[["samples_table"]], sep="\t", header=TRUE)
# Create txdb object
# cat("- Creating txdb object\n")
# txdb <- makeTxDbFromGFF(file=snakemake@input[["gtf"]], format="gtf", organism="Drosophila melanogaster")
# organize exons by gene
# cat("- Organizing exons by gene\n")
# eByg <- exonsBy(txdb, by="gene")
# Import bam files
cat("- Importing bam files\n")
cat(snakemake@input[["bam_files"]])
# bam_names <- as.character(samples_table$FileName)
bam_files <- BamFileList(snakemake@input[["bam_files"]])
# SummarizeOverlaps
# Count single-end reads per gene, unstranded union mode, in parallel.
cat("- Running SummarizeOverlaps\n")
register(MulticoreParam(multicoreWorkers()))
se <- summarizeOverlaps(features=genes, reads=bam_files, mode="Union", singleEnd=TRUE, ignore.strand=FALSE)
# Format se
# Attach sample metadata to the SummarizedExperiment.
# NOTE(review): an "Accession" column is used below, but the comment above
# lists the columns as FileName/SRRNumber/GEOAccession/Status/SampleName —
# confirm the actual column name in the samples table.
colData(se) <- DataFrame(samples_table)
colnames(se) <- samples_table$Accession
# rowData(se) <- names(rowRanges(se))
# Alternative to the above method
# colnames(rowRanges(se)) <- "id" #the colname is changed to "FBtr"
#---------------------------------
# Differential expression analysis
#---------------------------------
cat("- Performing DGE\n")
# Build DESeq Data set
dds <- DESeqDataSet(se, design= ~ Environment)
# Regularized log ratio
rld <- rlog(dds)
# Measure euclidean distance
# Samples are columns, so transpose before computing between-sample distances.
d <- dist(t(assay(rld)))
# Agglomerate using complete distance
hc <- hclust(d)
# Plot dendogram
dend = as.dendrogram(hc)
png(snakemake@output[["dendogram"]])
plot(dend)
dev.off()
# PCA
png(snakemake@output[["pca"]])
plotPCA(rld, intgroup = "Environment")
dev.off()
# Generate results table
dds <- DESeq(dds)
resD <- results(dds, alpha=0.05)
# Save DE genes in csv file
# NOTE(review): the cutoff of the top 395 genes is hard-coded — presumably
# the number of significant genes in the original run; confirm before reuse.
resDSort <- resD[order(resD$padj),]
topDESeq2 <- resDSort[1:395,]
write.csv(topDESeq2, file=snakemake@output[["dge_table"]])
# MA plot
png(file=snakemake@output[["ma_plot"]])
plotMA(resD, ylim=c(-7,7))
dev.off()
cat("- DGE done\n")
# Persist the full workspace for downstream inspection.
save.image(file=snakemake@output[["r_image"]])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_eval.R
\name{plotcorr}
\alias{plotcorr}
\title{Plot of correlations (numerical variables)}
\usage{
plotcorr(object, limit = 10)
}
\arguments{
\item{object}{An object of class ccdata}
\item{limit}{Number of variables to display by descending value (default=10)}
}
\value{
A bar plot of the correlations between numerical variables and the cluster vector.
}
\description{
Plot of correlations (numerical variables)
}
\examples{
data(BankCustomer)
obj <- Dataset(BankCustomer, BankCustomer$Cluster)
plotcorr(obj)
}
|
/man/plotcorr.Rd
|
no_license
|
adrienPAVOINE/ClustCheck
|
R
| false
| true
| 601
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_eval.R
\name{plotcorr}
\alias{plotcorr}
\title{Plot of correlations (numerical variables)}
\usage{
plotcorr(object, limit = 10)
}
\arguments{
\item{object}{An object of class ccdata}
\item{limit}{Number of variables to display by descending value (default=10)}
}
\value{
A bar plot of the correlations between numerical variables and the cluster vector.
}
\description{
Plot of correlations (numerical variables)
}
\examples{
data(BankCustomer)
obj <- Dataset(BankCustomer, BankCustomer$Cluster)
plotcorr(obj)
}
|
\name{ignizio.example.3.1}
\docType{data}
\alias{ignizio.example.3.1}
\title{Ignizio (1976) Example Data Sets}
\description{
The data set that corresponds to Example 3-1
found in Ignizio (1976). These are
examples of goal programming problems solved
using the methods described by the author.
Find
\eqn{ {\mathbf{x}'} = [ x_1, x_2 ] },
\eqn{ {\mathbf{n}'} = [ n_1, n_2, n_3 ] } and
\eqn{ {\mathbf{p}'} = [ p_1, p_2, p_3 ] } that minimize
\eqn{ \mathbf{a} = [ (2 p_1), (n_2), (n_3) ] } \cr
The objectives are as follows \cr
\eqn{ 10 x_1 + 15 x_2 + n_1 - p_1 = 40 } \cr
\eqn{100 x_1 + 100 x_2 + n_2 - p_2 = 1000 } \cr
\eqn{ x_2 + n_3 - p_3 = 7} \cr
\eqn{ \mathbf{x}, \mathbf{n}, \mathbf{p} \ge \mathbf{0} } \cr
The solution is \eqn{ {\mathbf{x}'} = [ 4, 0 ] } and
\eqn{ \mathbf{a} = [ 0, 600, 7] } \cr
}
\format{
The data set is an R file that creates the coefficients matrix,
the vector of target values and the data frame of achievement
goals.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\examples{
data( ignizio.example.3.1 )
soln <- llgp( coefficients, targets, achievements )
}
\seealso{
\code{\link{ignizio.datasets}}
}
\keyword{datasets}
|
/man/ignizio.example.3.1.Rd
|
no_license
|
Bhanditz/goalprog
|
R
| false
| false
| 1,358
|
rd
|
\name{ignizio.example.3.1}
\docType{data}
\alias{ignizio.example.3.1}
\title{Ignizio (1976) Example Data Sets}
\description{
The data set that corresponds to Example 3-1
found in Ignizio (1976). These are
examples of goal programming problems solved
using the methods described by the author.
Find
\eqn{ {\mathbf{x}'} = [ x_1, x_2 ] },
\eqn{ {\mathbf{n}'} = [ n_1, n_2, n_3 ] } and
\eqn{ {\mathbf{p}'} = [ p_1, p_2, p_3 ] } that minimize
\eqn{ \mathbf{a} = [ (2 p_1), (n_2), (n_3) ] } \cr
The objectives are as follows \cr
\eqn{ 10 x_1 + 15 x_2 + n_1 - p_1 = 40 } \cr
\eqn{100 x_1 + 100 x_2 + n_2 - p_2 = 1000 } \cr
\eqn{ x_2 + n_3 - p_3 = 7} \cr
\eqn{ \mathbf{x}, \mathbf{n}, \mathbf{p} \ge \mathbf{0} } \cr
The solution is \eqn{ {\mathbf{x}'} = [ 4, 0 ] } and
\eqn{ \mathbf{a} = [ 0, 600, 7] } \cr
}
\format{
The data set is an R file that creates the coefficients matrix,
the vector of target values and the data frame of achievement
goals.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\examples{
data( ignizio.example.3.1 )
soln <- llgp( coefficients, targets, achievements )
}
\seealso{
\code{\link{ignizio.datasets}}
}
\keyword{datasets}
|
# ROUND VALUES ------------------------------------------------------------
# R uses 'Round half to even' rule for rounding.
# 'Round half to even' is a tie-breaking rule that is less biased.
# x = c(1.85, 1.54, 1.65, 1.75, 1.85, 1.84, 1)
# (round(x, 1))
# 'Round half away from zero' will be used for results output.
# https://github.com/pedroguarderas/learnR/blob/master/R_base/session_26.R
# Round half away from zero (R's round() uses round-half-to-even, which
# biases presentation values such as 1.85 -> 1.8).  Vectorized over x.
# Scale the magnitude by 10^n, add 0.5, truncate, then undo the scaling
# and restore the sign.
roundHAFZ <- function(x, n = 1) {
  scaled <- abs(x) * 10^n
  sign(x) * trunc(scaled + 0.5) / 10^n
}
# (roundHAFZ(x))
# ROUND P-VALUE -----------------------------------------------------------
# Format p-values for presentation: round (half away from zero) to 3
# decimal places, and report values that round to zero as "<0.001".
# Accepts a scalar or a vector; the result is coerced to character as soon
# as any element becomes "<0.001" (same as the original's assignment).
#
# Fix: the original only rounded scalar input; for vectors it compared the
# raw values to exactly 0, so e.g. 0.0004 was never mapped to "<0.001" and
# vector p-values were returned unrounded — contradicting its own comment
# that "other p-values will have 3 digits after decimal point".
roundPVAL <- function(pval){
  pval <- roundHAFZ(pval, 3)    # vectorized; handles length 1 and length > 1
  pval[pval == 0] <- "<0.001"
  return(pval)
}
# AJCC / GRADES 2004+ -----------------------------------------------------
# only works specifically for SEER data
fun.ajcc_grade <- function(df){
  # creates 'Stage_Grade' variable with 6 possible values = { 1 / 2 / 3 / 4 / Unknown / Other }
  # to unify levels of 2 different variables: AJCC Stage 6th / 7th ed. and Grade
  # (SEER data only; requires diagnosis years 2004+ since AJCC staging
  # variables are only derived from 2004 on).
  # NOTE(review): the branch taken is controlled by the GLOBAL variables
  # `ajcc_grade` and `ajcc_ed`, not by arguments — they must be set in the
  # calling environment before this function is used.
  # NOTE(review): str_count() comes from stringr — must be attached.
  # example:
  # df = seer
  if(min(df$Year_of_diagnosis) < 2004){
    stop("Minimum 'Year of diagnosis' has to be not less than 2004.")
  }
  if(ajcc_grade == "AJCC" && ajcc_ed == "6th and 7th editions"){
    # Map Roman-numeral stage strings to "1".."4".  Matching must run in
    # descending order (IV, III, II, I) with is.na() guards so that e.g. a
    # stage-III string is not later re-matched as II or I.
    df$Stage_Grade_6th <- NA
    df$Stage_Grade_6th[str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "IV") == 1] <- "4"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "III") == 1] <- "3"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "II") == 1] <- "2"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "I") == 1] <- "1"
    # Anything still unmatched keeps its original stage-group label.
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th)] <- as.character(df$Derived_AJCC_Stage_Group_6th_ed_2004[is.na(df$Stage_Grade_6th)])
    df$Stage_Grade_7th <- NA
    df$Stage_Grade_7th[str_count(df$Derived_AJCC_Stage_Group_7th_ed_2010, "IV") == 1] <- "4"
    df$Stage_Grade_7th[is.na(df$Stage_Grade_7th) & str_count(df$Derived_AJCC_Stage_Group_7th_ed_2010, "III") == 1] <- "3"
    df$Stage_Grade_7th[is.na(df$Stage_Grade_7th) & str_count(df$Derived_AJCC_Stage_Group_7th_ed_2010, "II") == 1] <- "2"
    df$Stage_Grade_7th[is.na(df$Stage_Grade_7th) & str_count(df$Derived_AJCC_Stage_Group_7th_ed_2010, "I") == 1] <- "1"
    df$Stage_Grade_7th[is.na(df$Stage_Grade_7th)] <- as.character(df$Derived_AJCC_Stage_Group_7th_ed_2010[is.na(df$Stage_Grade_7th)])
    # 6th edition applies to 2004-2009 diagnoses, 7th edition to 2010+.
    df$Stage_Grade <- NA
    df$Stage_Grade[df$Year_of_diagnosis %in% 2004:2009] <- df$Stage_Grade_6th[df$Year_of_diagnosis %in% 2004:2009]
    df$Stage_Grade[df$Year_of_diagnosis >= 2010] <- df$Stage_Grade_7th[df$Year_of_diagnosis >= 2010]
    # "UNK Stage" - cases that do not have enough information to be staged.
    df$Stage_Grade[df$Stage_Grade == "UNK Stage"] <- "Unknown"
    # "Other" than 1 / 2 / 3 / 4 / "Unknown": includes "Not applicable" & "OCCULT"
    # "NA" (not applicable) is not an AJCC stage category. It is used as a placeholder for histologies that AJCC does not stage.
    # In "OCCULT" stage NSCLC, cancer cells are found in the sputum (mucus from the lungs), but tumors are not immediately visible.
    df$Stage_Grade[!(df$Stage_Grade %in% c("1", "2", "3", "4", "Unknown"))] <- "Other"
  } else if(ajcc_grade == "AJCC" && ajcc_ed == "6th edition"){
    # 6th-edition only: same Roman-numeral mapping, applied to all years.
    df$Stage_Grade_6th <- NA
    df$Stage_Grade_6th[str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "IV") == 1] <- "4"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "III") == 1] <- "3"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "II") == 1] <- "2"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th) & str_count(df$Derived_AJCC_Stage_Group_6th_ed_2004, "I") == 1] <- "1"
    df$Stage_Grade_6th[is.na(df$Stage_Grade_6th)] <- as.character(df$Derived_AJCC_Stage_Group_6th_ed_2004[is.na(df$Stage_Grade_6th)])
    # Placeholder so the returned frame always has the same columns.
    df$Stage_Grade_7th <- NA
    df$Stage_Grade <- df$Stage_Grade_6th
    # "UNK Stage" - cases that do not have enough information to be staged.
    df$Stage_Grade[df$Stage_Grade == "UNK Stage"] <- "Unknown"
    # "Other" than 1 / 2 / 3 / 4 / "Unknown": includes "Not applicable" & "OCCULT"
    # "NA" (not applicable) is not an AJCC stage category. It is used as a placeholder for histologies that AJCC does not stage.
    # In "OCCULT" stage NSCLC, cancer cells are found in the sputum (mucus from the lungs), but tumors are not immediately visible.
    df$Stage_Grade[!(df$Stage_Grade %in% c("1", "2", "3", "4", "Unknown"))] <- "Other"
  } else if(ajcc_grade == "Grade"){
    # Tumor grade instead of AJCC stage: map Grade I-IV to "1".."4".
    df$Stage_Grade_6th <- NA
    df$Stage_Grade_7th <- NA
    df$Stage_Grade <- NA
    df$Stage_Grade[str_count(df$Grade, "IV") == 1] <- "4"
    df$Stage_Grade[is.na(df$Stage_Grade) & str_count(df$Grade, "III") == 1] <- "3"
    df$Stage_Grade[is.na(df$Stage_Grade) & str_count(df$Grade, "II") == 1] <- "2"
    df$Stage_Grade[is.na(df$Stage_Grade) & str_count(df$Grade, "I") == 1] <- "1"
    df$Stage_Grade[is.na(df$Stage_Grade)] <- as.character(df$Grade[is.na(df$Stage_Grade)])
    # Includes: 'T-cell' / 'B-cell; pre-B; B-precursor' / 'Null cell; non T-non B' / 'NK cell; natural killer cell (1995+)'
    df$Stage_Grade[!(df$Stage_Grade %in% c("1", "2", "3", "4", "Unknown"))] <- "Other"
  }
  return(df)
}
# TABLE 1 -----------------------------------------------------------------
fun.tableone <- function(df, var_cont, var_cat, var_strata){
  # Build a "Table 1" of descriptive statistics stratified by `var_strata`
  # (a factor column name): an N(%) header row block, then mean(sd) rows
  # for each continuous variable in `var_cont`, then N(%) rows for each
  # categorical variable in `var_cat`.  Either var_cont or var_cat may be
  # NULL to skip that section.  An "Overall" column is appended after the
  # per-stratum columns.  Relies on roundHAFZ() (defined above) and
  # dplyr::left_join().
  # example:
  # var_cont = "Age_at_diagnosis"
  # var_cat = c("Sex", "Summary_stage_2000_1998", "Stage_Grade")
  # var_strata = "Race_Ethnicity"
  # df = reg_inc
  if(!is.null(var_cont)){
    # Empty frame with one column per stratum level + name + Overall.
    df_cont <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cont)){
      # continuous variables
      # Per-stratum mean and "(sd)" strings, then the overall row.
      cont_strata <- data.frame(groups = aggregate(df[, var_cont[i]] ~ df[, var_strata], df, mean)[, 1],
                                mean = roundHAFZ(aggregate(df[, var_cont[i]] ~ df[, var_strata], df, mean)[, 2]),
                                sd = paste0("(", roundHAFZ(aggregate(df[, var_cont[i]] ~ df[, var_strata], df, sd)[, 2]), ")"))
      cont_all <- data.frame(groups = "Overall",
                             mean = roundHAFZ(mean(df[, var_cont[i]])),
                             sd = paste0("(", roundHAFZ(sd(df[, var_cont[i]])), ")"))
      cont <- data.frame(rbind(cont_strata, cont_all))
      # Display cell: "mean (sd)" with thousands separators.
      cont$stat <- paste(format(cont$mean, big.mark = ","), cont$sd)
      cont <- cont[, c("groups", "stat")]
      var_name <- names(df)[which(names(df) %in% var_cont[i])]
      # Transpose so strata become columns; first row carries the headers.
      cont <- t(rbind(cbind(groups = "Characteristics", stat = paste(var_name, "/ (mean(sd))")), cont))
      col_name <- cont[1, ]
      cont <- data.frame(cont, row.names = NULL)[-1, ]
      names(cont) <- col_name
      names(df_cont) <- col_name
      df_cont <- rbind(df_cont, cont)
    }
  } else df_cont <- NULL
  if(!is.null(var_cat)){
    df_cat <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cat)){
      # categorical variables
      # Cross-tab of category x stratum (keeping NA as a level), plus the
      # unstratified "Overall" counts.
      cat_strata <- data.frame(xtabs(~ df[, var_cat[i]] + df[, var_strata], addNA = TRUE))
      names(cat_strata) <- c("var_name", "strata_name", "freq")
      cat_all <- data.frame(xtabs(~ df[, var_cat[i]], addNA = TRUE))
      names(cat_all) <- c("var_name", "freq")
      cat_all$strata_name <- "Overall"
      cat <- rbind(cat_strata, cat_all)
      # Column totals per stratum to turn counts into percentages.
      cat_sum <- aggregate(cat$freq ~ cat[, 2], cat, sum)
      names(cat_sum) <- c("strata_name", "sum")
      cat <- left_join(cat, cat_sum, by = "strata_name")
      cat$percent <- paste0("(", roundHAFZ(cat$freq / cat$sum * 100), ")")
      cat$stat <- paste(format(cat$freq, big.mark = ","), cat$percent)
      cat <- cat[, c("var_name", "strata_name", "stat")]
      var_name <- names(df)[which(names(df) %in% var_cat[i])]
      cat_name <- levels(cat$strata_name)
      # Blank header row announcing the variable, then wide format with one
      # column per stratum.
      cat <- rbind(cbind(var_name = paste(var_name, "/ N(%)"), strata_name = cat_name, stat = ""), cat)
      cat <- reshape(cat, timevar = "strata_name", idvar = "var_name", direction = "wide")
      col_name <- c("Characteristics", cat_name)
      names(cat) <- col_name
      names(df_cat) <- col_name
      df_cat <- rbind(df_cat, cat)
    }
  } else df_cat <- NULL
  # Header block: sample size N(%) per stratum; the Overall row is excluded
  # from the denominator sum.
  df_n <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
  n_strata <- data.frame(xtabs(~ df[, var_strata], addNA = TRUE))
  names(n_strata) <- c("strata_name", "freq")
  n_all <- data.frame(strata_name = "Overall", freq = nrow(df))
  n_strata <- rbind(n_strata, n_all)
  n_strata$sum <- sum(n_strata$freq[-nrow(n_strata)])
  n_strata$percent <- paste0("(", roundHAFZ(n_strata$freq / n_strata$sum * 100), ")")
  n_strata$stat <- paste(format(n_strata$freq, big.mark = ","), n_strata$percent)
  n_strata <- n_strata[, c("strata_name", "stat")]
  n_strata <- t(rbind(cbind(strata_name = "Characteristics", stat = "N(%)"), n_strata))
  col_name <- n_strata[1, ]
  n_strata <- data.frame(n_strata, row.names = NULL)[-1, ]
  names(n_strata) <- col_name
  names(df_n) <- col_name
  df_n <- rbind(df_n, n_strata)
  # Stack the three sections: N(%) header, continuous rows, categorical rows.
  tableone <- data.frame(rbind(df_n, df_cont, df_cat), row.names = NULL, check.names = FALSE)
  return(tableone)
}
# TABLE 1 BY ROWS ---------------------------------------------------------
fun.tableone_by_rows <- function(df, var_cat, var_strata){
  # Builds a "Table 1" where percentages are calculated ACROSS ROWS:
  # for each level of every variable in 'var_cat', per-stratum counts are
  # expressed as a share of that row's total.
  # Args:
  #   df         data.frame holding the variables
  #   var_cat    character vector of categorical column names (table rows)
  #   var_strata single categorical column name (table columns)
  # Returns: data.frame with a "Characteristics" column, one column per
  #   stratum level and an "Overall" column; the first row holds N(%).
  # Relies on dplyr::left_join() and the sibling helper roundHAFZ().
  # example:
  # var_cat = "Year_of_diagnosis"
  # var_strata = "Sex"
  # df = reg_inc
  if(!is.null(var_cat)){
    df_cat <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cat)){
      # cross-tabulate the i-th variable (the original indexed 'var_cat'
      # without [i], which broke whenever more than one variable was passed)
      cat_strata <- data.frame(xtabs(~ df[, var_cat[i]] + df[, var_strata], addNA = TRUE))
      names(cat_strata) <- c("var_name", "strata_name", "freq")
      # row totals = denominators for the row percentages
      strata_sum <- aggregate(cat_strata$freq ~ cat_strata$var_name, cat_strata, sum)
      names(strata_sum) <- c("var_name", "sum")
      cat_all <- strata_sum
      names(cat_all) <- c("var_name", "freq")
      cat_all$strata_name <- "Overall"
      cat <- rbind(cat_strata, cat_all)
      cat <- left_join(cat, strata_sum, by = "var_name")
      cat$percent <- paste0("(", roundHAFZ(cat$freq / cat$sum * 100), ")")
      cat$stat <- paste(format(cat$freq, big.mark = ","), cat$percent)
      cat <- cat[, c("var_name", "strata_name", "stat")]
      strata_name <- levels(cat$strata_name)
      # long -> wide: one column per stratum level
      cat <- reshape(cat, timevar = "strata_name", idvar = "var_name", direction = "wide")
      col_name <- c("Characteristics", strata_name)
      names(cat) <- col_name
      var_name <- names(df)[which(names(df) %in% var_cat[i])]
      # header line announcing the variable, placed above its level rows
      var_line <- data.frame(t(c(paste(var_name, "/ N(%)"), rep("", ncol(cat) - 1))))
      names(var_line) <- col_name
      names(df_cat) <- col_name
      cat$Characteristics <- as.character(cat$Characteristics)
      df_cat <- rbind(df_cat, var_line)
      df_cat <- rbind(df_cat, cat)
    }
  } else df_cat <- NULL
  # top row: column totals N(%) per stratum
  df_n <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
  n_strata <- data.frame(xtabs(~ df[, var_strata], addNA = TRUE))
  names(n_strata) <- c("strata_name", "freq")
  n_all <- data.frame(strata_name = "Overall", freq = nrow(df))
  n_strata <- rbind(n_strata, n_all)
  n_strata$sum <- sum(n_strata$freq[-nrow(n_strata)])
  n_strata$percent <- paste0("(", roundHAFZ(n_strata$freq / n_strata$sum * 100), ")")
  n_strata$stat <- paste(format(n_strata$freq, big.mark = ","), n_strata$percent)
  n_strata <- n_strata[, c("strata_name", "stat")]
  n_strata <- t(rbind(cbind(strata_name = "Characteristics", stat = "N(%)"), n_strata))
  col_name <- n_strata[1, ]
  n_strata <- data.frame(n_strata, row.names = NULL)[-1, ]
  names(n_strata) <- col_name
  names(df_n) <- col_name
  df_n <- rbind(df_n, n_strata)
  tableone <- data.frame(rbind(df_n, df_cat), row.names = NULL, check.names = FALSE)
  return(tableone)
}
# FISHER's TEST -----------------------------------------------------------
fun.pval_fisher <- function(df, var1, var2, levels1 = "all", levels2 = "all"){
  # P-value of Fisher's exact test between two categorical variables,
  # optionally restricted to selected levels of either variable.
  # Pass "all" (the default) to keep every level of a variable.
  # Relies on the sibling helper roundPVAL().
  # example:
  # df = reg_inc
  # var1 = "Summary_stage_2000_1998"
  # var2 = "Race_Ethnicity"
  # levels1 = c("Localized", "Regional", "Distant")
  # levels2 = c("American Indian or Alaska Native", "Hispanic", "Non-Hispanic White")
  counts <- xtabs(~ df[, var1] + df[, var2])
  subset_rows <- sum(levels1 == "all") < 1
  subset_cols <- sum(levels2 == "all") < 1
  if(subset_rows && subset_cols){
    counts <- counts[levels1, levels2]
  } else if(subset_rows){
    counts <- counts[levels1, ]
  } else if(subset_cols){
    counts <- counts[, levels2]
  }
  # simulated p-value keeps memory (workspace) requirements manageable
  test <- fisher.test(counts, simulate.p.value = TRUE, B = 1e5)
  roundPVAL(test$p.value)
}
# SUMMARY TABLES ----------------------------------------------------------
# for 'Age_at_diagnosis'
fun.summary_group <- function(df, var1, var2){
  # shows summary statistics: n, mean, sd, median, `min-max` for 'Age_at_diagnosis'
  # 'var1' is categorical, 'var2' is continuous
  # Arguments are passed as bare (unquoted) column names and captured with
  # tidy evaluation (rlang quosures); relies on dplyr and the sibling
  # helper roundHAFZ(). NAs in 'var2' are dropped from every statistic.
  # example:
  # df = df_inc
  # var1 = Region
  # var2 = Age_at_diagnosis
  var1 <- enquo(var1)
  var2 <- enquo(var2)
  # one summary row per level of var1
  df %>%
    group_by(!!var1) %>%
    dplyr::summarize(
      n = format(n(), big.mark = ","),
      Mean = roundHAFZ(mean((!!var2), na.rm = TRUE)),
      SD = roundHAFZ(sd((!!var2), na.rm = TRUE)),
      Median = roundHAFZ(median((!!var2), na.rm = TRUE)),
      `Min-Max` = paste(roundHAFZ(min((!!var2), na.rm = TRUE)), "-",
                        roundHAFZ(max((!!var2), na.rm = TRUE)))
    )
}
# ANOVA P-VALUE -----------------------------------------------------------
fun.pval_anova <- function(df, var1, var2, levels1 = "all"){
  # P-value of a one-way ANOVA of the continuous variable 'var2' across the
  # levels of the categorical variable 'var1'. Optionally restricted to the
  # levels in 'levels1'; pass "all" (the default) to keep every level.
  # (The original comment said "Fisher's test" - this is ANOVA.)
  # Relies on the sibling helper roundPVAL().
  # example:
  # df = reg_inc
  # var1 = "Race_Ethnicity"
  # var2 = "Age_at_diagnosis"
  # levels1 = c("American Indian or Alaska Native", "Hispanic", "Non-Hispanic White")
  if(sum(levels1 == "all") < 1){
    df <- df[df[, var1] %in% levels1, ]
  }
  fit <- aov(df[, var2] ~ df[, var1])
  roundPVAL(summary(fit)[[1]][["Pr(>F)"]][[1]])
}
# AGE-ADJUSTED RATES ------------------------------------------------------
fun.aarate <- function(df, pop){
  # Age-adjusted incidence rates (per 100,000) by year of diagnosis, plus the
  # Annual Percent Change (APC) of the trend.
  # : http://seer.cancer.gov/seerstat/WebHelp/Rate_Algorithms.htm
  # : https://seer.cancer.gov/seerstat/WebHelp/Trend_Algorithms.htm
  # Args:
  #   df  incidence records; only 'Age_recode_with_1_year_olds' and
  #       'Year_of_diagnosis' are used
  #   pop population table: 'Age' column plus one count column per year
  # Relies on the globals 'pop_std' (standard population) and 'years'
  # (analysis period), and on the sibling helpers roundHAFZ()/roundPVAL().
  # example:
  # df = subset(reg_inc, Sex == "Male")
  # pop = pop_male_reg
  df <- df[, c("Age_recode_with_1_year_olds", "Year_of_diagnosis")]
  # drop the 'Unknown' age group. (The original `!is.na(x %in% "Unknown")`
  # was always TRUE because %in% never returns NA, so nothing was dropped.)
  df <- df[!(df$Age_recode_with_1_year_olds %in% "Unknown"), ]
  df$Age_recode_with_1_year_olds <- droplevels(df$Age_recode_with_1_year_olds)
  # some age groups may be absent from this data subset
  age_grs <- levels(df$Age_recode_with_1_year_olds)
  pop2 <- pop[pop$Age %in% age_grs, ]
  pop_sum <- sum(pop_std$Population)
  pop_std <- pop_std[pop_std$Age %in% age_grs, ]
  df <- xtabs(~ Age_recode_with_1_year_olds + Year_of_diagnosis, df)
  # direct standardization: age-specific rates weighted by standard population
  aarate <- colSums(df / pop2[, -1] * 100000 * (pop_std$Population / pop_sum))
  SE <- sqrt(colSums((df / (pop2[, -1])^2) * ((pop_std$Population / pop_sum)^2))) * 100000
  LCL <- aarate - 1.96 * SE
  UCL <- aarate + 1.96 * SE
  # suppress if counts < 15 or population < 10,000 - for stability of aarates
  # NOTE(review): indices are taken over columns 2:ncol, i.e. the first year
  # is never checked - TODO confirm this offset is intended
  df_suppress <- which(colSums(df[, 2:ncol(df)]) < 15)
  pop_suppress <- which(colSums(pop2[, 2:ncol(pop2)]) < 10000)
  # union of the two index sets (the original applied `|` to which() results)
  suppress <- union(df_suppress, pop_suppress)
  # APC (Annual Percent Change): log-linear trend over calendar year
  Year <- as.numeric(names(aarate))
  if(sum(Year == years) >= 1 && length(suppress) == 0){
    fit <- lm(log(aarate) ~ Year)
    beta <- summary(fit)$coefficients[2, 1]
    pval_num <- summary(fit)$coefficients[2, 4]
    pval <- roundPVAL(pval_num)
    apc <- roundHAFZ(100 * (exp(beta) - 1))
    # star significant trends: compare the NUMERIC p-value - the original
    # compared the formatted string, so "<0.001" was never starred
    if(pval_num <= 0.05){
      pval <- paste0("(", pval, ")*")
    } else pval <- paste0("(", pval, ")")
  } else {
    apc <- ""
    pval <- ""
  }
  aarate <- roundHAFZ(aarate)
  SE <- roundHAFZ(SE)
  LCL <- roundHAFZ(LCL)
  UCL <- roundHAFZ(UCL)
  aarate <- data.frame(Year = Year, aarate = aarate, SE = SE, LCL = LCL, UCL = UCL, row.names = NULL)
  aarate[nrow(aarate) + 1, 1] <- "APC (p-value)"
  aarate[nrow(aarate), 2] <- apc
  aarate[nrow(aarate), 3] <- pval
  aarate[nrow(aarate), 4:5] <- ""
  # suppress unstable cells (counts < 15 or population < 10,000)
  if(length(suppress) > 0){
    aarate[suppress, 2] <- "x"
    aarate[suppress, 3:5] <- ""
  }
  return(aarate)
}
# K-M SURVIVAL PLOTS ------------------------------------------------------
plot.KM_strata <- function(df, strataName, COD, survTimeVar, title, xlab, xTicksBy, xshift, survTicks, cutoff = NULL){
  # Kaplan-Meier survival plot stratified by one factor, annotated with a
  # legend, the log-rank p-value and a table of S(t) percentages at 'survTicks'.
  # Args:
  #   df          data.frame with survival data
  #   strataName  column name of the stratifying factor
  #   COD         column name of the event indicator (1 = event)
  #   survTimeVar column name of the follow-up time
  #   title       plot title
  #   xlab        x-axis label
  #   xTicksBy    spacing of x-axis ticks
  #   xshift      horizontal shift of the survival-percentage columns
  #   survTicks   time points at which S(t) is tabulated, e.g. c(12, 24, 60, 120)
  #   cutoff      optional administrative censoring time
  # Relies on 'survival', 'plotrix' (addtable2plot) and the sibling helpers
  # roundHAFZ()/roundPVAL().
  # example:
  # df = df_surv; strataName = "Region"; COD = "COD_CS"; survTimeVar = "TimeSurv"
  category <- df[, strataName]
  n <- length(levels(as.factor(category)))
  time <- df[, survTimeVar]
  COD <- df[, COD]
  if(!is.null(cutoff)){
    # truncate follow-up at 'cutoff'
    time_cut <- NA
    time_cut[time <= cutoff] <- time[time <= cutoff]
    time_cut[is.na(time_cut)] <- cutoff
    # censor observations beyond 'cutoff' (event indicator 0). Use the
    # already-extracted COD vector: the original read 'df$COD', which only
    # worked through partial name matching and broke otherwise.
    COD_cut <- NA
    COD_cut[time <= cutoff] <- COD[time <= cutoff]
    COD_cut[is.na(COD_cut)] <- 0
    time <- time_cut
    COD <- COD_cut
  }
  # Kaplan-Meier estimates and log-rank test
  fitKM <- survfit(Surv(time, COD) ~ category, conf.type = "log-log")
  fitKMdiff <- survdiff(Surv(time, COD) ~ category, rho = 0)
  # Log-Rank p-value
  pval1 <- 1 - pchisq(fitKMdiff$chisq, length(fitKMdiff$n) - 1)
  pval1 <- roundPVAL(pval1)
  # S(t) for the requested time points & factor levels
  percentsLong <- data.frame(time = summary(fitKM, times = c(0, survTicks))$time,
                             persentSurv = roundHAFZ(summary(fitKM, times = c(0, survTicks))$surv * 100),
                             levels = summary(fitKM, times = c(0, survTicks))$strata)
  percentsLong$levels <- gsub("category=", "", percentsLong$levels)
  # Long -> Wide format: one column per time point
  percentsWide <- reshape(percentsLong, idvar = "levels", timevar = "time", direction = "wide")
  namesWide <- data.frame(levels = percentsWide[ , 1])
  names(namesWide) <- paste("% of survived")
  percentsWide <- data.frame(percentsWide[, 2:ncol(percentsWide)])
  groupsNames <- levels(as.factor(category))
  # Plot
  par(mai = c(1, 1, 1, 2.5)) # increase right margin space for a legend
  plot(fitKM, lty = 1:n, col = 1:n, lwd = 2, xlab = xlab, ylab = 'Survival probability', cex.lab = 1.2, axes = FALSE)
  legend("topright", inset = c(-0.5, 0), xpd = TRUE, xjust = 0, yjust = 0.5, box.lty = 1, box.lwd = 1, groupsNames, lty = 1:n, col = 1:n, lwd = 2)
  abline(h = 0.5, lty = "dotted", col = "red", lwd = 1)
  xmax <- max(time)
  axis(side = 1, at = seq(0, xmax, xTicksBy), cex.axis = 0.8)
  axis(side = 2, at = seq(0, 1, 0.1), cex.axis = 0.8)
  # annotate with the log-rank p-value
  mylabel <- paste("Log-rank p-value = ", pval1, sep = "")
  mtext(mylabel, 3, line = 0, adj = 0)
  title(main = title, line = 2)
  addtable2plot(par('usr')[2], par('usr')[1], cex = 0.8, display.colnames = TRUE, table = namesWide)
  for(i in 1:length(survTicks)){
    lines(x = rep(survTicks[i], 2),
          y = c(-1, 0), lty = "dotted")
  }
  # Print the survival percentages under each tick, shifted by 'xshift' so
  # they line up. Column 1 of percentsWide is time 0; columns 2.. correspond
  # to survTicks[1..]. (The original hard-coded one branch per column count,
  # capped at 4 tick columns; this loop handles any number.)
  if(ncol(percentsWide) >= 2){
    for(j in 2:ncol(percentsWide)){
      addtable2plot(survTicks[j - 1] - xshift, par('usr')[1], cex = 0.8,
                    display.colnames = FALSE,
                    table = data.frame(percentsWide[, j]), bg = "transparent")
    }
  }
}
# K-M SURVIVAL TABLES -----------------------------------------------------
fun.KM_tbl <- function(df, strataName, COD, COD_abbr, survTimeVar, cutoff = NULL){
  # Kaplan-Meier summary table per stratum: records, events, median survival
  # and its confidence limits (columns 1 and 4:9 of summary(survfit)$table).
  # Args:
  #   df          data.frame with survival data
  #   strataName  column name of the stratifying factor
  #   COD         column name of the event indicator (1 = event)
  #   COD_abbr    abbreviation appended to the row names (e.g. "CS")
  #   survTimeVar column name of the follow-up time
  #   cutoff      optional administrative censoring time
  # Relies on the 'survival' package and the sibling helper roundHAFZ().
  # example:
  # df = df_surv; strataName = "Region"; COD = "COD_CS"; COD_abbr = "CS"
  # survTimeVar = "TimeSurv"; cutoff = NULL
  strata <- df[, strataName]
  n <- length(levels(as.factor(strata)))
  time <- df[, survTimeVar]
  COD <- df[, COD]
  if(!is.null(cutoff)){
    # truncate follow-up at 'cutoff'
    time_cut <- NA
    time_cut[time <= cutoff] <- time[time <= cutoff]
    time_cut[is.na(time_cut)] <- cutoff
    # censor observations beyond 'cutoff' (event indicator 0). Use the
    # already-extracted COD vector: the original read 'df$COD', which only
    # worked through partial name matching and broke otherwise.
    COD_cut <- NA
    COD_cut[time <= cutoff] <- COD[time <= cutoff]
    COD_cut[is.na(COD_cut)] <- 0
    time <- time_cut
    COD <- COD_cut
  }
  # Kaplan-Meier estimates
  fitKM <- survfit(Surv(time, COD) ~ strata, conf.type = "log-log")
  tbl <- roundHAFZ(summary(fitKM)$table[, c(1, 4:9)])
  rownames(tbl) <- paste0(rownames(tbl), " (", COD_abbr, ")")
  return(tbl)
}
# Cox PH -----------------------------------------------------------------
fun.Cox_multiv <- function(df, varNames, COD, survTimeVar){
  # Multivariable Cox proportional-hazards model.
  # Args:
  #   df          data.frame with covariates, follow-up time and event flag
  #   varNames    character vector of covariate column names
  #   COD         column name of the event indicator (1 = event)
  #   survTimeVar column name of the follow-up time
  # Returns: data.frame with columns "Multivariable HR", "95% CI", "p-value"
  #   and "P(PH)" (Schoenfeld-residuals test of the PH assumption).
  # Relies on the 'survival' package and the sibling helpers
  # roundHAFZ()/roundPVAL().
  # example:
  # df = df_surv
  # varNames = c("Age_at_diagnosis", "Region", "Race_Ethnicity", "Sex")
  # COD = "COD_CS"
  # survTimeVar = "TimeSurv"
  CoxMult <- df[, varNames]
  CoxMult <- data.frame(CoxMult, df[, c(survTimeVar, COD)])
  names(CoxMult) <- c(varNames, "survTimeVar", "COD")
  fit <- coxph(Surv(survTimeVar, COD) ~ ., CoxMult)
  # reuse the fitted model - the original refitted coxph() a second time
  # just to take its summary
  fitCoxMulti <- summary(fit)
  # Schoenfeld residuals (test violation of proportionality)
  test <- cox.zph(fit, transform = "rank")
  # hazard ratios = exp(coef)
  HR <- roundHAFZ(as.matrix(fitCoxMulti$coefficients)[, 2], n = 3)
  # 95% confidence interval bounds
  CI <- roundHAFZ(as.matrix(fitCoxMulti$conf.int)[, c(3, 4)], n = 3)
  # Wald p-values
  pval <- roundPVAL(roundHAFZ(as.matrix(fitCoxMulti$coefficients)[, 5], n = 3))
  # combine all necessary coefficients
  CoxMultiCoef <- as.matrix(cbind(HR,
                                  paste(CI[, 1], "-", CI[, 2], sep=""),
                                  pval))
  # PH assumption p-value; the last row of test$table is the global test
  ph <- as.vector(test$table[-dim(test$table)[1], 3], mode="numeric")
  ph <- roundHAFZ(ph, n = 3)
  # p-values that round to 0 are reported as "<0.001"
  is_zero <- ph == 0
  if(any(is_zero)){ph[is_zero] <- "<0.001"}
  CoxMultiCoef <- data.frame(cbind(CoxMultiCoef, ph))
  names(CoxMultiCoef) <- c("Multivariable HR", "95% CI", "p-value", "P(PH)")
  return(CoxMultiCoef)
}
|
/code/generic/functions.R
|
no_license
|
zgalochkina/SEER_solid_tumor
|
R
| false
| false
| 25,429
|
r
|
# ROUND VALUES ------------------------------------------------------------
# R uses 'Round half to even' rule for rounding.
# 'Round half to even' is a tie-breaking rule that is less biased.
# x = c(1.85, 1.54, 1.65, 1.75, 1.85, 1.84, 1)
# (round(x, 1))
# 'Round half away from zero' will be used for results output.
# https://github.com/pedroguarderas/learnR/blob/master/R_base/session_26.R
# 'Round half away from zero' to n decimal places: scale, add 0.5, truncate,
# unscale, restore the sign. Used instead of base round(), which follows the
# 'round half to even' rule.
roundHAFZ <- function(x, n = 1) {
  sgn <- sign(x)
  scaled <- abs(x) * 10^n
  scaled <- trunc(scaled + 0.5)
  sgn * scaled / 10^n
}
# (roundHAFZ(x))
# ROUND P-VALUE -----------------------------------------------------------
roundPVAL <- function(pval){
  # Format p-values for reporting: values that equal 0 after rounding to
  # three decimals are shown as "<0.001". A scalar input is rounded here
  # (via the sibling roundHAFZ()); vector input is assumed to be rounded
  # already by the caller and is only relabelled.
  if(length(pval) == 1){
    pval <- roundHAFZ(pval, 3)
    if(pval == 0){
      pval <- "<0.001"
    }
  } else {
    # assigning the string coerces the whole vector to character, exactly
    # like the original element-wise loop did on its first replacement
    is_zero <- pval == 0
    if(any(is_zero)){
      pval[is_zero] <- "<0.001"
    }
  }
  pval
}
# AJCC / GRADES 2004+ -----------------------------------------------------
# only works specifically for SEER data
fun.ajcc_grade <- function(df){
  # Adds a 'Stage_Grade' column with values { 1 / 2 / 3 / 4 / Unknown / Other }
  # to unify the levels of two different variables: AJCC Stage (6th / 7th ed.)
  # and Grade. Also adds 'Stage_Grade_6th' / 'Stage_Grade_7th' helper columns.
  # Behavior is controlled by the globals 'ajcc_grade' ("AJCC" or "Grade") and
  # 'ajcc_ed' ("6th edition" or "6th and 7th editions").
  # Uses stringr::str_count().
  # example:
  # df = seer
  if(min(df$Year_of_diagnosis) < 2004){
    stop("Minimum 'Year of diagnosis' has to be not less than 2004.")
  }
  # Map a roman-numeral stage label to "1".."4"; values without exactly one
  # roman numeral are passed through unchanged. IV is checked before III
  # before II before I so that longer numerals win. (This replaces three
  # copy-pasted occurrences of the same ladder in the original.)
  roman_to_stage <- function(x){
    out <- rep(NA_character_, length(x))
    out[str_count(x, "IV") == 1] <- "4"
    out[is.na(out) & str_count(x, "III") == 1] <- "3"
    out[is.na(out) & str_count(x, "II") == 1] <- "2"
    out[is.na(out) & str_count(x, "I") == 1] <- "1"
    out[is.na(out)] <- as.character(x[is.na(out)])
    out
  }
  # Collapse anything other than 1-4 / "Unknown" to "Other":
  # "NA" (not applicable) is a placeholder for histologies AJCC does not
  # stage; in "OCCULT" stage NSCLC cancer cells are found in the sputum but
  # tumors are not immediately visible; Grade also carries cell-lineage
  # values ('T-cell', 'B-cell; pre-B; B-precursor', ...).
  collapse_other <- function(x){
    x[!(x %in% c("1", "2", "3", "4", "Unknown"))] <- "Other"
    x
  }
  if(ajcc_grade == "AJCC" && ajcc_ed == "6th and 7th editions"){
    df$Stage_Grade_6th <- roman_to_stage(df$Derived_AJCC_Stage_Group_6th_ed_2004)
    df$Stage_Grade_7th <- roman_to_stage(df$Derived_AJCC_Stage_Group_7th_ed_2010)
    # 6th edition applies to 2004-2009 diagnoses, 7th edition to 2010+
    df$Stage_Grade <- NA
    df$Stage_Grade[df$Year_of_diagnosis %in% 2004:2009] <- df$Stage_Grade_6th[df$Year_of_diagnosis %in% 2004:2009]
    df$Stage_Grade[df$Year_of_diagnosis >= 2010] <- df$Stage_Grade_7th[df$Year_of_diagnosis >= 2010]
    # "UNK Stage" - cases that do not have enough information to be staged
    df$Stage_Grade[df$Stage_Grade == "UNK Stage"] <- "Unknown"
    df$Stage_Grade <- collapse_other(df$Stage_Grade)
  } else if(ajcc_grade == "AJCC" && ajcc_ed == "6th edition"){
    df$Stage_Grade_6th <- roman_to_stage(df$Derived_AJCC_Stage_Group_6th_ed_2004)
    df$Stage_Grade_7th <- NA
    df$Stage_Grade <- df$Stage_Grade_6th
    # "UNK Stage" - cases that do not have enough information to be staged
    df$Stage_Grade[df$Stage_Grade == "UNK Stage"] <- "Unknown"
    df$Stage_Grade <- collapse_other(df$Stage_Grade)
  } else if(ajcc_grade == "Grade"){
    df$Stage_Grade_6th <- NA
    df$Stage_Grade_7th <- NA
    df$Stage_Grade <- collapse_other(roman_to_stage(df$Grade))
  }
  return(df)
}
# TABLE 1 -----------------------------------------------------------------
fun.tableone <- function(df, var_cont, var_cat, var_strata){
  # Builds a "Table 1" of descriptive statistics stratified by 'var_strata':
  # continuous variables ('var_cont') as mean (sd), categorical variables
  # ('var_cat') as N (%), plus an "Overall" column. Percentages are computed
  # within each stratum COLUMN. Either 'var_cont' or 'var_cat' may be NULL.
  # Relies on dplyr::left_join() and the sibling helper roundHAFZ().
  # NOTE(review): mean()/sd() are called without na.rm, so NAs in a
  # continuous variable propagate - confirm inputs are complete.
  # example:
  # var_cont = "Age_at_diagnosis"
  # var_cat = c("Sex", "Summary_stage_2000_1998", "Stage_Grade")
  # var_strata = "Race_Ethnicity"
  # df = reg_inc
  if(!is.null(var_cont)){
    df_cont <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cont)){
      # continuous variables: mean (sd) per stratum, plus an "Overall" row
      cont_strata <- data.frame(groups = aggregate(df[, var_cont[i]] ~ df[, var_strata], df, mean)[, 1],
                                mean = roundHAFZ(aggregate(df[, var_cont[i]] ~ df[, var_strata], df, mean)[, 2]),
                                sd = paste0("(", roundHAFZ(aggregate(df[, var_cont[i]] ~ df[, var_strata], df, sd)[, 2]), ")"))
      cont_all <- data.frame(groups = "Overall",
                             mean = roundHAFZ(mean(df[, var_cont[i]])),
                             sd = paste0("(", roundHAFZ(sd(df[, var_cont[i]])), ")"))
      cont <- data.frame(rbind(cont_strata, cont_all))
      cont$stat <- paste(format(cont$mean, big.mark = ","), cont$sd)
      cont <- cont[, c("groups", "stat")]
      var_name <- names(df)[which(names(df) %in% var_cont[i])]
      # transpose so strata become columns; the prepended row carries headers
      cont <- t(rbind(cbind(groups = "Characteristics", stat = paste(var_name, "/ (mean(sd))")), cont))
      col_name <- cont[1, ]
      cont <- data.frame(cont, row.names = NULL)[-1, ]
      names(cont) <- col_name
      names(df_cont) <- col_name
      df_cont <- rbind(df_cont, cont)
    }
  } else df_cont <- NULL
  if(!is.null(var_cat)){
    df_cat <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cat)){
      # categorical variables: counts per variable level x stratum
      cat_strata <- data.frame(xtabs(~ df[, var_cat[i]] + df[, var_strata], addNA = TRUE))
      names(cat_strata) <- c("var_name", "strata_name", "freq")
      cat_all <- data.frame(xtabs(~ df[, var_cat[i]], addNA = TRUE))
      names(cat_all) <- c("var_name", "freq")
      cat_all$strata_name <- "Overall"
      cat <- rbind(cat_strata, cat_all)
      # column totals = denominators for the within-stratum percentages
      cat_sum <- aggregate(cat$freq ~ cat[, 2], cat, sum)
      names(cat_sum) <- c("strata_name", "sum")
      cat <- left_join(cat, cat_sum, by = "strata_name")
      cat$percent <- paste0("(", roundHAFZ(cat$freq / cat$sum * 100), ")")
      cat$stat <- paste(format(cat$freq, big.mark = ","), cat$percent)
      cat <- cat[, c("var_name", "strata_name", "stat")]
      var_name <- names(df)[which(names(df) %in% var_cat[i])]
      cat_name <- levels(cat$strata_name)
      # header line announcing the variable, above its level rows
      cat <- rbind(cbind(var_name = paste(var_name, "/ N(%)"), strata_name = cat_name, stat = ""), cat)
      # long -> wide: one column per stratum level
      cat <- reshape(cat, timevar = "strata_name", idvar = "var_name", direction = "wide")
      col_name <- c("Characteristics", cat_name)
      names(cat) <- col_name
      names(df_cat) <- col_name
      df_cat <- rbind(df_cat, cat)
    }
  } else df_cat <- NULL
  # top row: column totals N(%) per stratum
  df_n <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
  n_strata <- data.frame(xtabs(~ df[, var_strata], addNA = TRUE))
  names(n_strata) <- c("strata_name", "freq")
  n_all <- data.frame(strata_name = "Overall", freq = nrow(df))
  n_strata <- rbind(n_strata, n_all)
  n_strata$sum <- sum(n_strata$freq[-nrow(n_strata)])
  n_strata$percent <- paste0("(", roundHAFZ(n_strata$freq / n_strata$sum * 100), ")")
  n_strata$stat <- paste(format(n_strata$freq, big.mark = ","), n_strata$percent)
  n_strata <- n_strata[, c("strata_name", "stat")]
  n_strata <- t(rbind(cbind(strata_name = "Characteristics", stat = "N(%)"), n_strata))
  col_name <- n_strata[1, ]
  n_strata <- data.frame(n_strata, row.names = NULL)[-1, ]
  names(n_strata) <- col_name
  names(df_n) <- col_name
  df_n <- rbind(df_n, n_strata)
  # stack: N(%) row, then continuous blocks, then categorical blocks
  tableone <- data.frame(rbind(df_n, df_cont, df_cat), row.names = NULL, check.names = FALSE)
  return(tableone)
}
# TABLE 1 BY ROWS ---------------------------------------------------------
fun.tableone_by_rows <- function(df, var_cat, var_strata){
  # Builds a "Table 1" where percentages are calculated ACROSS ROWS:
  # for each level of every variable in 'var_cat', per-stratum counts are
  # expressed as a share of that row's total.
  # Args:
  #   df         data.frame holding the variables
  #   var_cat    character vector of categorical column names (table rows)
  #   var_strata single categorical column name (table columns)
  # Returns: data.frame with a "Characteristics" column, one column per
  #   stratum level and an "Overall" column; the first row holds N(%).
  # Relies on dplyr::left_join() and the sibling helper roundHAFZ().
  # example:
  # var_cat = "Year_of_diagnosis"
  # var_strata = "Sex"
  # df = reg_inc
  if(!is.null(var_cat)){
    df_cat <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
    for(i in 1:length(var_cat)){
      # cross-tabulate the i-th variable (the original indexed 'var_cat'
      # without [i], which broke whenever more than one variable was passed)
      cat_strata <- data.frame(xtabs(~ df[, var_cat[i]] + df[, var_strata], addNA = TRUE))
      names(cat_strata) <- c("var_name", "strata_name", "freq")
      # row totals = denominators for the row percentages
      strata_sum <- aggregate(cat_strata$freq ~ cat_strata$var_name, cat_strata, sum)
      names(strata_sum) <- c("var_name", "sum")
      cat_all <- strata_sum
      names(cat_all) <- c("var_name", "freq")
      cat_all$strata_name <- "Overall"
      cat <- rbind(cat_strata, cat_all)
      cat <- left_join(cat, strata_sum, by = "var_name")
      cat$percent <- paste0("(", roundHAFZ(cat$freq / cat$sum * 100), ")")
      cat$stat <- paste(format(cat$freq, big.mark = ","), cat$percent)
      cat <- cat[, c("var_name", "strata_name", "stat")]
      strata_name <- levels(cat$strata_name)
      # long -> wide: one column per stratum level
      cat <- reshape(cat, timevar = "strata_name", idvar = "var_name", direction = "wide")
      col_name <- c("Characteristics", strata_name)
      names(cat) <- col_name
      var_name <- names(df)[which(names(df) %in% var_cat[i])]
      # header line announcing the variable, placed above its level rows
      var_line <- data.frame(t(c(paste(var_name, "/ N(%)"), rep("", ncol(cat) - 1))))
      names(var_line) <- col_name
      names(df_cat) <- col_name
      cat$Characteristics <- as.character(cat$Characteristics)
      df_cat <- rbind(df_cat, var_line)
      df_cat <- rbind(df_cat, cat)
    }
  } else df_cat <- NULL
  # top row: column totals N(%) per stratum
  df_n <- data.frame(matrix(nrow = 0, ncol = length(levels(df[, var_strata])) + 2))
  n_strata <- data.frame(xtabs(~ df[, var_strata], addNA = TRUE))
  names(n_strata) <- c("strata_name", "freq")
  n_all <- data.frame(strata_name = "Overall", freq = nrow(df))
  n_strata <- rbind(n_strata, n_all)
  n_strata$sum <- sum(n_strata$freq[-nrow(n_strata)])
  n_strata$percent <- paste0("(", roundHAFZ(n_strata$freq / n_strata$sum * 100), ")")
  n_strata$stat <- paste(format(n_strata$freq, big.mark = ","), n_strata$percent)
  n_strata <- n_strata[, c("strata_name", "stat")]
  n_strata <- t(rbind(cbind(strata_name = "Characteristics", stat = "N(%)"), n_strata))
  col_name <- n_strata[1, ]
  n_strata <- data.frame(n_strata, row.names = NULL)[-1, ]
  names(n_strata) <- col_name
  names(df_n) <- col_name
  df_n <- rbind(df_n, n_strata)
  tableone <- data.frame(rbind(df_n, df_cat), row.names = NULL, check.names = FALSE)
  return(tableone)
}
# FISHER's TEST -----------------------------------------------------------
fun.pval_fisher <- function(df, var1, var2, levels1 = "all", levels2 = "all"){
  # P-value of Fisher's exact test between two categorical variables,
  # optionally restricted to selected levels of either variable.
  # Pass "all" (the default) to keep every level of a variable.
  # Relies on the sibling helper roundPVAL().
  # example:
  # df = reg_inc
  # var1 = "Summary_stage_2000_1998"
  # var2 = "Race_Ethnicity"
  # levels1 = c("Localized", "Regional", "Distant")
  # levels2 = c("American Indian or Alaska Native", "Hispanic", "Non-Hispanic White")
  counts <- xtabs(~ df[, var1] + df[, var2])
  subset_rows <- sum(levels1 == "all") < 1
  subset_cols <- sum(levels2 == "all") < 1
  if(subset_rows && subset_cols){
    counts <- counts[levels1, levels2]
  } else if(subset_rows){
    counts <- counts[levels1, ]
  } else if(subset_cols){
    counts <- counts[, levels2]
  }
  # simulated p-value keeps memory (workspace) requirements manageable
  test <- fisher.test(counts, simulate.p.value = TRUE, B = 1e5)
  roundPVAL(test$p.value)
}
# SUMMARY TABLES ----------------------------------------------------------
# for 'Age_at_diagnosis'
fun.summary_group <- function(df, var1, var2){
  # shows summary statistics: n, mean, sd, median, `min-max` for 'Age_at_diagnosis'
  # 'var1' is categorical, 'var2' is continuous
  # Arguments are passed as bare (unquoted) column names and captured with
  # tidy evaluation (rlang quosures); relies on dplyr and the sibling
  # helper roundHAFZ(). NAs in 'var2' are dropped from every statistic.
  # example:
  # df = df_inc
  # var1 = Region
  # var2 = Age_at_diagnosis
  var1 <- enquo(var1)
  var2 <- enquo(var2)
  # one summary row per level of var1
  df %>%
    group_by(!!var1) %>%
    dplyr::summarize(
      n = format(n(), big.mark = ","),
      Mean = roundHAFZ(mean((!!var2), na.rm = TRUE)),
      SD = roundHAFZ(sd((!!var2), na.rm = TRUE)),
      Median = roundHAFZ(median((!!var2), na.rm = TRUE)),
      `Min-Max` = paste(roundHAFZ(min((!!var2), na.rm = TRUE)), "-",
                        roundHAFZ(max((!!var2), na.rm = TRUE)))
    )
}
# ANOVA P-VALUE -----------------------------------------------------------
fun.pval_anova <- function(df, var1, var2, levels1 = "all"){
  # P-value of a one-way ANOVA of the continuous variable 'var2' across the
  # levels of the categorical variable 'var1'. Optionally restricted to the
  # levels in 'levels1'; pass "all" (the default) to keep every level.
  # (The original comment said "Fisher's test" - this is ANOVA.)
  # Relies on the sibling helper roundPVAL().
  # example:
  # df = reg_inc
  # var1 = "Race_Ethnicity"
  # var2 = "Age_at_diagnosis"
  # levels1 = c("American Indian or Alaska Native", "Hispanic", "Non-Hispanic White")
  if(sum(levels1 == "all") < 1){
    df <- df[df[, var1] %in% levels1, ]
  }
  fit <- aov(df[, var2] ~ df[, var1])
  roundPVAL(summary(fit)[[1]][["Pr(>F)"]][[1]])
}
# AGE-ADJUSTED RATES ------------------------------------------------------
fun.aarate <- function(df, pop){
  # Age-adjusted incidence rates (per 100,000) by year of diagnosis, plus the
  # Annual Percent Change (APC) of the trend.
  # : http://seer.cancer.gov/seerstat/WebHelp/Rate_Algorithms.htm
  # : https://seer.cancer.gov/seerstat/WebHelp/Trend_Algorithms.htm
  # Args:
  #   df  incidence records; only 'Age_recode_with_1_year_olds' and
  #       'Year_of_diagnosis' are used
  #   pop population table: 'Age' column plus one count column per year
  # Relies on the globals 'pop_std' (standard population) and 'years'
  # (analysis period), and on the sibling helpers roundHAFZ()/roundPVAL().
  # example:
  # df = subset(reg_inc, Sex == "Male")
  # pop = pop_male_reg
  df <- df[, c("Age_recode_with_1_year_olds", "Year_of_diagnosis")]
  # drop the 'Unknown' age group. (The original `!is.na(x %in% "Unknown")`
  # was always TRUE because %in% never returns NA, so nothing was dropped.)
  df <- df[!(df$Age_recode_with_1_year_olds %in% "Unknown"), ]
  df$Age_recode_with_1_year_olds <- droplevels(df$Age_recode_with_1_year_olds)
  # some age groups may be absent from this data subset
  age_grs <- levels(df$Age_recode_with_1_year_olds)
  pop2 <- pop[pop$Age %in% age_grs, ]
  pop_sum <- sum(pop_std$Population)
  pop_std <- pop_std[pop_std$Age %in% age_grs, ]
  df <- xtabs(~ Age_recode_with_1_year_olds + Year_of_diagnosis, df)
  # direct standardization: age-specific rates weighted by standard population
  aarate <- colSums(df / pop2[, -1] * 100000 * (pop_std$Population / pop_sum))
  SE <- sqrt(colSums((df / (pop2[, -1])^2) * ((pop_std$Population / pop_sum)^2))) * 100000
  LCL <- aarate - 1.96 * SE
  UCL <- aarate + 1.96 * SE
  # suppress if counts < 15 or population < 10,000 - for stability of aarates
  # NOTE(review): indices are taken over columns 2:ncol, i.e. the first year
  # is never checked - TODO confirm this offset is intended
  df_suppress <- which(colSums(df[, 2:ncol(df)]) < 15)
  pop_suppress <- which(colSums(pop2[, 2:ncol(pop2)]) < 10000)
  # union of the two index sets (the original applied `|` to which() results)
  suppress <- union(df_suppress, pop_suppress)
  # APC (Annual Percent Change): log-linear trend over calendar year
  Year <- as.numeric(names(aarate))
  if(sum(Year == years) >= 1 && length(suppress) == 0){
    fit <- lm(log(aarate) ~ Year)
    beta <- summary(fit)$coefficients[2, 1]
    pval_num <- summary(fit)$coefficients[2, 4]
    pval <- roundPVAL(pval_num)
    apc <- roundHAFZ(100 * (exp(beta) - 1))
    # star significant trends: compare the NUMERIC p-value - the original
    # compared the formatted string, so "<0.001" was never starred
    if(pval_num <= 0.05){
      pval <- paste0("(", pval, ")*")
    } else pval <- paste0("(", pval, ")")
  } else {
    apc <- ""
    pval <- ""
  }
  aarate <- roundHAFZ(aarate)
  SE <- roundHAFZ(SE)
  LCL <- roundHAFZ(LCL)
  UCL <- roundHAFZ(UCL)
  aarate <- data.frame(Year = Year, aarate = aarate, SE = SE, LCL = LCL, UCL = UCL, row.names = NULL)
  aarate[nrow(aarate) + 1, 1] <- "APC (p-value)"
  aarate[nrow(aarate), 2] <- apc
  aarate[nrow(aarate), 3] <- pval
  aarate[nrow(aarate), 4:5] <- ""
  # suppress unstable cells (counts < 15 or population < 10,000)
  if(length(suppress) > 0){
    aarate[suppress, 2] <- "x"
    aarate[suppress, 3:5] <- ""
  }
  return(aarate)
}
# K-M SURVIVAL PLOTS ------------------------------------------------------
# Draw Kaplan-Meier survival curves stratified by one categorical variable.
# Annotates the plot with the log-rank test p-value and prints the percent
# surviving at each requested time point underneath the corresponding tick.
#
# Args:
#   df          - data frame holding the survival data
#   strataName  - string; name of the stratifying column in `df`
#   COD         - string; name of the event-indicator (cause-of-death) column
#   survTimeVar - string; name of the survival-time column
#   title       - main plot title
#   xlab        - x-axis label
#   xTicksBy    - spacing between x-axis tick marks
#   xshift      - horizontal offset applied when printing %-survived values
#   survTicks   - time points at which % survived is reported
#   cutoff      - optional administrative-censoring time; follow-up beyond it
#                 is truncated to `cutoff` and those events are censored
#
# Side effect: draws on the active graphics device.
plot.KM_strata <- function(df, strataName, COD, survTimeVar, title, xlab, xTicksBy, xshift, survTicks, cutoff = NULL){
  # example:
  # df = df_surv
  # strataName = "Region"
  # COD = "COD_CS"
  # survTimeVar = "TimeSurv"
  # title = "Registries"
  # xlab = "Time in months"
  # xTicksBy = 12
  # xshift = 6
  # survTicks = c(12, 24, 60, 120)
  # cutoff = NULL
  # levels of variable to be plotted
  category <- df[, strataName]
  n <- length(levels(as.factor(category)))
  time <- df[, survTimeVar]
  COD <- df[, COD]
  if(!is.null(cutoff)){
    # administratively censor at `cutoff`
    # adjust Time: truncate follow-up beyond the cutoff
    time_cut <- NA
    time_cut[time <= cutoff] <- time[time <= cutoff]
    time_cut[is.na(time_cut)] <- cutoff
    # adjust COD
    COD_cut <- NA
    # BUG FIX: use the event vector extracted above; the original read df$COD,
    # which is NULL whenever the event column is not literally named "COD"
    COD_cut[time <= cutoff] <- COD[time <= cutoff]
    COD_cut[is.na(COD_cut)] <- 0
    time <- time_cut
    COD <- COD_cut
  }
  # Kaplan-Meier estimates
  fitKM <- survfit(Surv(time, COD)~ category, conf.type="log-log")
  fitKMdiff <- survdiff(Surv(time, COD)~ category, rho=0) # Log-rank
  # Log-Rank p-value
  pval1 <- 1 - pchisq(fitKMdiff$chisq, length(fitKMdiff$n) - 1)
  pval1 <- roundPVAL(pval1)
  # S(t) for particular time points & factor levels
  percentsLong <- data.frame(time = summary(fitKM, times=c(0, survTicks))$time,
                             persentSurv = roundHAFZ(summary(fitKM, times=c(0, survTicks))$surv*100),
                             levels = summary(fitKM, times=c(0, survTicks))$strata)
  percentsLong$levels <- gsub("category=", "", percentsLong$levels)
  # Long -> Wide format: one row per stratum, one column per time point
  percentsWide <- reshape(percentsLong, idvar = "levels", timevar = "time", direction = "wide")
  namesWide <- data.frame(levels = percentsWide[ , 1])
  names(namesWide) <- paste("% of survived")
  percentsWide <- data.frame(percentsWide[, 2:ncol(percentsWide)])
  groupsNames <- levels(as.factor(category))
  # Plot
  par(mai=c(1, 1, 1, 2.5)) # increase right margin space for a legend
  plot(fitKM, lty=1:n, col = 1:n, lwd = 2, xlab = xlab, ylab = 'Survival probability', cex.lab = 1.2, axes = FALSE)
  legend("topright", inset = c(-0.5, 0), xpd = TRUE, xjust = 0, yjust = 0.5, box.lty = 1, box.lwd = 1, groupsNames, lty = 1:n, col = 1:n, lwd = 2)
  # reference line at 50% survival (median)
  abline(h = 0.5, lty = "dotted", col = "red", lwd = 1)
  xmax = max(time)
  axis(side = 1, at = seq(0, xmax, xTicksBy), cex.axis = 0.8)
  axis(side = 2, at = seq(0, 1, 0.1), cex.axis = 0.8)
  # Add Text on plot
  mylabel = paste("Log-rank p-value = ", pval1, sep = "")
  mtext(mylabel, 3, line = 0, adj = 0)
  title(main = title, line = 2)
  addtable2plot(par('usr')[2], par('usr')[1], cex = 0.8, display.colnames = T, table = namesWide)
  # dotted guide line below the axis at each reported time point
  for(i in 1:length(survTicks)){
    lines(x = rep(survTicks[i], 2),
          y = c(-1, 0), lty = "dotted")
  }
  # every time need to shift output of % of survived, otherwise it's not exactly where want it to be;
  # it's practically impossible to have <1 year of survival, thus can omit such scenario
  # GENERALIZED: the original hard-coded branches for ncol(percentsWide) == 2..5;
  # this loop prints column j under survTicks[j - 1] for any number of ticks.
  if(ncol(percentsWide) >= 2){
    for(j in 2:ncol(percentsWide)){
      addtable2plot(survTicks[j - 1] - xshift, par('usr')[1], cex = 0.8,
                    display.colnames = FALSE,
                    table = data.frame(percentsWide[, j]), bg = "transparent")
    }
  }
}
# K-M SURVIVAL TABLES -----------------------------------------------------
# Tabulate Kaplan-Meier summary statistics per stratum: records, events,
# median survival and its 0.95 confidence limits (columns 1 and 4:9 of
# summary(survfit)$table), rounded via roundHAFZ().
#
# Args:
#   df          - data frame holding the survival data
#   strataName  - string; name of the stratifying column in `df`
#   COD         - string; name of the event-indicator column in `df`
#   COD_abbr    - abbreviation appended to row names, e.g. "CS"
#   survTimeVar - string; name of the survival-time column
#   cutoff      - optional administrative-censoring time (see plot.KM_strata)
#
# Returns: a numeric matrix, one row per stratum level.
fun.KM_tbl <- function(df, strataName, COD, COD_abbr, survTimeVar, cutoff = NULL){
  # example:
  # df = df_surv
  # strataName = "Region"
  # COD = "COD_CS"
  # COD_abbr = "CS"
  # survTimeVar = "TimeSurv"
  # cutoff = NULL
  # levels of variable to be tabulated
  strata <- df[, strataName]
  n <- length(levels(as.factor(strata)))
  time <- df[, survTimeVar]
  COD <- df[, COD]
  if(!is.null(cutoff)){
    # administratively censor at `cutoff`
    # adjust Time
    time_cut <- NA
    time_cut[time <= cutoff] <- time[time <= cutoff]
    time_cut[is.na(time_cut)] <- cutoff
    # adjust COD
    COD_cut <- NA
    # BUG FIX: use the event vector extracted above; the original read df$COD,
    # which is NULL whenever the event column is not literally named "COD"
    COD_cut[time <= cutoff] <- COD[time <= cutoff]
    COD_cut[is.na(COD_cut)] <- 0
    time <- time_cut
    COD <- COD_cut
  }
  # Kaplan-Meier estimates
  fitKM <- survfit(Surv(time, COD)~ strata, conf.type="log-log")
  tbl <- roundHAFZ(summary(fitKM)$table[, c(1, 4:9)])
  rownames(tbl) <- paste0(rownames(tbl), " (", COD_abbr, ")")
  return(tbl)
}
# Cox PH -----------------------------------------------------------------
# Fit a multivariable Cox proportional-hazards model and return a tidy
# coefficient table: hazard ratios, 95% CIs, Wald p-values, and the
# Schoenfeld-residual test p-value for the proportional-hazards assumption.
#
# Args:
#   df          - data frame holding the survival data
#   varNames    - character vector of covariate column names
#   COD         - string; name of the event-indicator column in `df`
#   survTimeVar - string; name of the survival-time column
#
# Returns: data.frame with columns "Multivariable HR", "95% CI", "p-value",
#          "P(PH)", one row per model coefficient.
fun.Cox_multiv <- function(df, varNames, COD, survTimeVar){
  # example:
  # df = df_surv
  # varNames = c("Age_at_diagnosis", "Region", "Race_Ethnicity", "Sex")
  # COD = "COD_CS"
  # survTimeVar = "TimeSurv"
  # assemble the model frame with standardized internal column names
  CoxMult <- df[, varNames]
  CoxMult <- data.frame(CoxMult, df[, c(survTimeVar, COD)])
  names(CoxMult) <- c(varNames, "survTimeVar", "COD")
  fit <- coxph(Surv((survTimeVar), COD) ~ ., CoxMult)
  # BUG FIX: summarize the model fitted above instead of refitting it a
  # second time (the original called coxph() twice with identical arguments)
  fitCoxMulti <- summary(fit)
  # Schoenfeld residuals (test violation of proportionality)
  test <- cox.zph(fit, transform = "rank")
  # want specific output from Proportional Hazards Regression Model in a matrix format
  # exp(coef) = hazard ratio
  HR <- roundHAFZ(as.matrix(fitCoxMulti$coefficients)[, 2], n = 3)
  # 95% confidence interval bounds
  CI <- roundHAFZ(as.matrix(fitCoxMulti$conf.int)[, c(3, 4)], n = 3)
  # Wald p-value
  pval <- roundPVAL(roundHAFZ(as.matrix(fitCoxMulti$coefficients)[, 5], n = 3))
  # combine all necessary coefficients
  CoxMultiCoef <- NA
  CoxMultiCoef <- as.matrix(cbind(HR,
                                  paste(CI[, 1], "-", CI[, 2], sep=""),
                                  pval))
  # PH assumption p-value (drop the GLOBAL row at the bottom of the table)
  ph <- as.vector(test$table[-dim(test$table)[1], 3], mode="numeric")
  ph <- roundHAFZ(ph, n = 3)
  # values rounded to exactly 0 are reported as "<0.001"
  for(i in seq_along(ph)){
    if(ph[i] == 0){ph[i] = "<0.001"}
  }
  CoxMultiCoef <- data.frame(cbind(CoxMultiCoef, ph))
  names(CoxMultiCoef) <- c("Multivariable HR", "95% CI", "p-value", "P(PH)")
  return(CoxMultiCoef)
}
|
test_that("GetQualificationRequests", {
  skip_if_not(CheckAWSKeys())
  # Without arguments, all pending qualification requests are returned
  res <- GetQualificationRequests()
  expect_type(res, "list")
  # Restricting to a specific qualification type that we own also works;
  # the qual id is deliberately passed as a factor to exercise coercion
  owned <- SearchQualificationTypes(must.be.owner = TRUE, verbose = FALSE)
  first_qual <- owned$QualificationTypeId[[1]]
  res <- GetQualificationRequests(qual = as.factor(first_qual))
  expect_type(res, "list")
})
test_that("GetQualificationRequests error don't own qual", {
  skip_if_not(CheckAWSKeys())
  # Requesting a qualification type we do not own must raise an error
  res <- try(GetQualificationRequests("2YCIA0RYNJ9262B1D82MPTUEXAMPLE"), TRUE)
  expect_s3_class(res, 'try-error')
})
|
/tests/testthat/test-GetQualificationRequests.R
|
no_license
|
cloudyr/pyMTurkR
|
R
| false
| false
| 663
|
r
|
test_that("GetQualificationRequests", {
  skip_if_not(CheckAWSKeys())
  # Without arguments, all pending qualification requests are returned
  res <- GetQualificationRequests()
  expect_type(res, "list")
  # Restricting to a specific qualification type that we own also works;
  # the qual id is deliberately passed as a factor to exercise coercion
  owned <- SearchQualificationTypes(must.be.owner = TRUE, verbose = FALSE)
  first_qual <- owned$QualificationTypeId[[1]]
  res <- GetQualificationRequests(qual = as.factor(first_qual))
  expect_type(res, "list")
})
test_that("GetQualificationRequests error don't own qual", {
  skip_if_not(CheckAWSKeys())
  # Requesting a qualification type we do not own must raise an error
  res <- try(GetQualificationRequests("2YCIA0RYNJ9262B1D82MPTUEXAMPLE"), TRUE)
  expect_s3_class(res, 'try-error')
})
|
#
#
#
# load libraries
library(tidyverse); library(sp); library(sf); library(raster); library(data.table); library(mapdata)
library(maptools); library(gridExtra); library(nngeo); library(stringr); library(rgdal); library(scales)
# basemap: every layer below is projected into this Albers equal-area CRS
crs.1 <- "+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m"
land <- st_read("ne_50m_admin_0_countries.shp")
land <- st_transform(land, st_crs(crs.1))
# ensemble forward climate-velocity surface (RCP 8.5, 2085), resampled to a
# 10 km grid in the common projection
rcp8.5_2080 <- raster("covariates/fwvel_ensemble_rcp85_2085.tif")
rcp8.5_2080 <- projectRaster(rcp8.5_2080, crs = crs.1, res = 10000, method = "bilinear")
# read in base grid data: analysis fishnets at 100/200/400 km resolution.
# Each grid keeps only its X/Y columns, is de-duplicated, and gets a row id.
grd_100 <- st_read("100km_gridClean.shp") %>% dplyr::select(X,Y)
grd_100 <- unique(grd_100) %>% mutate(FID=row_number())
st_crs(grd_100) <- crs.1
grd_200 <- st_read("200km_gridClean.shp") %>% dplyr::select(X,Y)
grd_200 <- unique(grd_200) %>% mutate(FID=row_number())
# BUG FIX: the original re-assigned the CRS to grd_100 here (copy-paste
# error), leaving grd_200 without a coordinate reference system
st_crs(grd_200) <- crs.1
grd_400 <- st_read("400km_gridClean.shp") %>% dplyr::select(X,Y)
grd_400 <- unique(grd_400) %>% mutate(FID=row_number())
# BUG FIX: same copy-paste error for grd_400
st_crs(grd_400) <- crs.1
# grd_800 <- st_read("800km_gridClean.shp") %>% dplyr::select(X,Y)
# grd_800 <- unique(grd_800) %>% mutate(FID=row_number())
# st_crs(grd_100) <- 102008
# human footprint map: reproject to the common CRS at 10 km, clip to the
# study grid, and drop implausible/nodata values (> 50)
footprint <- raster("covariates/footprint_clipped.tif")
footprint <- projectRaster(footprint, crs = crs.1, res = 10000, method = "bilinear")
footprint <- crop(footprint, grd_100)
footprint[footprint > 50] <- NA
# WWF biomes raster: stamp the known CRS, clip, resample, and mask > 50
biomes <- raster("covariates/WWFBiomes.tif")
crs(biomes) <- crs.1
biomes <- crop(biomes, grd_100)
biomes <- projectRaster(biomes, crs = crs.1, res = 10000, method = "bilinear")
biomes[biomes > 50] <- NA
# read in fishnet data and count overall richness by overlap
# helper: load a fishnet CSV and promote it to an sf point layer in the
# common projection
read_fishnet <- function(path) {
  pts <- fread(path, header = TRUE, sep = ",", stringsAsFactors = FALSE)
  pts <- st_as_sf(pts, coords = c("X", "Y"))
  st_crs(pts) <- crs.1
  pts
}
# 100 km resolution: count fishnet points falling in each grid cell
fsh_100_Buff <- read_fishnet("100km_Buffer.csv")
grd_100$Buff100 <- lengths(st_intersects(grd_100, fsh_100_Buff))
# 200 km resolution
fsh_200_noBuff <- read_fishnet("200km_NoBuffer.csv")
grd_200$noBuff200 <- lengths(st_intersects(grd_200, fsh_200_noBuff))
# 400 km resolution
fsh_400_noBuff <- read_fishnet("400km_NoBuffer.csv")
grd_400$noBuff400 <- lengths(st_intersects(grd_400, fsh_400_noBuff))
# 800 km resolution
# fsh_800_noBuff <- fread("800km_NoBuffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
# fsh_800_noBuff <- st_as_sf(fsh_800_noBuff, coords=c("X", "Y"))
# st_crs(fsh_800_noBuff) <- 102008
#
# grd_800$noBuff800 <- lengths(st_intersects(grd_800, fsh_800_noBuff))
#
# fsh_800_Buff <- fread("800km_Buffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
# fsh_800_Buff <- st_as_sf(fsh_800_Buff, coords=c("X", "Y"))
# st_crs(fsh_800_Buff) <- 102008
#
# grd_800$Buff800 <- lengths(st_intersects(grd_800, fsh_800_Buff))
############################################
# read in and merge occurrence information #
############################################
# eButterfly: derive the observation year from the date string, keep the
# shared columns, and tag every record as a human observation
ebut <- fread("ebut_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE) %>%
  as_tibble() %>%
  mutate(year=as.numeric(str_extract(Date.Observed, "^\\d{4}"))) %>%
  dplyr::select(Family, species, X, Y, year, inRM) %>%
  mutate(basis="HUMAN_OBSERVATION", family=str_to_sentence(Family)) %>%
  dplyr::select(-Family)
# iDigBio: same normalization, keeping the recorded basis of record
idig <- fread("idig_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE) %>%
  as_tibble() %>%
  mutate(year=as.numeric(str_extract(dwc.eventDate, "^\\d{4}"))) %>%
  dplyr::select(dwc.family, species, X, Y, inRM, year, basis=dwc.basisOfRecord) %>%
  mutate(family=str_to_sentence(dwc.family)) %>%
  dplyr::select(-dwc.family)
# GBIF: already carries a year column
gbif <- fread("total_gbif_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE) %>%
  as_tibble() %>%
  dplyr::select(family, species, X, Y, inRM, year, basis=basisOfRecord) %>%
  mutate(family=str_to_sentence(family))
# pooled occurrence table plus a species -> family lookup used below
occur <- rbind(ebut, idig, gbif)
taxa <- unique(dplyr::select(occur, family, species))
rm(list=c("ebut", "idig", "gbif"))
##################################
# update families in fishnetting #
##################################
# helper: attach the family name to each fishnet point via the species
# lookup table `taxa`, dropping the raw species column
attach_family <- function(fsh) {
  fsh %>%
    left_join(taxa, by=c("scientificName"="species")) %>%
    dplyr::select(-scientificName) %>%
    mutate(family=str_to_sentence(family))
}
# 100 km resolution
fsh_100_Buff <- attach_family(fsh_100_Buff)
# 200 km resolution
fsh_200_noBuff <- attach_family(fsh_200_noBuff)
# 400 km resolution
fsh_400_noBuff <- attach_family(fsh_400_noBuff)
# 800 km resolution
# fsh_800_noBuff <- fsh_800_noBuff %>%
#   left_join(taxa, by=c("scientificName"="species")) %>%
#   dplyr::select(-scientificName) %>%
#   mutate(family=str_to_sentence(family))
###################################
# filter fishnets to family level #
###################################
# For each butterfly family, subset the fishnet (range-map) points to that
# family and count, per grid cell, how many points intersect the cell. These
# per-family counts become the denominators of the completeness ratios below.
# NOTE(review): section labels said "meter" but the grids are 100/200/400 km.
# 100 km resolution
fsh_100_Buff_nym <- filter(fsh_100_Buff, family=="Nymphalidae")
grd_100$Buff100_nym <- lengths(st_intersects(grd_100, fsh_100_Buff_nym))
fsh_100_Buff_pap <- filter(fsh_100_Buff, family=="Papilionidae")
grd_100$Buff100_pap <- lengths(st_intersects(grd_100, fsh_100_Buff_pap))
fsh_100_Buff_lyc <- filter(fsh_100_Buff, family=="Lycaenidae")
grd_100$Buff100_lyc <- lengths(st_intersects(grd_100, fsh_100_Buff_lyc))
fsh_100_Buff_hes <- filter(fsh_100_Buff, family=="Hesperiidae")
grd_100$Buff100_hes <- lengths(st_intersects(grd_100, fsh_100_Buff_hes))
fsh_100_Buff_pie <- filter(fsh_100_Buff, family=="Pieridae")
grd_100$Buff100_pie <- lengths(st_intersects(grd_100, fsh_100_Buff_pie))
fsh_100_Buff_rio <- filter(fsh_100_Buff, family=="Riodinidae")
grd_100$Buff100_rio <- lengths(st_intersects(grd_100, fsh_100_Buff_rio))
# 200 km resolution
fsh_200_noBuff_nym <- filter(fsh_200_noBuff, family=="Nymphalidae")
grd_200$noBuff200_nym <- lengths(st_intersects(grd_200, fsh_200_noBuff_nym))
fsh_200_noBuff_pap <- filter(fsh_200_noBuff, family=="Papilionidae")
grd_200$noBuff200_pap <- lengths(st_intersects(grd_200, fsh_200_noBuff_pap))
fsh_200_noBuff_lyc <- filter(fsh_200_noBuff, family=="Lycaenidae")
grd_200$noBuff200_lyc <- lengths(st_intersects(grd_200, fsh_200_noBuff_lyc))
fsh_200_noBuff_hes <- filter(fsh_200_noBuff, family=="Hesperiidae")
grd_200$noBuff200_hes <- lengths(st_intersects(grd_200, fsh_200_noBuff_hes))
fsh_200_noBuff_pie <- filter(fsh_200_noBuff, family=="Pieridae")
grd_200$noBuff200_pie <- lengths(st_intersects(grd_200, fsh_200_noBuff_pie))
fsh_200_noBuff_rio <- filter(fsh_200_noBuff, family=="Riodinidae")
grd_200$noBuff200_rio <- lengths(st_intersects(grd_200, fsh_200_noBuff_rio))
# 400 km resolution
fsh_400_noBuff_nym <- filter(fsh_400_noBuff, family=="Nymphalidae")
grd_400$noBuff400_nym <- lengths(st_intersects(grd_400, fsh_400_noBuff_nym))
fsh_400_noBuff_pap <- filter(fsh_400_noBuff, family=="Papilionidae")
grd_400$noBuff400_pap <- lengths(st_intersects(grd_400, fsh_400_noBuff_pap))
fsh_400_noBuff_lyc <- filter(fsh_400_noBuff, family=="Lycaenidae")
grd_400$noBuff400_lyc <- lengths(st_intersects(grd_400, fsh_400_noBuff_lyc))
fsh_400_noBuff_hes <- filter(fsh_400_noBuff, family=="Hesperiidae")
grd_400$noBuff400_hes <- lengths(st_intersects(grd_400, fsh_400_noBuff_hes))
fsh_400_noBuff_pie <- filter(fsh_400_noBuff, family=="Pieridae")
grd_400$noBuff400_pie <- lengths(st_intersects(grd_400, fsh_400_noBuff_pie))
fsh_400_noBuff_rio <- filter(fsh_400_noBuff, family=="Riodinidae")
grd_400$noBuff400_rio <- lengths(st_intersects(grd_400, fsh_400_noBuff_rio))
# 800 km resolution (disabled)
# fsh_800_Buff_nym <- filter(fsh_800_noBuff, family=="Nymphalidae")
# grd_800$Buff800_nym <- lengths(st_intersects(grd_800, fsh_800_Buff_nym))
#
# fsh_800_Buff_pap <- filter(fsh_800_noBuff, family=="Papilionidae")
# grd_800$Buff800_pap <- lengths(st_intersects(grd_800, fsh_800_Buff_pap))
#
# fsh_800_Buff_lyc <- filter(fsh_800_noBuff, family=="Lycaenidae")
# grd_800$Buff800_lyc <- lengths(st_intersects(grd_800, fsh_800_Buff_lyc))
#
# fsh_800_Buff_hes <- filter(fsh_800_noBuff, family=="Hesperiidae")
# grd_800$Buff800_hes <- lengths(st_intersects(grd_800, fsh_800_Buff_hes))
#
# fsh_800_Buff_pie <- filter(fsh_800_noBuff, family=="Pieridae")
# grd_800$Buff800_pie <- lengths(st_intersects(grd_800, fsh_800_Buff_pie))
#
# fsh_800_Buff_rio <- filter(fsh_800_noBuff, family=="Riodinidae")
# grd_800$Buff800_rio <- lengths(st_intersects(grd_800, fsh_800_Buff_rio))
#############################################
# convert occurrence data to spatial object #
#############################################
# filter for records between 1950-2019, then promote to sf points
occur <- occur %>% filter(between(year, 1950, 2019))
occur <- st_as_sf(occur, coords=c("X", "Y"))
st_crs(occur) <- crs.1
###############################################
# apply filters for basis of record attribute #
###############################################
# (a redundant no-op self-assignment `occur <- occur` was removed here)
# records whose coordinates fall inside the species' range map
occur_inRM <- filter(occur, inRM=="Yes")
# specimen-backed records: exclude human/machine observations and unknowns
occur_spec <- filter(occur_inRM, basis!="HUMAN_OBSERVATION", basis!="UNKNOWN", basis!="machineobservation", basis!="MACHINE_OBSERVATION")
# observation-only records
occur_obse <- filter(occur_inRM, basis=="HUMAN_OBSERVATION")
nrow(filter(occur, inRM=="Yes"))/nrow(occur) # number in range overall
# yearly counts of records inside vs. outside range maps
inRMYears <- as.data.frame(occur) %>% filter(inRM=="Yes") %>%
  group_by(year) %>% summarise(n=n())
outRMYears <- as.data.frame(occur) %>% filter(inRM=="No") %>%
  group_by(year) %>% summarise(n=n())
# per-year share of in-range records (n.x = outside, n.y = inside after merge)
totalRMYears <- merge(outRMYears, inRMYears, by="year") %>%
  filter(between(year, 1950, 2019)) %>% mutate(perc.n.in = n.y/(n.x+n.y))
plot(totalRMYears$year, totalRMYears$perc.n.in)
# overall mean in-range share and its 95% CI half-width
mean(totalRMYears$perc.n.in)
1.96*sd(totalRMYears$perc.n.in)/sqrt(nrow(totalRMYears))
# same for the most recent decade (2010-2019)
mean(filter(totalRMYears, between(year, 2010, 2019))$perc.n.in)
1.96*sd(filter(totalRMYears, between(year, 2010, 2019))$perc.n.in)/sqrt(nrow(filter(totalRMYears, between(year, 2010, 2019))))
# per-family subsets of the in-range / specimen / observation records
occur_nym <- filter(occur_inRM, family=="Nymphalidae")
occur_pap <- filter(occur_inRM, family=="Papilionidae")
occur_lyc <- filter(occur_inRM, family=="Lycaenidae")
occur_hes <- filter(occur_inRM, family=="Hesperiidae")
occur_pie <- filter(occur_inRM, family=="Pieridae")
occur_spec_nym <- filter(occur_spec, family=="Nymphalidae")
occur_spec_pap <- filter(occur_spec, family=="Papilionidae")
occur_spec_lyc <- filter(occur_spec, family=="Lycaenidae")
occur_spec_hes <- filter(occur_spec, family=="Hesperiidae")
occur_spec_pie <- filter(occur_spec, family=="Pieridae")
occur_obse_nym <- filter(occur_obse, family=="Nymphalidae")
occur_obse_pap <- filter(occur_obse, family=="Papilionidae")
occur_obse_lyc <- filter(occur_obse, family=="Lycaenidae")
occur_obse_hes <- filter(occur_obse, family=="Hesperiidae")
occur_obse_pie <- filter(occur_obse, family=="Pieridae")
#
# occur_t1 <- filter(occur, between(year, 1950, 1969))
# occur_t2 <- filter(occur, between(year, 1970, 1989))
# occur_t3 <- filter(occur, between(year, 1990, 2009))
# occur_t4 <- filter(occur, between(year, 2010, 2019))
#################################
# count unique species in grids #
#################################
# For each record subset, spatially join occurrences to the grid and count
# distinct species per cell (FID). The per-cell richness columns are then
# joined back onto the grid. NOTE(review): labels said "meter"; grids are km.
# 100 km resolution
grd_100_allRich <- occur %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_100_inRMRich <- occur_inRM %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_100_inRMspecRich <- occur_spec %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_100_inRMobseRich <- occur_obse %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
# per-family richness (all in-range records)
grd_100_inRMnym <- occur_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMNym=n_distinct(species))
grd_100_inRMpap <- occur_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMPap=n_distinct(species))
grd_100_inRMlyc <- occur_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMlyc=n_distinct(species))
grd_100_inRMhes <- occur_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMhes=n_distinct(species))
grd_100_inRMpie <- occur_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMpie=n_distinct(species))
# per-family richness (specimen-backed records only)
grd_100_inRMspecnym <- occur_spec_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecNym=n_distinct(species))
grd_100_inRMspecpap <- occur_spec_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecPap=n_distinct(species))
grd_100_inRMspeclyc <- occur_spec_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspeclyc=n_distinct(species))
grd_100_inRMspeches <- occur_spec_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspeches=n_distinct(species))
grd_100_inRMspecpie <- occur_spec_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecpie=n_distinct(species))
# per-family richness (human-observation records only)
grd_100_inRMobsenym <- occur_obse_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobseNym=n_distinct(species))
grd_100_inRMobsepap <- occur_obse_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsePap=n_distinct(species))
grd_100_inRMobselyc <- occur_obse_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobselyc=n_distinct(species))
grd_100_inRMobsehes <- occur_obse_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsehes=n_distinct(species))
grd_100_inRMobsepie <- occur_obse_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsepie=n_distinct(species))
# time-period richness (disabled)
# grd_100_t1 <- occur_t1 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_100_t2 <- occur_t2 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_100_t3 <- occur_t3 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_100_t4 <- occur_t4 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 100 grid (one left_join per richness table, keyed on FID)
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_allRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobseRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMnym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMpap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMlyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMhes), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMpie), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecnym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecpap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspeclyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspeches), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecpie), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsenym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsepap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobselyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsehes), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsepie), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t1), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t2), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t3), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t4), by="FID")
# 200 km resolution: same richness counts/joins as the 100 km grid, but only
# for the overall subsets and the observation-only family subsets
grd_200_allRich <- occur %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_200_inRMRich <- occur_inRM %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_200_inRMSpecRich <- occur_spec %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_200_inRMObseRich <- occur_obse %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
grd_200_inRMObsenym <- occur_obse_nym %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
grd_200_inRMObsepap <- occur_obse_pap %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
grd_200_inRMObselyc <- occur_obse_lyc %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
grd_200_inRMObsehes <- occur_obse_hes %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
grd_200_inRMObsepie <- occur_obse_pie %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
# time-period richness (disabled)
# grd_200_t1 <- occur_t1 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_200_t2 <- occur_t2 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_200_t3 <- occur_t3 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_200_t4 <- occur_t4 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 200 grid
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_allRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMSpecRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObseRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsenym), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsepap), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObselyc), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsehes), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsepie), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t1), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t2), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t3), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t4), by="FID")
# 400 km resolution: same richness counts/joins as the 200 km grid
grd_400_allRich <- occur %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_400_inRMRich <- occur_inRM %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_400_inRMSpecRich <- occur_spec %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_400_inRMObseRich <- occur_obse %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
grd_400_inRMObsenym <- occur_obse_nym %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
grd_400_inRMObsepap <- occur_obse_pap %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
grd_400_inRMObselyc <- occur_obse_lyc %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
grd_400_inRMObsehes <- occur_obse_hes %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
grd_400_inRMObsepie <- occur_obse_pie %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
# time-period richness (disabled)
# grd_400_t1 <- occur_t1 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_400_t2 <- occur_t2 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_400_t3 <- occur_t3 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_400_t4 <- occur_t4 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 400 grid
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_allRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMSpecRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObseRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsenym), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsepap), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObselyc), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsehes), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsepie), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t1), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t2), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t3), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t4), by="FID")
# 800 km resolution (disabled)
# grd_800_allRich <- occur %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
# grd_800_inRMRich <- occur_inRM %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
# grd_800_inRMSpecRich <- occur_spec %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
# grd_800_inRMObseRich <- occur_obse %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
#
# grd_800_inRMObsenym <- occur_obse_nym %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
# grd_800_inRMObsepap <- occur_obse_pap %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
# grd_800_inRMObselyc <- occur_obse_lyc %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
# grd_800_inRMObsehes <- occur_obse_hes %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
# grd_800_inRMObsepie <- occur_obse_pie %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
#
# grd_800_t1 <- occur_t1 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_800_t2 <- occur_t2 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_800_t3 <- occur_t3 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_800_t4 <- occur_t4 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
#
# # merge back with 800 grid
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_allRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMSpecRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObseRich), by="FID")
#
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsenym), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsepap), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObselyc), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsehes), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsepie), by="FID")
#
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t1), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t2), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t3), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t4), by="FID")
####################
# calculate ratios #
####################
# 100 km resolution
# Inventory-completeness ratios: species observed per cell divided by the
# species expected from buffered range-map overlap, for all records and for
# the specimen/observation/per-family subsets.
cap_cols_100 <- c("logInRMOccur_Buff", "logInRMSpec_Buff", "logInRMObse_Buff",
                  "nymobsRatio", "papobsRatio", "lycobsRatio", "hesobsRatio", "pieobsRatio",
                  "nymspecRatio", "papspecRatio", "lycspecRatio", "hesspecRatio", "piespecRatio",
                  "nymRatio", "papRatio", "lycRatio", "hesRatio", "pieRatio")
grd_100 <- grd_100 %>%
  mutate(logInRMOccur_Buff = n_speciesInRM/Buff100,
         logInRMSpec_Buff = n_speciesInRMSpec/Buff100,
         logInRMObse_Buff = n_speciesInRMObse/Buff100,
         nymobsRatio = n_speciesInRMobseNym/Buff100_nym,
         papobsRatio = n_speciesInRMobsePap/Buff100_pap,
         lycobsRatio = n_speciesInRMobselyc/Buff100_lyc,
         hesobsRatio = n_speciesInRMobsehes/Buff100_hes,
         pieobsRatio = n_speciesInRMobsepie/Buff100_pie,
         nymspecRatio = n_speciesInRMspecNym/Buff100_nym,
         papspecRatio = n_speciesInRMspecPap/Buff100_pap,
         lycspecRatio = n_speciesInRMspeclyc/Buff100_lyc,
         hesspecRatio = n_speciesInRMspeches/Buff100_hes,
         piespecRatio = n_speciesInRMspecpie/Buff100_pie,
         nymRatio = n_speciesInRMNym/Buff100_nym,
         papRatio = n_speciesInRMPap/Buff100_pap,
         lycRatio = n_speciesInRMlyc/Buff100_lyc,
         hesRatio = n_speciesInRMhes/Buff100_hes,
         pieRatio = n_speciesInRMpie/Buff100_pie) %>%
  # t1Ratio = n_speciesT1/Buff100,
  # t2Ratio = n_speciest2/Buff100,
  # t3Ratio = n_speciest3/Buff100,
  # t4Ratio = n_speciest4/Buff100) %>%
  # cap every ratio at 1: same per-column ifelse(x > 1, 1, x) as before,
  # applied via across() instead of eighteen hand-written clauses
  mutate(across(all_of(cap_cols_100), ~ ifelse(.x > 1, 1, .x)))
# t1Ratio = ifelse(t1Ratio > 1, 1, t1Ratio),
# t2Ratio = ifelse(t2Ratio > 1, 1, t2Ratio),
# t3Ratio = ifelse(t3Ratio > 1, 1, t3Ratio),
# t4Ratio = ifelse(t4Ratio > 1, 1, t4Ratio))
# 200 km resolution
# Sampling-completeness ratios at 200 km resolution: observed species
# counts divided by expected richness from range maps, then capped at 1.
# NOTE(review): the *_Buff columns here divide by the UNbuffered
# expectation (noBuff200*), unlike the 100 km block (Buff100) and the
# commented-out 800 km block (Buff800). Possibly intentional (no buffered
# 200 km layer may exist), but confirm before comparing across scales.
grd_200 <- grd_200 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff200,
                              logInRMOccur_NoBuff = n_speciesInRM/noBuff200,
                              logInRMOccur_Buff = n_speciesInRM/noBuff200,
                              logInRMSpec_Buff = n_speciesInRMSpec/noBuff200,
                              logInRMObse_Buff = n_speciesInRMObse/noBuff200,
                              # per-family completeness from human observations
                              nymObsRatio = n_speciesInRMObseNym/noBuff200_nym,
                              papObsRatio = n_speciesInRMObsePap/noBuff200_pap,
                              lycObsRatio = n_speciesInRMObselyc/noBuff200_lyc,
                              hesObsRatio = n_speciesInRMObsehes/noBuff200_hes,
                              pieObsRatio = n_speciesInRMObsepie/noBuff200_pie) %>%
  # t1Ratio = n_speciesT1/Buff200,
  # t2Ratio = n_speciest2/Buff200,
  # t3Ratio = n_speciest3/Buff200,
  # t4Ratio = n_speciest4/Buff200) %>%
  # Cap each ratio at 1 (observed richness can exceed range-map expectation).
  mutate(logInRMOccur_Buff = ifelse(logInRMOccur_Buff > 1, 1, logInRMOccur_Buff),
         logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
         logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff),
         nymObsRatio = ifelse(nymObsRatio > 1, 1, nymObsRatio),
         papObsRatio = ifelse(papObsRatio > 1, 1, papObsRatio),
         lycObsRatio = ifelse(lycObsRatio > 1, 1, lycObsRatio),
         hesObsRatio = ifelse(hesObsRatio > 1, 1, hesObsRatio),
         pieObsRatio = ifelse(pieObsRatio > 1, 1, pieObsRatio))
# t1Ratio = ifelse(t1Ratio > 1, 1, t1Ratio),
# t2Ratio = ifelse(t2Ratio > 1, 1, t2Ratio),
# t3Ratio = ifelse(t3Ratio > 1, 1, t3Ratio),
# t4Ratio = ifelse(t4Ratio > 1, 1, t4Ratio))
# 400 km resolution
# Sampling-completeness ratios at 400 km resolution (same structure as
# the 200 km block above).
# NOTE(review): as at 200 km, the *_Buff columns divide by the UNbuffered
# expectation (noBuff400*) — confirm whether a buffered layer was intended.
grd_400 <- grd_400 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff400,
                              logInRMOccur_NoBuff = n_speciesInRM/noBuff400,
                              logInRMOccur_Buff = n_speciesInRM/noBuff400,
                              logInRMSpec_Buff = n_speciesInRMSpec/noBuff400,
                              logInRMObse_Buff = n_speciesInRMObse/noBuff400,
                              # per-family completeness from human observations
                              nymObsRatio = n_speciesInRMObseNym/noBuff400_nym,
                              papObsRatio = n_speciesInRMObsePap/noBuff400_pap,
                              lycObsRatio = n_speciesInRMObselyc/noBuff400_lyc,
                              hesObsRatio = n_speciesInRMObsehes/noBuff400_hes,
                              pieObsRatio = n_speciesInRMObsepie/noBuff400_pie) %>%
  # t1Ratio = n_speciesT1/Buff400,
  # t2Ratio = n_speciest2/Buff400,
  # t3Ratio = n_speciest3/Buff400,
  # t4Ratio = n_speciest4/Buff400) %>%
  # Cap each ratio at 1 (observed richness can exceed range-map expectation).
  mutate(logInRMOccur_Buff = ifelse(logInRMOccur_Buff > 1, 1, logInRMOccur_Buff),
         logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
         logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff),
         nymObsRatio = ifelse(nymObsRatio > 1, 1, nymObsRatio),
         papObsRatio = ifelse(papObsRatio > 1, 1, papObsRatio),
         lycObsRatio = ifelse(lycObsRatio > 1, 1, lycObsRatio),
         hesObsRatio = ifelse(hesObsRatio > 1, 1, hesObsRatio),
         pieObsRatio = ifelse(pieObsRatio > 1, 1, pieObsRatio))
# t1Ratio = ifelse(t1Ratio > 1, 1, t1Ratio),
# t2Ratio = ifelse(t2Ratio > 1, 1, t2Ratio),
# t3Ratio = ifelse(t3Ratio > 1, 1, t3Ratio),
# t4Ratio = ifelse(t4Ratio > 1, 1, t4Ratio))
# # 800 km resolution
# grd_800 <- grd_800 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff800,
# logInRMOccur_NoBuff = n_speciesInRM/noBuff800,
# logInRMOccur_Buff = n_speciesInRM/Buff800,
# logInRMSpec_Buff = n_speciesInRMSpec/Buff800,
# logInRMObse_Buff = n_speciesInRMObse/Buff800,
# nymObsRatio = n_speciesInRMObseNym/Buff800_nym,
# papObsRatio = n_speciesInRMObsePap/Buff800_pap,
# lycObsRatio = n_speciesInRMObselyc/Buff800_lyc,
# hesObsRatio = n_speciesInRMObsehes/Buff800_hes,
# pieObsRatio = n_speciesInRMObsepie/Buff800_pie,
# t1Ratio = n_speciesT1/Buff800,
# t2Ratio = n_speciest2/Buff800,
# t3Ratio = n_speciest3/Buff800,
# t4Ratio = n_speciest4/Buff800) %>%
# mutate(logInRMOccur_Buff = ifelse(logInRMOccur_Buff > 1, 1, logInRMOccur_Buff),
# logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
# logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff),
# nymObsRatio = ifelse(nymObsRatio > 1, 1, nymObsRatio),
# papObsRatio = ifelse(papObsRatio > 1, 1, papObsRatio),
# lycObsRatio = ifelse(lycObsRatio > 1, 1, lycObsRatio),
# hesObsRatio = ifelse(hesObsRatio > 1, 1, hesObsRatio),
# pieObsRatio = ifelse(pieObsRatio > 1, 1, pieObsRatio),
# t1Ratio = ifelse(t1Ratio > 1, 1, t1Ratio),
# t2Ratio = ifelse(t2Ratio > 1, 1, t2Ratio),
# t3Ratio = ifelse(t3Ratio > 1, 1, t3Ratio),
# t4Ratio = ifelse(t4Ratio > 1, 1, t4Ratio))
#######################
## Sample Covariates ##
#######################
# Climate covariate: RCP 8.5 climate-change velocity into the 2080s,
# summarised per 100 km cell, categorised by percentile, plotted, and
# compared against sampling completeness.
# Deduplicate grid cells before attaching covariates.
grd_100 <- unique(grd_100)
# Mean climate-velocity raster value per cell.
grd_100$rcp85_2080 <- extract(rcp8.5_2080, grd_100, fun=mean, na.rm=TRUE)
# Percentile-rank cells and flag the top 5% ("2") and top 20% ("1").
grd_100 <- grd_100 %>% mutate(clim.rank = percent_rank(rcp85_2080)) %>%
  mutate(clim.cat = ifelse(clim.rank >= 0.95, "2", ifelse(clim.rank >= 0.80, "1", "0")))
grd_100$clim.cat <- as.character(grd_100$clim.cat)  # safeguard; ifelse already yields character
# Cell centroids (for point plotting) with explicit x/y coordinate columns.
grd_100pt <- st_centroid(grd_100)
grd_100pt <- grd_100pt %>% mutate(x.crd = st_coordinates(grd_100pt)[,1], y.crd = st_coordinates(grd_100pt)[,2])
# Keep rcp85_2080 in the selection: the filter(!is.na(rcp85_2080)) below
# needs it (the original select() dropped the column, so that filter
# could not find it).
climate <- dplyr::select(grd_100pt, clim.cat, logInRMOccur_Buff, rcp85_2080)
grd_100pt <- grd_100pt %>% mutate(logInRMOccur_Buff=replace_na(logInRMOccur_Buff, 0))
# Plot climate velocities on the grid, highlighting the 80th and 95th
# percentile classes; point size scales with *in*completeness.
tiff("climateMap_legend.tiff", units="cm", width=12.5, height=12.5, res=350)
ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  #geom_sf(grd_100, mapping=aes(fill=clim.cat), color=NA, alpha=0.7)+
  geom_point(grd_100pt, mapping=aes(x=x.crd, y=y.crd,
                                    color=clim.cat,
                                    size=1-logInRMOccur_Buff),
             alpha=0.5, shape=15)+
  scale_color_manual(values=c("#3c9ab2", "#e8c927", "#f22300"))+
  #scale_color_manual(values=c("black", "white"))+
  scale_size_continuous(range=c(0.01, 1.99999))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
  theme_void()
dev.off()
# Drop cells with no climate-velocity value before summarising.
climate <- filter(climate, !is.na(rcp85_2080))
# Fraction of high-velocity cells that are NOT >= 80% complete, for the
# 80th-percentile ("1") and 95th-percentile ("2") classes.
1-nrow(filter(climate, clim.cat==1, logInRMOccur_Buff >= 0.80))/nrow(filter(climate, clim.cat==1))
1-nrow(filter(climate, clim.cat==2, logInRMOccur_Buff >= 0.80))/nrow(filter(climate, clim.cat==2))
# human footprint and majority biome
library(exactextractr)
# Modal (most frequent) value of a vector, ignoring NAs.
# Ties are broken by first appearance in `x`; a vector with no non-NA
# values yields a zero-length result. Extra arguments are accepted (and
# ignored) so the function can be passed where a summary function taking
# additional parameters (e.g. na.rm) is expected.
getMode <- function(x, ...) {
  vals <- unique(x)
  vals <- vals[!is.na(vals)]
  counts <- tabulate(match(x, vals))
  vals[which.max(counts)]
}
# Protected-area grid area covariate, joined to the grid by a cell ID
# built from the X/Y columns (assumes `nature` uses the same ID scheme
# and that grd_100 carries X and Y — TODO confirm against the CSV).
nature <- fread("covariates/PA_grid/PA_grid_area.csv")
grd_100$ID <- paste(grd_100$X, grd_100$Y)
grd_100 <- grd_100 %>% left_join(nature, by="ID")
# Mean human-footprint index and majority biome class per cell.
grd_100$footprint <- exact_extract(footprint, grd_100, fun="mean")
grd_100$biome <- exact_extract(biomes, grd_100, fun="majority")
# Linear models: completeness ~ human footprint (+ protected-area extent).
fit.all <- lm(logInRMOccur_Buff~footprint+grid_area, data=grd_100)
fit.spec <- lm(logInRMSpec_Buff~footprint+grid_area, data=grd_100)
fit.obse <- lm(logInRMObse_Buff~footprint+grid_area, data=grd_100)
# Reduced models without the protected-area term, for AIC comparison.
fit.all.1 <- lm(logInRMOccur_Buff~footprint, data=grd_100)
fit.spec.1 <- lm(logInRMSpec_Buff~footprint, data=grd_100)
fit.obse.1 <- lm(logInRMObse_Buff~footprint, data=grd_100)
# Delta AIC (full minus reduced); negative second element favours the full model.
extractAIC(fit.all)-extractAIC(fit.all.1)
extractAIC(fit.spec)-extractAIC(fit.spec.1)
extractAIC(fit.obse)-extractAIC(fit.obse.1)
# Welch two-sample t-test: specimen vs observation completeness, plus
# standard errors of the two means.
t.test(grd_100$logInRMSpec_Buff, grd_100$logInRMObse_Buff)
sd(na.omit(grd_100$logInRMObse_Buff))/sqrt(length(na.omit(grd_100$logInRMObse_Buff)))
sd(na.omit(grd_100$logInRMSpec_Buff))/sqrt(length(na.omit(grd_100$logInRMSpec_Buff)))
# get summary stats for biomes
# Per-biome summary statistics of completeness (overall / observation /
# specimen, plus per-family observation ratios).
grd_100$biome <- round(grd_100$biome, digits=0)
biom.st <- as.data.frame(dplyr::select(grd_100, biome, logInRMOccur_Buff, logInRMObse_Buff, logInRMSpec_Buff,
                                       nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)) %>%
  mutate(biome=floor(biome))
# NOTE(review): biom.fl below is nearly identical to the biom.st summary
# that follows (only the "> 80%" vs ">= 80%" labels and the missing
# filter/coordinate columns differ) and does not appear to be used
# later in this chunk — possibly dead code.
biom.fl <- biom.st %>% group_by(biome) %>%
  summarise(meanAll=mean(logInRMOccur_Buff, na.rm=TRUE), meanobse=mean(logInRMObse_Buff, na.rm=TRUE), meanSpec=mean(logInRMSpec_Buff, na.rm=TRUE),
            meanNym=mean(nymobsRatio, na.rm=TRUE), meanPap=mean(papobsRatio, na.rm=TRUE), meanLyc=mean(lycobsRatio, na.rm=TRUE), meanHes=mean(hesobsRatio, na.rm=TRUE),
            meanPie=mean(pieobsRatio, na.rm=TRUE),
            sdAll=sd(logInRMOccur_Buff, na.rm=TRUE), sdobse=sd(logInRMObse_Buff, na.rm=TRUE), sdSpec=sd(logInRMSpec_Buff, na.rm=TRUE),
            sdNym=sd(nymobsRatio, na.rm=TRUE), sdPap=sd(papobsRatio, na.rm=TRUE), sdLyc=sd(lycobsRatio, na.rm=TRUE), sdHes=sd(hesobsRatio, na.rm=TRUE),
            sdPie=sd(pieobsRatio, na.rm=TRUE), count=n()) %>%
  mutate(colorAll = ifelse(meanAll >= 0.8, "> 80%", ifelse(meanAll >= 0.5, "50% <= x < 80%", "< 50%")),
         colorobs = ifelse(meanobse >= 0.8, "> 80%", ifelse(meanobse >= 0.5, "50% <= x < 80%", "< 50%")),
         colorSpe = ifelse(meanSpec >= 0.8, "> 80%", ifelse(meanSpec >= 0.5, "50% <= x < 80%", "< 50%")))
# Mean/SD completeness per biome, colour-coded into three completeness
# bands; biomes with <= 10 cells are dropped, and plotting coordinates
# for the circle figure below are appended.
biom.st <- biom.st %>% group_by(biome) %>%
  summarise(meanAll=mean(logInRMOccur_Buff, na.rm=TRUE), meanobse=mean(logInRMObse_Buff, na.rm=TRUE), meanSpec=mean(logInRMSpec_Buff, na.rm=TRUE),
            meanNym=mean(nymobsRatio, na.rm=TRUE), meanPap=mean(papobsRatio, na.rm=TRUE), meanLyc=mean(lycobsRatio, na.rm=TRUE), meanHes=mean(hesobsRatio, na.rm=TRUE),
            meanPie=mean(pieobsRatio, na.rm=TRUE),
            sdAll=sd(logInRMOccur_Buff, na.rm=TRUE), sdobse=sd(logInRMObse_Buff, na.rm=TRUE), sdSpec=sd(logInRMSpec_Buff, na.rm=TRUE),
            sdNym=sd(nymobsRatio, na.rm=TRUE), sdPap=sd(papobsRatio, na.rm=TRUE), sdLyc=sd(lycobsRatio, na.rm=TRUE), sdHes=sd(hesobsRatio, na.rm=TRUE),
            sdPie=sd(pieobsRatio, na.rm=TRUE), count=n()) %>%
  mutate(colorAll = ifelse(meanAll >= 0.8, ">= 80%", ifelse(meanAll >= 0.5, "50% <= x < 80%", "< 50%")),
         colorobs = ifelse(meanobse >= 0.8, ">= 80%", ifelse(meanobse >= 0.5, "50% <= x < 80%", "< 50%")),
         colorSpe = ifelse(meanSpec >= 0.8, ">= 80%", ifelse(meanSpec >= 0.5, "50% <= x < 80%", "< 50%"))) %>%
  filter(count > 10) %>%
  mutate(coord=row_number(), coordAll=1, coordobse=3, coordSpec=5, coordNym=8, coordPap=10, coordLyc=12, coordHes=14, coordPie=16)
library(ggforce)
# Panel (a): per-biome completeness as concentric circles — outer/inner
# ring radii are mean/2 +/- sd/2, the solid black circle is the mean,
# coloured by completeness band.
# NOTE(review): when sd > mean the inner radius (mean/2 - sd/2) is
# negative — confirm geom_circle renders that as intended.
tiff("circles.tiff", units="cm", width=8.3, height=8.3, res=350)
ggplot(biom.st)+
  geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2+sdAll/2, color=colorAll, fill=colorAll), show.legend=FALSE)+
  geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2-sdAll/2, color=colorAll), fill="white")+
  geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2), color="black", fill=NA)+
  geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2+sdobse/2, color=colorobs, fill=colorobs), show.legend=FALSE)+
  geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2-sdobse/2, color=colorobs), fill="white")+
  geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2), color="black", fill=NA)+
  geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2+sdSpec/2, color=colorSpe, fill=colorSpe), show.legend=FALSE)+
  geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2-sdSpec/2, color=colorSpe), fill="white")+
  geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2), color="black", fill=NA)+
  # per-family circles, currently disabled:
  # geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2+sdHes/2), color="grey", fill="grey")+
  # geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2-sdHes/2), color="grey", fill="white")+
  # geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2), bolor="black", fill=NA)+
  # geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2+sdLyc/2), color="grey", fill="grey")+
  # geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2-sdLyc/2), color="grey", fill="white")+
  # geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2), bolor="black", fill=NA)+
  # geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2+sdNym/2), color="grey", fill="grey")+
  # geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2-sdNym/2), color="grey", fill="white")+
  # geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2), bolor="black", fill=NA)+
  # geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2+sdPap/2), color="grey", fill="grey")+
  # geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2-sdPap/2), color="grey", fill="white")+
  # geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2), bolor="black", fill=NA)+
  # geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2+sdPie/2), color="grey", fill="grey")+
  # geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2-sdPie/2), color="grey", fill="white")+
  # geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2), color="black", fill=NA)+
  #geom_circle(aes(x0=c(0,1,2,3), y0=c(0,0,0,0), r=c(0.25/2, 0.5/2, 0.75/2, 1/2)))+
  scale_fill_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
  scale_color_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
  xlim(0, 11) + ylim(0, 11)+
  theme_void()+
  theme(legend.position = "none") + labs(tag="(a)")
dev.off()
# WWF biome polygons, reprojected to the project CRS (crs.1 defined
# earlier in this script) and joined to the per-biome summary stats.
b.ply <- st_read("covariates/WWFBiomes.shp")
b.ply <- st_transform(b.ply, crs.1)
b.ply <- b.ply %>% left_join(biom.st, by=c("BIOME" = "biome"))
# Legend for the circle figure: reference circles at r = 0.25/0.5/0.75/1
# (halved), drawn on the same scale.
tiff("circles_legend.tiff", units="cm", width=8.3, height=8.3, res=350)
ggplot()+
  geom_circle(aes(x0=c(1,2,3,4), y0=c(1,1,1,1), r=c(0.25/2, 0.5/2, 0.75/2, 1/2)))+
  xlim(0, 11) + ylim(0, 11)+
  theme_void() + labs(tag=" ")
dev.off()
# Earlier biome-coded map variant, kept for reference:
# ggplot()+
#   geom_sf(data=land, fill="grey", color=NA)+
#   geom_sf(b.ply, mapping=aes(fill=as.factor(BIOME)), color="grey", size=0.05)+
#   scale_fill_manual(values=c("#839791", "#839791", "#839791",
#                              "#839791", "#839791", "#839791",
#                              "#F28F3B", "#F28F3B", "#F28F3B",
#                              "#F28F3B", "#575761", "#F4D6CC",
#                              "#F4D6CC", "#F4D6CC", NA, NA))+
#   coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
#   theme_void()
# Panel (b): biomes coloured by their overall completeness band.
tiff("biome_map.tiff", units="in", width=8.3, height=8.3, res=350)
ggplot()+
  geom_sf(data=land, fill="grey", color=NA)+
  geom_sf(b.ply, mapping=aes(fill=colorAll), color=NA, size=0.05)+
  scale_fill_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
  theme_void()+labs(tag="(b)")
dev.off()
###################
# yearly analysis #
###################
# Biyearly completeness: for each two-year window 1950-1951 ... 2018-2019,
# recompute specimen and observation completeness per 100 km cell and
# record (1) start/end year, (2) counts of cells >= 80% complete, and
# (3) mean completeness across cells.
# NOTE(review): using `<-` instead of `=` inside data.frame() creates
# columns with mangled names like `startYear....integer..` — downstream
# code (yearly mutate / yearly.long) depends on those mangled names, and
# the loop fills by numeric column index, so it still works as written.
yearly <- data.frame(startYear <- integer(),
                     endYear <- integer(),
                     specCom <- double(),
                     obsCom <- double(),
                     specAve <- double(),
                     obsAve <- double())
grd_100c <- grd_100
# biyearly
i = 1950
while(i < 2019){
  print(paste("Processing years: ", i, "-", i+1))
  # Fresh working copy of the grid for this window.
  grd_100c <- grd_100
  yearly[i-1949,1] <- i
  yearly[i-1949,2] <- i+1
  # Split the window's records into specimens and human observations.
  occur_spec <- filter(occur_inRM, between(year, i, i+1), basis!="HUMAN_OBSERVATION")
  occur_obse <- filter(occur_inRM, between(year, i, i+1), basis=="HUMAN_OBSERVATION")
  # Species richness per cell for each record type.
  grd_100_biyearlyspec <- occur_spec %>% st_join(grd_100c) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
  grd_100_biyearlyobs <- occur_obse %>% st_join(grd_100c) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
  # Join onto the grid; the .y suffix below exists because grd_100c
  # already carries full-period columns of the same names.
  grd_100c <- grd_100c %>% left_join(as.data.frame(grd_100_biyearlyspec), by="FID")
  grd_100c <- grd_100c %>% left_join(as.data.frame(grd_100_biyearlyobs), by="FID")
  # Window completeness vs buffered range-map expectation, capped at 1.
  grd_100c <- grd_100c %>% mutate(logInRMSpec_Buff = n_speciesInRMSpec.y/Buff100,
                                  logInRMObse_Buff = n_speciesInRMObse.y/Buff100) %>%
    mutate(logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
           logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff))
  # Cols 3/4: count of cells >= 80% complete; cols 5/6: mean completeness.
  yearly[i-1949,3] <- nrow(filter(grd_100c, logInRMSpec_Buff >= 0.8))
  yearly[i-1949,4] <- nrow(filter(grd_100c, logInRMObse_Buff >= 0.8))
  yearly[i-1949,5] <- mean(na.omit(grd_100c$logInRMSpec_Buff))
  yearly[i-1949,6] <- mean(na.omit(grd_100c$logInRMObse_Buff))
  i = i+2
}
# Derived proportions per biyearly window. (Column names like
# `specCom....double..` are the mangled names produced by using `<-`
# inside the data.frame() constructor above; they are kept so downstream
# code continues to work.)
yearly <- na.omit(yearly)
yearly <- yearly %>% mutate(
  # Total cells = cells complete via specimens + via observations + the
  # remainder. The original expression lacked parentheses around
  # (spec + obs) in the subtraction, which inflated `total` (and skewed
  # `unsamp`) by 2 * obsCom; fixed here so total == nrow(grd_100).
  total = specCom....double.. + obsCom....double.. +
    (nrow(grd_100) - (specCom....double.. + obsCom....double..)),
  percSpec = specCom....double../total,
  obsCom = obsCom....double../total,
  unsamp = (nrow(grd_100) - (specCom....double.. + obsCom....double..))/total,
  # Share of completed cells attributable to each record type.
  ratioS = specCom....double../(specCom....double..+obsCom....double..),
  ratioO = obsCom....double../(specCom....double..+obsCom....double..))
# ggplot()+
# geom_line(aes(x=yearly$startYear....integer.., y=yearly$specCom....double..), color="red4", lwd=1.1)+
# geom_point(aes(x=yearly$startYear....integer.., y=yearly$specCom....double..), color="red4", fill="white", pch=21, size=2)+
# geom_line(aes(x=yearly$startYear....integer.., y=yearly$obsCom....double..), color="royalblue4", lwd=1.1)+
# geom_point(aes(x=yearly$startYear....integer.., y=yearly$obsCom....double..), color="royalblue4", fill="white", pch=21, size=2)+
# labs(y="Number of 100km Cells Over 80% Complete", x="Years of Sampling")+
# scale_fill_manual(values=c("red4", "royalblue4"), labels=c("Specimen Data", "Observation Data"))+
# theme_minimal()
# ggplot()+
# geom_line(aes(x=yearly$startYear....integer.., y=yearly$specAve....double..), color="red4", lwd=1.1)+
# geom_point(aes(x=yearly$startYear....integer.., y=yearly$specAve....double..), color="red4", fill="white", pch=21, size=2)+
# geom_line(aes(x=yearly$startYear....integer.., y=yearly$obsAve....double..), color="royalblue4", lwd=1.1)+
# geom_point(aes(x=yearly$startYear....integer.., y=yearly$obsAve....double..), color="royalblue4", fill="white", pch=21, size=2)+
# labs(y="Average Sampling Completeness Across All Cells", x="Years of Sampling")+
# theme_minimal()
# Reshape the biyearly counts to long form for stacked bars
# (one row per window x record type).
yearly.long <- yearly %>% dplyr::select(startYear....integer.., specCom....double.., obsCom....double..) %>% reshape2::melt(id.vars="startYear....integer..")
# Supplemental 1
# Panel (b): cells >= 80% complete per biyearly window, by record type.
p1 <- ggplot()+
  geom_bar(aes(y=value, x=startYear....integer.., fill=variable), data=yearly.long, stat="identity", width = 1.8)+
  theme_minimal()+labs(x="Sampling Start Year", y="Count of Cells over 80% Complete")+
  scale_x_continuous(breaks=seq(1950, 2019, 2))+
  scale_fill_manual(values=c("red3", "royalblue3"), labels = c("Museum Specimens", "Community Observations"))+
  theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.2), panel.background = element_rect(fill = "white", colour = "grey50"),
        panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(), legend.title = element_blank(),
        legend.position = "none", axis.text=element_text(size=12)) + labs(tag="(b)")
# Record counts per year by basis, normalising legacy basis codes first.
sup <- as.data.frame(occur_inRM) %>% mutate(basis=recode(basis, preservedspecimen="PRESERVED_SPECIMEN",
                                                         OBSERVATION="HUMAN_OBSERVATION",
                                                         MATERIAL_SAMPLE="PRESERVED_SPECIMEN"))
sup <- sup %>% filter(basis %in% c("HUMAN_OBSERVATION",
                                   "PRESERVED_SPECIMEN")) %>% group_by(year, basis) %>% tally()
# Panel (a): yearly record totals by basis.
# NOTE(review): here the fill values are reversed relative to p1 while
# the labels are not (factor levels sort HUMAN_OBSERVATION first), so the
# label-to-colour pairing differs between panels; the legend is hidden
# (legend.position = "none") so this is not visible — confirm intent.
p2 <- ggplot()+
  geom_bar(aes(y=n, x=year, fill=basis), data=sup, stat="identity", width=1)+
  theme_minimal()+labs(x="Sampling Year", y="Number of Records")+
  scale_fill_manual(values=c("royalblue3", "red3"), labels = c("Museum Specimens", "Community Observations"))+
  theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.2), panel.background = element_rect(fill = "white", colour = "grey50"),
        panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(), legend.title = element_blank(),
        legend.position = "none", axis.text=element_text(size=12))+
  scale_y_continuous(label=comma) + labs(tag="(a)")
tiff("Supp1.tiff", units="in", width=7, height=6, res=350)
grid.arrange(p2, p1, nrow=2)
dev.off()
library(ggsignif)
# Between family ANOVAs: completeness ~ family, with Tukey HSD post-hoc,
# separately for all records, observations only, and specimens only.
fams <- as.data.frame(grd_100) %>% dplyr::select(FID, nymRatio, papRatio, lycRatio, hesRatio, pieRatio)
fams <- fams %>% reshape2::melt(id.vars="FID")
aov.all <- aov(value~variable, data=fams)
TukeyHSD(aov.all)
fams.obs <- as.data.frame(grd_100) %>% dplyr::select(FID, nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)
fams.obs <- fams.obs %>% reshape2::melt(id.vars="FID")
aov.obs <- aov(value~variable, data=fams.obs)
TukeyHSD(aov.obs)
fams.spec <- as.data.frame(grd_100) %>% dplyr::select(FID, nymspecRatio, papspecRatio, lycspecRatio, hesspecRatio, piespecRatio)
fams.spec <- fams.spec %>% reshape2::melt(id.vars="FID")
aov.spec <- aov(value~variable, data=fams.spec)
TukeyHSD(aov.spec)
# Chi-square input: cells at least 50% complete overall, with per-family
# completeness by record type.
fams.chi <- as.data.frame(grd_100) %>% filter(logInRMOccur_Buff >= 0.5) %>% dplyr::select(FID, nymspecRatio, papspecRatio, lycspecRatio, hesspecRatio, piespecRatio,
                                                                                         nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)
# report counts for chi-square (cells > 50% complete per family and type)
nrow(filter(fams.chi, nymspecRatio > 0.5))
nrow(filter(fams.chi, nymobsRatio > 0.5))
nrow(filter(fams.chi, papspecRatio > 0.5))
nrow(filter(fams.chi, papobsRatio > 0.5))
nrow(filter(fams.chi, lycspecRatio > 0.5))
nrow(filter(fams.chi, lycobsRatio > 0.5))
nrow(filter(fams.chi, hesspecRatio > 0.5))
nrow(filter(fams.chi, hesobsRatio > 0.5))
nrow(filter(fams.chi, piespecRatio > 0.5))
nrow(filter(fams.chi, pieobsRatio > 0.5))
#write.csv(fams.chi, "chi_raw.csv")
library(chisq.posthoc.test)
# NOTE(review): chi_raw.csv is read here but its write.csv above is
# commented out — the file must already exist (presumably a manually
# assembled contingency table); confirm its provenance.
fams.chi <- read.csv("chi_raw.csv")
rownames(fams.chi) <- fams.chi[,1]
fams.chi <- fams.chi[,-1]
chisq.test(fams.chi)
chisq.posthoc.test(fams.chi, method="bonferroni", round=6)
# Bar charts of the chi-square cell counts per family (external
# chi_plots.csv must exist); only Nymphalidae and Pieridae panels are
# coloured by record type, the rest use the default fill.
fams.chi <- read.csv("chi_plots.csv")
chi.nym <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Nym"), aes(x=Type, y=Value, col=Type, fill=Type), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+scale_fill_manual(values=c("royalblue3", "red3"))+
  scale_color_manual(values=c("royalblue3", "red3"))+ylim(0,600)+
  theme_minimal() + labs(tag=" ")
chi.pap <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Pap"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal()+ labs(tag=" ")
chi.lyc <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Lyc"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal()+ labs(tag=" ")
chi.hes <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Hes"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal() + labs(tag="(d)")
chi.pie <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Pie"), aes(x=Type, y=Value, col=Type, fill=Type), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+scale_fill_manual(values=c("royalblue3", "red3"))+
  scale_color_manual(values=c("royalblue3", "red3"))+ylim(0,600)+
  theme_minimal() + labs(tag=" ")
library(ggsignif)
# Tukey HSD figure: per-family completeness boxplots with pairwise
# significance brackets (ggsignif), three panels — (a) all records,
# (b) observations, (c) specimens. Bracket y-positions are fixed so
# some brackets sit below 0 and some above 1.
png("tukeys.png", units="cm", height = 12, width = 20, res=400)
p1 <- ggplot(fams, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymRatio", "papRatio"),
                               c("nymRatio", "lycRatio"),
                               c("nymRatio", "hesRatio"),
                               c("nymRatio", "pieRatio"),
                               c("lycRatio", "papRatio"),
                               c("hesRatio", "papRatio"),
                               c("pieRatio", "papRatio"),
                               c("hesRatio", "lycRatio"),
                               c("pieRatio", "lycRatio"),
                               c("pieRatio", "hesRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal()+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(a)")
p2 <- ggplot(fams.obs, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymobsRatio", "papobsRatio"),
                               c("nymobsRatio", "lycobsRatio"),
                               c("nymobsRatio", "hesobsRatio"),
                               c("nymobsRatio", "pieobsRatio"),
                               c("lycobsRatio", "papobsRatio"),
                               c("hesobsRatio", "papobsRatio"),
                               c("pieobsRatio", "papobsRatio"),
                               c("hesobsRatio", "lycobsRatio"),
                               c("pieobsRatio", "lycobsRatio"),
                               c("pieobsRatio", "hesobsRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal() + ylab("")+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(b)")
p3 <- ggplot(fams.spec, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymspecRatio", "papspecRatio"),
                               c("nymspecRatio", "lycspecRatio"),
                               c("nymspecRatio", "hesspecRatio"),
                               c("nymspecRatio", "piespecRatio"),
                               c("lycspecRatio", "papspecRatio"),
                               c("hesspecRatio", "papspecRatio"),
                               c("piespecRatio", "papspecRatio"),
                               c("hesspecRatio", "lycspecRatio"),
                               c("piespecRatio", "lycspecRatio"),
                               c("piespecRatio", "hesspecRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal() + ylab("")+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(c)")
grid.arrange(p1, p2, p3, ncol=3, nrow=1)
dev.off()
# decades
# i = 1950
# while(i < 2020){
# print(paste("Processing years: ", i, "-", i+9))
#
# yearly[i-1949,]$startYear <- i
# yearly[i-1949,]$endYear <- i+1
#
# grd_100_biyearlyspec <- occur_spec %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
# grd_100_biyearlyobs <- occur_obse %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
#
# yearly[i-1949,]$specCom <- mean(grd_100_biyearlyspec$n_speciesInRMSpec)
# yearly[i-1949,]$obsCom <- mean(grd_100_biyearlyspec$n_speciesInRMObse)
#
# i = i+10
# }
##################
# Visualizations #
##################
# 100 km fishnet no buffer
# km100_noBuffer <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=noBuff100), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=300)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_noBuffer.pdf", plot=km100_noBuffer)
#
# # 100 km fishnet buffer
# km100_Buffer <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_Buffer.pdf", plot=km100_Buffer)
#
# # visualize the occurrence richness values
# # 100 km all occurrences
# km100_allOccur <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesAll)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_allOccur.pdf", plot=km100_allOccur)
#
# # 100 km in range occurrences
# km100_allOccurInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRM)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_allOccurInRM.pdf", plot=km100_allOccurInRM)
#
# # 100 km in range occurrences from specimens
# km100_specimensInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRMSpec)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_specimensInRM.pdf", plot=km100_specimensInRM)
#
# # 100 km in range occurrences from human observations
# km100_observationsInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRMObse)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_observationsInRM.pdf", plot=km100_observationsInRM)
#
# # 100 km all occurrences no buffer
# ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(logAllOccur_NoBuff)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # 100 km range occurrences no buffer
# ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=logInRMOccur_NoBuff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
###########################################################
##### ALL RECORDS IN RANGE MAPS WITH BUFFERING FIGURE #####
###########################################################
# 100 km resolution
# composite data
# Completeness map for one grid resolution and one completeness metric.
# `grd` is an sf grid, `fill_col` an (unquoted) completeness column in
# [0, 1], `tag` the panel label. Styling matches the original nine
# hand-written panels exactly (blue -> yellow -> red gradient, legend
# suppressed, fixed North-America extent). Uses tidy-eval embracing
# ({{ }}) so the column can be passed unquoted.
plot_completeness_map <- function(grd, fill_col, tag) {
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(grd, mapping = aes(fill = {{ fill_col }}), color = NA, alpha = 0.7,
            show.legend = FALSE) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-2838545, 4563455)) +
    theme_void() + labs(tag = tag)
}
# Rows: 100 / 200 / 400 km resolution. Columns: composite data,
# museum specimens, human observations. Only the top-left three panels
# carry tags, as in the original figure.
grd_100_p1 <- plot_completeness_map(grd_100, logInRMOccur_Buff, "(a)")
grd_100_p2 <- plot_completeness_map(grd_100, logInRMSpec_Buff, "(b)")
grd_100_p3 <- plot_completeness_map(grd_100, logInRMObse_Buff, "(c)")
grd_200_p1 <- plot_completeness_map(grd_200, logInRMOccur_Buff, " ")
grd_200_p2 <- plot_completeness_map(grd_200, logInRMSpec_Buff, " ")
grd_200_p3 <- plot_completeness_map(grd_200, logInRMObse_Buff, " ")
grd_400_p1 <- plot_completeness_map(grd_400, logInRMOccur_Buff, " ")
grd_400_p2 <- plot_completeness_map(grd_400, logInRMSpec_Buff, " ")
grd_400_p3 <- plot_completeness_map(grd_400, logInRMObse_Buff, " ")
tiff("Basis.tiff", units="cm", width=16.6, height=18, res=400)
grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3,
             grd_200_p1, grd_200_p2, grd_200_p3,
             grd_400_p1, grd_400_p2, grd_400_p3, nrow=3, ncol=3)
dev.off()
# # 800 km resolution
# # composite data
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMOccur_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # museum specimens
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMSpec_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # human observations
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMObse_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# g4 <- grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, nrow=3)
########################################################
##### FAMILY SPECIFIC SPECIES RICHNESS FROM RANGES #####
########################################################
# 100 km resolution #
# Hesperiidae
# grd_100_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_100_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_100_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_100_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_100_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# # 200 km resolution #
# # Hesperiidae
# grd_200_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_200_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_200_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_200_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_200_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_200_p1, grd_200_p2, grd_200_p3, grd_200_p4, grd_200_p5, nrow=3, ncol=2)
#
# # 400 km resolution #
# # Hesperiidae
# grd_400_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_400_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_400_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_400_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_400_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_400_p1, grd_400_p2, grd_400_p3, grd_400_p4, grd_400_p5, nrow=3, ncol=2)
#
# # 800 km resolution #
# # Hesperiidae
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_800_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_800_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, grd_800_p4, grd_800_p5, nrow=3, ncol=2)
##### FAMILIES LEVEL COMPLETENESS #####
# 100 km resolution observations only
# Helper: family-level completeness choropleth on the equal-area grid.
# `fill_col` names the 0-1 completeness ratio column, `tag` is the panel
# label, `show_legend` toggles the shared fill legend. Defined locally to
# replace five near-identical copy-pasted ggplot pipelines.
plot_ratio_map <- function(grid_sf, fill_col, tag = " ", show_legend = FALSE) {
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(grid_sf, mapping = aes(fill = .data[[fill_col]]), color = NA,
            alpha = 0.7, show.legend = show_legend) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-2838545, 4563455)) +
    theme_void() +
    # fill label set explicitly so the legend title matches the column name
    labs(tag = tag, fill = fill_col)
}
# Hesperiidae (legend drawn on this panel only)
grd_100_p1_obse <- plot_ratio_map(grd_100, "hesobsRatio", tag = "(c)", show_legend = TRUE)
# Lycaenidae
grd_100_p2_obse <- plot_ratio_map(grd_100, "lycobsRatio")
# Nymphalidae
grd_100_p3_obse <- plot_ratio_map(grd_100, "nymobsRatio")
# Papilionidae
grd_100_p4_obse <- plot_ratio_map(grd_100, "papobsRatio")
# Pieridae
grd_100_p5_obse <- plot_ratio_map(grd_100, "pieobsRatio")
# tiff("100kmFamilyObse.tiff", units="in", width=7, height=10, res=350)
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# dev.off()
# 100 km resolution specimens only
# Helper: family-level completeness choropleth on the equal-area grid
# (identical helper to the other family panels; redefined so this section
# stands alone). `fill_col` names the 0-1 ratio column.
plot_ratio_map <- function(grid_sf, fill_col, tag = " ", show_legend = FALSE) {
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(grid_sf, mapping = aes(fill = .data[[fill_col]]), color = NA,
            alpha = 0.7, show.legend = show_legend) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-2838545, 4563455)) +
    theme_void() +
    labs(tag = tag, fill = fill_col)
}
# Hesperiidae
grd_100_p1_spec <- plot_ratio_map(grd_100, "hesspecRatio", tag = "(b)")
# Lycaenidae
grd_100_p2_spec <- plot_ratio_map(grd_100, "lycspecRatio")
# Nymphalidae
grd_100_p3_spec <- plot_ratio_map(grd_100, "nymspecRatio")
# Papilionidae
grd_100_p4_spec <- plot_ratio_map(grd_100, "papspecRatio")
# Pieridae
grd_100_p5_spec <- plot_ratio_map(grd_100, "piespecRatio")
# tiff("100kmFamilySpec.tiff", units="in", width=7, height=10, res=350)
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# dev.off()
# 100 km all records
# Helper: family-level completeness choropleth on the equal-area grid
# (redefined locally so this section stands alone). `fill_col` names the
# 0-1 completeness ratio column.
plot_ratio_map <- function(grid_sf, fill_col, tag = " ", show_legend = FALSE) {
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(grid_sf, mapping = aes(fill = .data[[fill_col]]), color = NA,
            alpha = 0.7, show.legend = show_legend) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-2838545, 4563455)) +
    theme_void() +
    labs(tag = tag, fill = fill_col)
}
# Hesperiidae
grd_100_p1 <- plot_ratio_map(grd_100, "hesRatio", tag = "(a)")
# Lycaenidae
grd_100_p2 <- plot_ratio_map(grd_100, "lycRatio")
# Nymphalidae
grd_100_p3 <- plot_ratio_map(grd_100, "nymRatio")
# Papilionidae
grd_100_p4 <- plot_ratio_map(grd_100, "papRatio")
# Pieridae
grd_100_p5 <- plot_ratio_map(grd_100, "pieRatio")
# Composite figure: rows = families (Hesperiidae..Pieridae); columns =
# all records / specimens / observations / chi-square panel.
# NOTE(review): the chi.* objects are built elsewhere in this script and
# must exist before this point.
tiff("100kmFamilyAll_withChi.tiff", units="cm", width=19.5, height=20, res=350)
grid.arrange(grd_100_p1, grd_100_p1_spec, grd_100_p1_obse, chi.hes,
             grd_100_p2, grd_100_p2_spec, grd_100_p2_obse, chi.lyc,
             grd_100_p3, grd_100_p3_spec, grd_100_p3_obse, chi.nym,
             grd_100_p4, grd_100_p4_spec, grd_100_p4_obse, chi.pap,
             grd_100_p5, grd_100_p5_spec, grd_100_p5_obse, chi.pie,
             nrow=5, ncol=4)
dev.off()
# # 200 km resolution
# # Hesperiidae
# grd_200_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_200_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_200_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_200_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_200_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_200_p1, grd_200_p2, grd_200_p3, grd_200_p4, grd_200_p5, nrow=5, ncol=1)
#
# # 400 km resolution
# # Hesperiidae
# grd_400_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_400_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_400_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_400_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_400_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_400_p1, grd_400_p2, grd_400_p3, grd_400_p4, grd_400_p5, nrow=5, ncol=1)
#
# # 800 km resolution
# # Hesperiidae
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_800_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_800_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, grd_800_p4, grd_800_p5, nrow=5, ncol=1)
################
# BOREAL PLOTS #
################
# Boreal/tundra ecoregion polygon, reprojected to the analysis CRS and
# cropped to the extent of the 100 km grid.
boreal <- st_read("borealtundra.shp")
boreal <- st_transform(boreal, st_crs(crs.1))
boreal <- st_crop(boreal, grd_100)
# FIX: the original reused the filename "100kmFamilyAll_withChi.tiff" here,
# which would silently overwrite the family-completeness figure written above.
tiff("borealMap.tiff", units="cm", width=10, height=10, res=350)
# wrap in print() so the map is actually drawn when this script is source()'d
print(
  ggplot()+
    geom_sf(data=land, fill="grey", color=NA)+
    geom_sf(data=boreal, fill="#234F1E", color=NA)+
    coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
    theme_void()+
    theme(legend.title = element_blank())
)
dev.off()
# 100km scale over time #
# Helper: completeness map for one time slice. Cells whose completeness
# meets the 0.25 threshold in every period listed in `req_cols` are
# outlined in black; all other cells are outlined in white.
plot_time_map <- function(grid_sf, fill_col, req_cols) {
  d <- grid_sf
  # cumulative "sufficiently sampled" flag across all required periods
  d$suff <- Reduce(`&`, lapply(req_cols, function(cl) d[[cl]] >= 0.25))
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(data = boreal, fill = "grey", color = NA) +
    geom_sf(d, mapping = aes(fill = .data[[fill_col]], color = suff),
            alpha = 0.7, show.legend = FALSE) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    scale_color_manual(values = c(`TRUE` = "black", `FALSE` = "white")) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-260000, 4563455)) +
    theme_void() +
    theme(legend.title = element_blank())
}
p1 <- plot_time_map(grd_100, "t1Ratio", "t1Ratio")
p2 <- plot_time_map(grd_100, "t2Ratio", c("t1Ratio", "t2Ratio"))
# FIX: original p3 had a stray trailing comma in scale_fill_gradient2(),
# which errors at evaluation ("argument is empty").
p3 <- plot_time_map(grd_100, "t3Ratio", c("t1Ratio", "t2Ratio", "t3Ratio"))
# FIX: original p4 omitted t3Ratio from the cumulative condition
# (t1 & t2 & t4); the pattern implies t1 & t2 & t3 & t4.
p4 <- plot_time_map(grd_100, "t4Ratio", c("t1Ratio", "t2Ratio", "t3Ratio", "t4Ratio"))
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 200 km scale over time #
# Helper: completeness map for one time slice (same helper as the 100 km
# section; redefined so this section stands alone). Cells meeting the 0.25
# threshold in every period in `req_cols` get a black outline, others white.
plot_time_map <- function(grid_sf, fill_col, req_cols) {
  d <- grid_sf
  # cumulative "sufficiently sampled" flag across all required periods
  d$suff <- Reduce(`&`, lapply(req_cols, function(cl) d[[cl]] >= 0.25))
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(data = boreal, fill = "grey", color = NA) +
    geom_sf(d, mapping = aes(fill = .data[[fill_col]], color = suff),
            alpha = 0.7, show.legend = FALSE) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    scale_color_manual(values = c(`TRUE` = "black", `FALSE` = "white")) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-260000, 4563455)) +
    theme_void() +
    theme(legend.title = element_blank())
}
p1 <- plot_time_map(grd_200, "t1Ratio", "t1Ratio")
p2 <- plot_time_map(grd_200, "t2Ratio", c("t1Ratio", "t2Ratio"))
# FIX: original p3 had a stray trailing comma in scale_fill_gradient2().
p3 <- plot_time_map(grd_200, "t3Ratio", c("t1Ratio", "t2Ratio", "t3Ratio"))
# FIX: original p4 omitted t3Ratio from the cumulative condition.
p4 <- plot_time_map(grd_200, "t4Ratio", c("t1Ratio", "t2Ratio", "t3Ratio", "t4Ratio"))
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 400 km scale #
# Helper: completeness map for one time slice (redefined locally so this
# section stands alone). Cells meeting the 0.25 threshold in every period
# in `req_cols` get a black outline, others white.
plot_time_map <- function(grid_sf, fill_col, req_cols) {
  d <- grid_sf
  # cumulative "sufficiently sampled" flag across all required periods
  d$suff <- Reduce(`&`, lapply(req_cols, function(cl) d[[cl]] >= 0.25))
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(data = boreal, fill = "grey", color = NA) +
    geom_sf(d, mapping = aes(fill = .data[[fill_col]], color = suff),
            alpha = 0.7, show.legend = FALSE) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    scale_color_manual(values = c(`TRUE` = "black", `FALSE` = "white")) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-260000, 4563455)) +
    theme_void() +
    theme(legend.title = element_blank())
}
p1 <- plot_time_map(grd_400, "t1Ratio", "t1Ratio")
p2 <- plot_time_map(grd_400, "t2Ratio", c("t1Ratio", "t2Ratio"))
# FIX: original p3 had a stray trailing comma in scale_fill_gradient2().
p3 <- plot_time_map(grd_400, "t3Ratio", c("t1Ratio", "t2Ratio", "t3Ratio"))
# FIX: original p4 omitted t3Ratio from the cumulative condition.
p4 <- plot_time_map(grd_400, "t4Ratio", c("t1Ratio", "t2Ratio", "t3Ratio", "t4Ratio"))
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 800 km scale #
# Helper: completeness map for one time slice (redefined locally so this
# section stands alone). Cells meeting the 0.25 threshold in every period
# in `req_cols` get a black outline, others white.
plot_time_map <- function(grid_sf, fill_col, req_cols) {
  d <- grid_sf
  # cumulative "sufficiently sampled" flag across all required periods
  d$suff <- Reduce(`&`, lapply(req_cols, function(cl) d[[cl]] >= 0.25))
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(data = boreal, fill = "grey", color = NA) +
    geom_sf(d, mapping = aes(fill = .data[[fill_col]], color = suff),
            alpha = 0.7, show.legend = FALSE) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    scale_color_manual(values = c(`TRUE` = "black", `FALSE` = "white")) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-260000, 4563455)) +
    theme_void() +
    theme(legend.title = element_blank())
}
p1 <- plot_time_map(grd_800, "t1Ratio", "t1Ratio")
p2 <- plot_time_map(grd_800, "t2Ratio", c("t1Ratio", "t2Ratio"))
# FIX: original p3 had a stray trailing comma in scale_fill_gradient2().
p3 <- plot_time_map(grd_800, "t3Ratio", c("t1Ratio", "t2Ratio", "t3Ratio"))
# FIX: original p4 omitted t3Ratio from the cumulative condition.
p4 <- plot_time_map(grd_800, "t4Ratio", c("t1Ratio", "t2Ratio", "t3Ratio", "t4Ratio"))
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
|
/analysis.R
|
no_license
|
vmshirey/butterflySampling
|
R
| false
| false
| 95,153
|
r
|
#
#
#
# load libraries
library(tidyverse); library(sp); library(sf); library(raster); library(data.table); library(mapdata)
library(maptools); library(gridExtra); library(nngeo); library(stringr); library(rgdal); library(scales)
# Albers equal-area CRS (NAD83) used for every layer in the analysis.
crs.1 <- "+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m"
# basemap: Natural Earth 50 m country polygons, reprojected to the analysis CRS
land <- st_read("ne_50m_admin_0_countries.shp")
# reuse crs.1 rather than repeating the projection string literal (same string)
land <- st_transform(land, st_crs(crs.1))
# ensemble forward climate-velocity raster (RCP 8.5, 2080s), resampled to 10 km
rcp8.5_2080 <- raster("covariates/fwvel_ensemble_rcp85_2085.tif")
rcp8.5_2080 <- projectRaster(rcp8.5_2080, crs = crs.1, res = 10000, method = "bilinear")
# read in base grid data: one analysis grid per resolution; each cell gets a
# unique FID and the shared Albers CRS.
grd_100 <- st_read("100km_gridClean.shp") %>% dplyr::select(X,Y)
grd_100 <- unique(grd_100) %>% mutate(FID=row_number())
st_crs(grd_100) <- crs.1
grd_200 <- st_read("200km_gridClean.shp") %>% dplyr::select(X,Y)
grd_200 <- unique(grd_200) %>% mutate(FID=row_number())
st_crs(grd_200) <- crs.1  # FIX: was st_crs(grd_100) (copy-paste) — 200 km grid never got its CRS set
grd_400 <- st_read("400km_gridClean.shp") %>% dplyr::select(X,Y)
grd_400 <- unique(grd_400) %>% mutate(FID=row_number())
st_crs(grd_400) <- crs.1  # FIX: was st_crs(grd_100) (copy-paste) — 400 km grid never got its CRS set
# grd_800 <- st_read("800km_gridClean.shp") %>% dplyr::select(X,Y)
# grd_800 <- unique(grd_800) %>% mutate(FID=row_number())
# st_crs(grd_100) <- 102008
# human footprint map (mean pressure index per 10 km cell)
footprint <- raster("covariates/footprint_clipped.tif")
footprint <- projectRaster(footprint,
                           crs="+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m",
                           res=10000, method="bilinear")
footprint <- crop(footprint, grd_100)
# Values above 50 treated as missing — presumably nodata/sentinel values in the
# source raster; TODO confirm against the footprint layer's documentation.
footprint[footprint > 50] <- NA
# WWF biomes (categorical raster of biome codes)
biomes <- raster("covariates/WWFBiomes.tif")
crs(biomes) <- "+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m"
biomes <- crop(biomes, grd_100)
# NOTE(review): bilinear resampling of a categorical raster blends class codes
# across cell edges; nearest-neighbour ("ngb") is the usual choice — confirm
# this is intended (codes are rounded/floored again further down).
biomes <- projectRaster(biomes,
                        crs="+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m",
                        res=10000, method="bilinear")
biomes[biomes > 50] <- NA
# read in fishnet data and count overall richness by overlap
# Each fishnet CSV appears to hold one point per species x grid cell derived
# from (buffered) range maps — a scientificName column is joined on below — so
# counting points per cell yields the expected ("range-map") species richness.
# TODO confirm the fishnet CSV schema against the preprocessing scripts.
# 100 km resolution (buffered range maps)
fsh_100_Buff <- fread("100km_Buffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
fsh_100_Buff <- st_as_sf(fsh_100_Buff, coords=c("X", "Y"))
st_crs(fsh_100_Buff) <- crs.1
grd_100$Buff100 <- lengths(st_intersects(grd_100, fsh_100_Buff))
# 200 km resolution (unbuffered range maps only at this resolution)
fsh_200_noBuff <- fread("200km_NoBuffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
fsh_200_noBuff <- st_as_sf(fsh_200_noBuff, coords=c("X", "Y"))
st_crs(fsh_200_noBuff) <- crs.1
grd_200$noBuff200 <- lengths(st_intersects(grd_200, fsh_200_noBuff))
# 400 km resolution (unbuffered)
fsh_400_noBuff <- fread("400km_NoBuffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
fsh_400_noBuff <- st_as_sf(fsh_400_noBuff, coords=c("X", "Y"))
st_crs(fsh_400_noBuff) <- crs.1
grd_400$noBuff400 <- lengths(st_intersects(grd_400, fsh_400_noBuff))
# 800 km resolution (disabled)
# fsh_800_noBuff <- fread("800km_NoBuffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
# fsh_800_noBuff <- st_as_sf(fsh_800_noBuff, coords=c("X", "Y"))
# st_crs(fsh_800_noBuff) <- 102008
#
# grd_800$noBuff800 <- lengths(st_intersects(grd_800, fsh_800_noBuff))
#
# fsh_800_Buff <- fread("800km_Buffer.csv", header=TRUE, sep=",", stringsAsFactors=FALSE)
# fsh_800_Buff <- st_as_sf(fsh_800_Buff, coords=c("X", "Y"))
# st_crs(fsh_800_Buff) <- 102008
#
# grd_800$Buff800 <- lengths(st_intersects(grd_800, fsh_800_Buff))
############################################
# read in and merge occurrence information #
############################################
# Standardise three occurrence sources to common columns:
# (species, X, Y, year, inRM, basis, family).
# eButterfly: year parsed as the leading 4 digits of the date string; every
# record is a human observation.
ebut <- as_tibble(fread("ebut_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE))
ebut <- ebut %>%
  mutate(year=as.numeric(str_extract(Date.Observed, "^\\d{4}"))) %>%
  dplyr::select(Family, species, X, Y, year, inRM) %>%
  mutate(basis="HUMAN_OBSERVATION", family=str_to_sentence(Family)) %>%
  dplyr::select(-Family)
# iDigBio (Darwin Core field names)
idig <- as_tibble(fread("idig_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE))
idig <- idig %>%
  mutate(year=as.numeric(str_extract(dwc.eventDate, "^\\d{4}"))) %>%
  dplyr::select(dwc.family, species, X, Y, inRM, year, basis=dwc.basisOfRecord) %>%
  mutate(family=str_to_sentence(dwc.family)) %>%
  dplyr::select(-dwc.family)
# GBIF (year already present)
gbif <- as_tibble(fread("total_gbif_intersections.csv", header=TRUE, sep=",", stringsAsFactors=FALSE))
gbif <- gbif %>%
  dplyr::select(family, species, X, Y, inRM, year, basis=basisOfRecord) %>%
  mutate(family=str_to_sentence(family))
occur <- rbind(ebut, idig, gbif)
# species -> family lookup used to annotate the fishnet data below
taxa <- unique(dplyr::select(occur, family, species))
rm(list=c("ebut", "idig", "gbif"))  # free the per-source tables
##################################
# update families in fishnetting #
##################################
# Attach the family name to each fishnet species point. Every fishnet CSV
# carries a scientificName column; `taxa` (built above from the occurrence
# data) maps species -> family. The transformation is identical at every
# resolution, so one helper replaces three copy-pasted pipelines.
add_family <- function(fsh) {
  fsh %>%
    left_join(taxa, by=c("scientificName"="species")) %>%
    dplyr::select(-scientificName) %>%
    mutate(family=str_to_sentence(family))
}
# 100 km resolution
fsh_100_Buff <- add_family(fsh_100_Buff)
# 200 km resolution
fsh_200_noBuff <- add_family(fsh_200_noBuff)
# 400 km resolution
fsh_400_noBuff <- add_family(fsh_400_noBuff)
# 800 km resolution (disabled)
# fsh_800_noBuff <- add_family(fsh_800_noBuff)
###################################
# filter fishnets to family level #
###################################
# Expected richness per cell and per family: subset the fishnet points by
# family, then count points falling in each grid cell.
# 100 km resolution
fsh_100_Buff_nym <- filter(fsh_100_Buff, family=="Nymphalidae")
grd_100$Buff100_nym <- lengths(st_intersects(grd_100, fsh_100_Buff_nym))
fsh_100_Buff_pap <- filter(fsh_100_Buff, family=="Papilionidae")
grd_100$Buff100_pap <- lengths(st_intersects(grd_100, fsh_100_Buff_pap))
fsh_100_Buff_lyc <- filter(fsh_100_Buff, family=="Lycaenidae")
grd_100$Buff100_lyc <- lengths(st_intersects(grd_100, fsh_100_Buff_lyc))
fsh_100_Buff_hes <- filter(fsh_100_Buff, family=="Hesperiidae")
grd_100$Buff100_hes <- lengths(st_intersects(grd_100, fsh_100_Buff_hes))
fsh_100_Buff_pie <- filter(fsh_100_Buff, family=="Pieridae")
grd_100$Buff100_pie <- lengths(st_intersects(grd_100, fsh_100_Buff_pie))
fsh_100_Buff_rio <- filter(fsh_100_Buff, family=="Riodinidae")
grd_100$Buff100_rio <- lengths(st_intersects(grd_100, fsh_100_Buff_rio))
# 200 km resolution
fsh_200_noBuff_nym <- filter(fsh_200_noBuff, family=="Nymphalidae")
grd_200$noBuff200_nym <- lengths(st_intersects(grd_200, fsh_200_noBuff_nym))
fsh_200_noBuff_pap <- filter(fsh_200_noBuff, family=="Papilionidae")
grd_200$noBuff200_pap <- lengths(st_intersects(grd_200, fsh_200_noBuff_pap))
fsh_200_noBuff_lyc <- filter(fsh_200_noBuff, family=="Lycaenidae")
grd_200$noBuff200_lyc <- lengths(st_intersects(grd_200, fsh_200_noBuff_lyc))
fsh_200_noBuff_hes <- filter(fsh_200_noBuff, family=="Hesperiidae")
grd_200$noBuff200_hes <- lengths(st_intersects(grd_200, fsh_200_noBuff_hes))
fsh_200_noBuff_pie <- filter(fsh_200_noBuff, family=="Pieridae")
grd_200$noBuff200_pie <- lengths(st_intersects(grd_200, fsh_200_noBuff_pie))
fsh_200_noBuff_rio <- filter(fsh_200_noBuff, family=="Riodinidae")
grd_200$noBuff200_rio <- lengths(st_intersects(grd_200, fsh_200_noBuff_rio))
# 400 km resolution
fsh_400_noBuff_nym <- filter(fsh_400_noBuff, family=="Nymphalidae")
grd_400$noBuff400_nym <- lengths(st_intersects(grd_400, fsh_400_noBuff_nym))
fsh_400_noBuff_pap <- filter(fsh_400_noBuff, family=="Papilionidae")
grd_400$noBuff400_pap <- lengths(st_intersects(grd_400, fsh_400_noBuff_pap))
fsh_400_noBuff_lyc <- filter(fsh_400_noBuff, family=="Lycaenidae")
grd_400$noBuff400_lyc <- lengths(st_intersects(grd_400, fsh_400_noBuff_lyc))
fsh_400_noBuff_hes <- filter(fsh_400_noBuff, family=="Hesperiidae")
grd_400$noBuff400_hes <- lengths(st_intersects(grd_400, fsh_400_noBuff_hes))
fsh_400_noBuff_pie <- filter(fsh_400_noBuff, family=="Pieridae")
grd_400$noBuff400_pie <- lengths(st_intersects(grd_400, fsh_400_noBuff_pie))
fsh_400_noBuff_rio <- filter(fsh_400_noBuff, family=="Riodinidae")
grd_400$noBuff400_rio <- lengths(st_intersects(grd_400, fsh_400_noBuff_rio))
# 800 km resolution (disabled)
# fsh_800_Buff_nym <- filter(fsh_800_noBuff, family=="Nymphalidae")
# grd_800$Buff800_nym <- lengths(st_intersects(grd_800, fsh_800_Buff_nym))
#
# fsh_800_Buff_pap <- filter(fsh_800_noBuff, family=="Papilionidae")
# grd_800$Buff800_pap <- lengths(st_intersects(grd_800, fsh_800_Buff_pap))
#
# fsh_800_Buff_lyc <- filter(fsh_800_noBuff, family=="Lycaenidae")
# grd_800$Buff800_lyc <- lengths(st_intersects(grd_800, fsh_800_Buff_lyc))
#
# fsh_800_Buff_hes <- filter(fsh_800_noBuff, family=="Hesperiidae")
# grd_800$Buff800_hes <- lengths(st_intersects(grd_800, fsh_800_Buff_hes))
#
# fsh_800_Buff_pie <- filter(fsh_800_noBuff, family=="Pieridae")
# grd_800$Buff800_pie <- lengths(st_intersects(grd_800, fsh_800_Buff_pie))
#
# fsh_800_Buff_rio <- filter(fsh_800_noBuff, family=="Riodinidae")
# grd_800$Buff800_rio <- lengths(st_intersects(grd_800, fsh_800_Buff_rio))
#############################################
# convert occurrence data to spatial object #
#############################################
# filter for records between 1950-2019, then convert to an sf point layer in
# the common CRS
occur <- occur %>% filter(between(year, 1950, 2019))
occur <- st_as_sf(occur, coords=c("X", "Y"))
st_crs(occur) <- crs.1
###############################################
# apply filters for basis of record attribute #
###############################################
# (FIX: removed a redundant `occur <- occur` self-assignment here)
# inRM appears to flag records falling inside the species' range map (set in
# the *_intersections.csv inputs) — TODO confirm upstream.
occur_inRM <- filter(occur, inRM=="Yes")
# specimen-backed records: drop human/machine observations and unknown bases
occur_spec <- filter(occur_inRM, basis!="HUMAN_OBSERVATION", basis!="UNKNOWN", basis!="machineobservation", basis!="MACHINE_OBSERVATION")
# community-science human observations only
occur_obse <- filter(occur_inRM, basis=="HUMAN_OBSERVATION")
# share of all 1950-2019 records that fall inside their species' range map
nrow(filter(occur, inRM=="Yes"))/nrow(occur) # number in range overall
# yearly counts of records inside / outside range maps
inRMYears <- as.data.frame(occur) %>% filter(inRM=="Yes") %>%
  group_by(year) %>% summarise(n=n())
outRMYears <- as.data.frame(occur) %>% filter(inRM=="No") %>%
  group_by(year) %>% summarise(n=n())
# per-year proportion of records inside range maps (n.x = outside, n.y = inside)
totalRMYears <- merge(outRMYears, inRMYears, by="year") %>%
  filter(between(year, 1950, 2019)) %>% mutate(perc.n.in = n.y/(n.x+n.y))
plot(totalRMYears$year, totalRMYears$perc.n.in)
# mean proportion and approximate 95% CI half-width (normal approx.), all years
mean(totalRMYears$perc.n.in)
1.96*sd(totalRMYears$perc.n.in)/sqrt(nrow(totalRMYears))
# same for the most recent decade only
mean(filter(totalRMYears, between(year, 2010, 2019))$perc.n.in)
1.96*sd(filter(totalRMYears, between(year, 2010, 2019))$perc.n.in)/sqrt(nrow(filter(totalRMYears, between(year, 2010, 2019))))
# per-family subsets of in-range records: all bases, specimens only, and
# human observations only.
# NOTE(review): Riodinidae is counted in the fishnets above but no occur_rio
# subset is built here — confirm the family is intentionally excluded from the
# occurrence-side ratios.
occur_nym <- filter(occur_inRM, family=="Nymphalidae")
occur_pap <- filter(occur_inRM, family=="Papilionidae")
occur_lyc <- filter(occur_inRM, family=="Lycaenidae")
occur_hes <- filter(occur_inRM, family=="Hesperiidae")
occur_pie <- filter(occur_inRM, family=="Pieridae")
occur_spec_nym <- filter(occur_spec, family=="Nymphalidae")
occur_spec_pap <- filter(occur_spec, family=="Papilionidae")
occur_spec_lyc <- filter(occur_spec, family=="Lycaenidae")
occur_spec_hes <- filter(occur_spec, family=="Hesperiidae")
occur_spec_pie <- filter(occur_spec, family=="Pieridae")
occur_obse_nym <- filter(occur_obse, family=="Nymphalidae")
occur_obse_pap <- filter(occur_obse, family=="Papilionidae")
occur_obse_lyc <- filter(occur_obse, family=="Lycaenidae")
occur_obse_hes <- filter(occur_obse, family=="Hesperiidae")
occur_obse_pie <- filter(occur_obse, family=="Pieridae")
# time-slice subsets (disabled)
# occur_t1 <- filter(occur, between(year, 1950, 1969))
# occur_t2 <- filter(occur, between(year, 1970, 1989))
# occur_t3 <- filter(occur, between(year, 1990, 2009))
# occur_t4 <- filter(occur, between(year, 2010, 2019))
#################################
# count unique species in grids #
#################################
# Observed richness: spatially join each occurrence subset to the grid, count
# distinct species per cell (FID), then join the counts back onto the grid.
# 100 km resolution
grd_100_allRich <- occur %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_100_inRMRich <- occur_inRM %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_100_inRMspecRich <- occur_spec %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_100_inRMobseRich <- occur_obse %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
# per-family, all bases
grd_100_inRMnym <- occur_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMNym=n_distinct(species))
grd_100_inRMpap <- occur_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMPap=n_distinct(species))
grd_100_inRMlyc <- occur_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMlyc=n_distinct(species))
grd_100_inRMhes <- occur_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMhes=n_distinct(species))
grd_100_inRMpie <- occur_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMpie=n_distinct(species))
# per-family, specimens only
grd_100_inRMspecnym <- occur_spec_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecNym=n_distinct(species))
grd_100_inRMspecpap <- occur_spec_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecPap=n_distinct(species))
grd_100_inRMspeclyc <- occur_spec_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspeclyc=n_distinct(species))
grd_100_inRMspeches <- occur_spec_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspeches=n_distinct(species))
grd_100_inRMspecpie <- occur_spec_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMspecpie=n_distinct(species))
# per-family, human observations only
grd_100_inRMobsenym <- occur_obse_nym %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobseNym=n_distinct(species))
grd_100_inRMobsepap <- occur_obse_pap %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsePap=n_distinct(species))
grd_100_inRMobselyc <- occur_obse_lyc %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobselyc=n_distinct(species))
grd_100_inRMobsehes <- occur_obse_hes %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsehes=n_distinct(species))
grd_100_inRMobsepie <- occur_obse_pie %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMobsepie=n_distinct(species))
# grd_100_t1 <- occur_t1 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_100_t2 <- occur_t2 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_100_t3 <- occur_t3 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_100_t4 <- occur_t4 %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 100 grid (cells without records keep NA counts)
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_allRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobseRich), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMnym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMpap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMlyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMhes), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMpie), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecnym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecpap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspeclyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspeches), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMspecpie), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsenym), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsepap), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobselyc), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsehes), by="FID")
grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_inRMobsepie), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t1), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t2), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t3), by="FID")
# grd_100 <- grd_100 %>% left_join(as.data.frame(grd_100_t4), by="FID")
# 200 km resolution: distinct-species counts per cell for the main record
# subsets, then joined back onto the grid.
grd_200_allRich <- occur %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_200_inRMRich <- occur_inRM %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_200_inRMSpecRich <- occur_spec %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_200_inRMObseRich <- occur_obse %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
grd_200_inRMObsenym <- occur_obse_nym %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
grd_200_inRMObsepap <- occur_obse_pap %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
grd_200_inRMObselyc <- occur_obse_lyc %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
grd_200_inRMObsehes <- occur_obse_hes %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
grd_200_inRMObsepie <- occur_obse_pie %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
# grd_200_t1 <- occur_t1 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_200_t2 <- occur_t2 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_200_t3 <- occur_t3 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_200_t4 <- occur_t4 %>% st_join(grd_200) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 200 grid
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_allRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMSpecRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObseRich), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsenym), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsepap), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObselyc), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsehes), by="FID")
grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_inRMObsepie), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t1), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t2), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t3), by="FID")
# grd_200 <- grd_200 %>% left_join(as.data.frame(grd_200_t4), by="FID")
# 400 km resolution: distinct-species counts per cell for the main record
# subsets, then joined back onto the grid.
grd_400_allRich <- occur %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
grd_400_inRMRich <- occur_inRM %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
grd_400_inRMSpecRich <- occur_spec %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
grd_400_inRMObseRich <- occur_obse %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
grd_400_inRMObsenym <- occur_obse_nym %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
grd_400_inRMObsepap <- occur_obse_pap %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
grd_400_inRMObselyc <- occur_obse_lyc %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
grd_400_inRMObsehes <- occur_obse_hes %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
grd_400_inRMObsepie <- occur_obse_pie %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
# grd_400_t1 <- occur_t1 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_400_t2 <- occur_t2 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_400_t3 <- occur_t3 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_400_t4 <- occur_t4 %>% st_join(grd_400) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
# merge back with 400 grid
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_allRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMSpecRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObseRich), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsenym), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsepap), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObselyc), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsehes), by="FID")
grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_inRMObsepie), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t1), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t2), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t3), by="FID")
# grd_400 <- grd_400 %>% left_join(as.data.frame(grd_400_t4), by="FID")
# 800 km resolution (disabled)
# grd_800_allRich <- occur %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesAll=n_distinct(species))
# grd_800_inRMRich <- occur_inRM %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRM=n_distinct(species))
# grd_800_inRMSpecRich <- occur_spec %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
# grd_800_inRMObseRich <- occur_obse %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
#
# grd_800_inRMObsenym <- occur_obse_nym %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObseNym=n_distinct(species))
# grd_800_inRMObsepap <- occur_obse_pap %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsePap=n_distinct(species))
# grd_800_inRMObselyc <- occur_obse_lyc %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObselyc=n_distinct(species))
# grd_800_inRMObsehes <- occur_obse_hes %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsehes=n_distinct(species))
# grd_800_inRMObsepie <- occur_obse_pie %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesInRMObsepie=n_distinct(species))
#
# grd_800_t1 <- occur_t1 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciesT1=n_distinct(species))
# grd_800_t2 <- occur_t2 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest2=n_distinct(species))
# grd_800_t3 <- occur_t3 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest3=n_distinct(species))
# grd_800_t4 <- occur_t4 %>% st_join(grd_800) %>% group_by(FID) %>% summarise(n_speciest4=n_distinct(species))
#
# # merge back with 800 grid
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_allRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMSpecRich), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObseRich), by="FID")
#
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsenym), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsepap), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObselyc), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsehes), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_inRMObsepie), by="FID")
#
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t1), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t2), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t3), by="FID")
# grd_800 <- grd_800 %>% left_join(as.data.frame(grd_800_t4), by="FID")
####################
# calculate ratios #
####################
# Sampling completeness per cell = observed richness / expected (range-map)
# richness. Observed can exceed expected (vagrants, range-map error), so
# ratios are capped at 1; cells lacking either count stay NA.
# pmin(x, 1) replaces the previous ifelse(x > 1, 1, x) capping — same result
# on numeric vectors, vectorised and half the code.
# 100 km resolution (buffered range-map denominators)
grd_100 <- grd_100 %>% mutate(logInRMOccur_Buff = pmin(n_speciesInRM/Buff100, 1),
                              logInRMSpec_Buff = pmin(n_speciesInRMSpec/Buff100, 1),
                              logInRMObse_Buff = pmin(n_speciesInRMObse/Buff100, 1),
                              nymobsRatio = pmin(n_speciesInRMobseNym/Buff100_nym, 1),
                              papobsRatio = pmin(n_speciesInRMobsePap/Buff100_pap, 1),
                              lycobsRatio = pmin(n_speciesInRMobselyc/Buff100_lyc, 1),
                              hesobsRatio = pmin(n_speciesInRMobsehes/Buff100_hes, 1),
                              pieobsRatio = pmin(n_speciesInRMobsepie/Buff100_pie, 1),
                              nymspecRatio = pmin(n_speciesInRMspecNym/Buff100_nym, 1),
                              papspecRatio = pmin(n_speciesInRMspecPap/Buff100_pap, 1),
                              lycspecRatio = pmin(n_speciesInRMspeclyc/Buff100_lyc, 1),
                              hesspecRatio = pmin(n_speciesInRMspeches/Buff100_hes, 1),
                              piespecRatio = pmin(n_speciesInRMspecpie/Buff100_pie, 1),
                              nymRatio = pmin(n_speciesInRMNym/Buff100_nym, 1),
                              papRatio = pmin(n_speciesInRMPap/Buff100_pap, 1),
                              lycRatio = pmin(n_speciesInRMlyc/Buff100_lyc, 1),
                              hesRatio = pmin(n_speciesInRMhes/Buff100_hes, 1),
                              pieRatio = pmin(n_speciesInRMpie/Buff100_pie, 1))
# time-slice ratios (disabled):
#                             t1Ratio = pmin(n_speciesT1/Buff100, 1),
#                             t2Ratio = pmin(n_speciest2/Buff100, 1),
#                             t3Ratio = pmin(n_speciest3/Buff100, 1),
#                             t4Ratio = pmin(n_speciest4/Buff100, 1)
# 200 km resolution (capping via pmin, as above)
# NOTE(review): only an unbuffered fishnet exists at 200 km, so the "_Buff"
# ratios reuse noBuff200 as the denominator (logInRMOccur_Buff therefore
# equals logInRMOccur_NoBuff capped at 1) — confirm this is intended.
grd_200 <- grd_200 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff200,
                              logInRMOccur_NoBuff = n_speciesInRM/noBuff200,
                              logInRMOccur_Buff = pmin(n_speciesInRM/noBuff200, 1),
                              logInRMSpec_Buff = pmin(n_speciesInRMSpec/noBuff200, 1),
                              logInRMObse_Buff = pmin(n_speciesInRMObse/noBuff200, 1),
                              nymObsRatio = pmin(n_speciesInRMObseNym/noBuff200_nym, 1),
                              papObsRatio = pmin(n_speciesInRMObsePap/noBuff200_pap, 1),
                              lycObsRatio = pmin(n_speciesInRMObselyc/noBuff200_lyc, 1),
                              hesObsRatio = pmin(n_speciesInRMObsehes/noBuff200_hes, 1),
                              pieObsRatio = pmin(n_speciesInRMObsepie/noBuff200_pie, 1))
# time-slice ratios (disabled):
#                             t1Ratio = pmin(n_speciesT1/Buff200, 1),
#                             t2Ratio = pmin(n_speciest2/Buff200, 1),
#                             t3Ratio = pmin(n_speciest3/Buff200, 1),
#                             t4Ratio = pmin(n_speciest4/Buff200, 1)
# 400 km resolution (capping via pmin, as above; unbuffered denominators only)
grd_400 <- grd_400 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff400,
                              logInRMOccur_NoBuff = n_speciesInRM/noBuff400,
                              logInRMOccur_Buff = pmin(n_speciesInRM/noBuff400, 1),
                              logInRMSpec_Buff = pmin(n_speciesInRMSpec/noBuff400, 1),
                              logInRMObse_Buff = pmin(n_speciesInRMObse/noBuff400, 1),
                              nymObsRatio = pmin(n_speciesInRMObseNym/noBuff400_nym, 1),
                              papObsRatio = pmin(n_speciesInRMObsePap/noBuff400_pap, 1),
                              lycObsRatio = pmin(n_speciesInRMObselyc/noBuff400_lyc, 1),
                              hesObsRatio = pmin(n_speciesInRMObsehes/noBuff400_hes, 1),
                              pieObsRatio = pmin(n_speciesInRMObsepie/noBuff400_pie, 1))
# time-slice ratios (disabled):
#                             t1Ratio = pmin(n_speciesT1/Buff400, 1),
#                             t2Ratio = pmin(n_speciest2/Buff400, 1),
#                             t3Ratio = pmin(n_speciest3/Buff400, 1),
#                             t4Ratio = pmin(n_speciest4/Buff400, 1)
# # 800 km resolution
# grd_800 <- grd_800 %>% mutate(logAllOccur_NoBuff = n_speciesAll/noBuff800,
# logInRMOccur_NoBuff = n_speciesInRM/noBuff800,
# logInRMOccur_Buff = n_speciesInRM/Buff800,
# logInRMSpec_Buff = n_speciesInRMSpec/Buff800,
# logInRMObse_Buff = n_speciesInRMObse/Buff800,
# nymObsRatio = n_speciesInRMObseNym/Buff800_nym,
# papObsRatio = n_speciesInRMObsePap/Buff800_pap,
# lycObsRatio = n_speciesInRMObselyc/Buff800_lyc,
# hesObsRatio = n_speciesInRMObsehes/Buff800_hes,
# pieObsRatio = n_speciesInRMObsepie/Buff800_pie,
# t1Ratio = n_speciesT1/Buff800,
# t2Ratio = n_speciest2/Buff800,
# t3Ratio = n_speciest3/Buff800,
# t4Ratio = n_speciest4/Buff800) %>%
# mutate(logInRMOccur_Buff = ifelse(logInRMOccur_Buff > 1, 1, logInRMOccur_Buff),
# logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
# logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff),
# nymObsRatio = ifelse(nymObsRatio > 1, 1, nymObsRatio),
# papObsRatio = ifelse(papObsRatio > 1, 1, papObsRatio),
# lycObsRatio = ifelse(lycObsRatio > 1, 1, lycObsRatio),
# hesObsRatio = ifelse(hesObsRatio > 1, 1, hesObsRatio),
# pieObsRatio = ifelse(pieObsRatio > 1, 1, pieObsRatio),
# t1Ratio = ifelse(t1Ratio > 1, 1, t1Ratio),
# t2Ratio = ifelse(t2Ratio > 1, 1, t2Ratio),
# t3Ratio = ifelse(t3Ratio > 1, 1, t3Ratio),
# t4Ratio = ifelse(t4Ratio > 1, 1, t4Ratio))
#######################
## Sample Covariates ##
#######################
grd_100 <- unique(grd_100)
# RCP 8.5 Climate Velocity into 2085: mean velocity per cell, percentile-
# ranked and binned into categories 0 (<80th), 1 (>=80th), 2 (>=95th pctile).
grd_100$rcp85_2080 <- extract(rcp8.5_2080, grd_100, fun=mean, na.rm=TRUE)
grd_100 <- grd_100 %>% mutate(clim.rank = percent_rank(rcp85_2080)) %>%
  mutate(clim.cat = ifelse(clim.rank >= 0.95, "2", ifelse(clim.rank >= 0.80, "1", "0")))
grd_100$clim.cat <- as.character(grd_100$clim.cat)  # already character; kept as a no-op safeguard
# cell centroids with explicit coordinate columns for geom_point plotting
grd_100pt <- st_centroid(grd_100)
grd_100pt <- grd_100pt %>% mutate(x.crd = st_coordinates(grd_100pt)[,1], y.crd = st_coordinates(grd_100pt)[,2])
# FIX: retain rcp85_2080 here — it is filtered on (!is.na(rcp85_2080)) further
# down, which errors if this select drops the column.
climate <- dplyr::select(grd_100pt, clim.cat, logInRMOccur_Buff, rcp85_2080)
grd_100pt <- grd_100pt %>% mutate(logInRMOccur_Buff=replace_na(logInRMOccur_Buff, 0))
# plot climate velocities on grid, highlight 80th and 95th percentiles;
# point size scales with undersampling (1 - completeness ratio), so large
# squares mark poorly sampled cells.
tiff("climateMap_legend.tiff", units="cm", width=12.5, height=12.5, res=350)
ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  #geom_sf(grd_100, mapping=aes(fill=clim.cat), color=NA, alpha=0.7)+
  geom_point(grd_100pt, mapping=aes(x=x.crd, y=y.crd,
                                    color=clim.cat,
                                    size=1-logInRMOccur_Buff),
             alpha=0.5, shape=15)+
  scale_color_manual(values=c("#3c9ab2", "#e8c927", "#f22300"))+
  #scale_color_manual(values=c("black", "white"))+
  scale_size_continuous(range=c(0.01, 1.99999))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
  theme_void()
dev.off()
climate <- filter(climate, !is.na(rcp85_2080))
# get percent sampled over 80% for each climate percentile.
1-nrow(filter(climate, clim.cat==1, logInRMOccur_Buff >= 0.80))/nrow(filter(climate, clim.cat==1))
1-nrow(filter(climate, clim.cat==2, logInRMOccur_Buff >= 0.80))/nrow(filter(climate, clim.cat==2))
# human footprint and majority biome
library(exactextractr)
# Return the most frequent non-NA value in `x` (the statistical mode).
# Ties are broken in favour of the value whose first occurrence in `x`
# comes earliest; an all-NA (or empty) input yields a zero-length result.
# Extra arguments (`...`) are accepted for compatibility with summary-
# function callers but are ignored.
getMode <- function(x, ...) {
  vals <- unique(x[!is.na(x)])
  freq <- vapply(vals, function(v) sum(x == v, na.rm = TRUE), integer(1))
  vals[which.max(freq)]
}
# Join protected-area coverage per cell, then extract anthropogenic covariates.
nature <- fread("covariates/PA_grid/PA_grid_area.csv")
grd_100$ID <- paste(grd_100$X, grd_100$Y)  # cell key shared with the PA table
grd_100 <- grd_100 %>% left_join(nature, by="ID")
# Mean human footprint and majority biome within each grid cell
grd_100$footprint <- exact_extract(footprint, grd_100, fun="mean")
grd_100$biome <- exact_extract(biomes, grd_100, fun="majority")
# Does protected-area coverage (grid_area) explain completeness beyond human
# footprint alone? Fit two- and one-predictor models for each completeness
# measure and compare by AIC.
fit.all <- lm(logInRMOccur_Buff~footprint+grid_area, data=grd_100)
fit.spec <- lm(logInRMSpec_Buff~footprint+grid_area, data=grd_100)
fit.obse <- lm(logInRMObse_Buff~footprint+grid_area, data=grd_100)
fit.all.1 <- lm(logInRMOccur_Buff~footprint, data=grd_100)
fit.spec.1 <- lm(logInRMSpec_Buff~footprint, data=grd_100)
fit.obse.1 <- lm(logInRMObse_Buff~footprint, data=grd_100)
# Second element negative => the footprint+grid_area model has the lower AIC
extractAIC(fit.all)-extractAIC(fit.all.1)
extractAIC(fit.spec)-extractAIC(fit.spec.1)
extractAIC(fit.obse)-extractAIC(fit.obse.1)
# Specimen vs observation completeness: Welch t-test plus standard errors
t.test(grd_100$logInRMSpec_Buff, grd_100$logInRMObse_Buff)
sd(na.omit(grd_100$logInRMObse_Buff))/sqrt(length(na.omit(grd_100$logInRMObse_Buff)))
sd(na.omit(grd_100$logInRMSpec_Buff))/sqrt(length(na.omit(grd_100$logInRMSpec_Buff)))
# get summary stats for biomes
grd_100$biome <- round(grd_100$biome, digits=0)
# Per-biome means/SDs of completeness (overall, by record basis, and by
# butterfly family), plus a colour category used by the circle figure below.
biom.st <- as.data.frame(dplyr::select(grd_100, biome, logInRMOccur_Buff, logInRMObse_Buff, logInRMSpec_Buff,
                                       nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)) %>%
  mutate(biome=floor(biome))
# NOTE(review): biom.fl duplicates the biom.st summary below except that it
# keeps all biomes (no count > 10 filter) and labels the top class "> 80%"
# rather than ">= 80%". It is not referenced later in this section --
# confirm whether it is still needed.
biom.fl <- biom.st %>% group_by(biome) %>%
  summarise(meanAll=mean(logInRMOccur_Buff, na.rm=TRUE), meanobse=mean(logInRMObse_Buff, na.rm=TRUE), meanSpec=mean(logInRMSpec_Buff, na.rm=TRUE),
            meanNym=mean(nymobsRatio, na.rm=TRUE), meanPap=mean(papobsRatio, na.rm=TRUE), meanLyc=mean(lycobsRatio, na.rm=TRUE), meanHes=mean(hesobsRatio, na.rm=TRUE),
            meanPie=mean(pieobsRatio, na.rm=TRUE),
            sdAll=sd(logInRMOccur_Buff, na.rm=TRUE), sdobse=sd(logInRMObse_Buff, na.rm=TRUE), sdSpec=sd(logInRMSpec_Buff, na.rm=TRUE),
            sdNym=sd(nymobsRatio, na.rm=TRUE), sdPap=sd(papobsRatio, na.rm=TRUE), sdLyc=sd(lycobsRatio, na.rm=TRUE), sdHes=sd(hesobsRatio, na.rm=TRUE),
            sdPie=sd(pieobsRatio, na.rm=TRUE), count=n()) %>%
  mutate(colorAll = ifelse(meanAll >= 0.8, "> 80%", ifelse(meanAll >= 0.5, "50% <= x < 80%", "< 50%")),
         colorobs = ifelse(meanobse >= 0.8, "> 80%", ifelse(meanobse >= 0.5, "50% <= x < 80%", "< 50%")),
         colorSpe = ifelse(meanSpec >= 0.8, "> 80%", ifelse(meanSpec >= 0.5, "50% <= x < 80%", "< 50%")))
# Same summary restricted to biomes with > 10 cells; the coord* columns are
# fixed x-positions (and coord = row number the y-position) for the circles.
biom.st <- biom.st %>% group_by(biome) %>%
  summarise(meanAll=mean(logInRMOccur_Buff, na.rm=TRUE), meanobse=mean(logInRMObse_Buff, na.rm=TRUE), meanSpec=mean(logInRMSpec_Buff, na.rm=TRUE),
            meanNym=mean(nymobsRatio, na.rm=TRUE), meanPap=mean(papobsRatio, na.rm=TRUE), meanLyc=mean(lycobsRatio, na.rm=TRUE), meanHes=mean(hesobsRatio, na.rm=TRUE),
            meanPie=mean(pieobsRatio, na.rm=TRUE),
            sdAll=sd(logInRMOccur_Buff, na.rm=TRUE), sdobse=sd(logInRMObse_Buff, na.rm=TRUE), sdSpec=sd(logInRMSpec_Buff, na.rm=TRUE),
            sdNym=sd(nymobsRatio, na.rm=TRUE), sdPap=sd(papobsRatio, na.rm=TRUE), sdLyc=sd(lycobsRatio, na.rm=TRUE), sdHes=sd(hesobsRatio, na.rm=TRUE),
            sdPie=sd(pieobsRatio, na.rm=TRUE), count=n()) %>%
  mutate(colorAll = ifelse(meanAll >= 0.8, ">= 80%", ifelse(meanAll >= 0.5, "50% <= x < 80%", "< 50%")),
         colorobs = ifelse(meanobse >= 0.8, ">= 80%", ifelse(meanobse >= 0.5, "50% <= x < 80%", "< 50%")),
         colorSpe = ifelse(meanSpec >= 0.8, ">= 80%", ifelse(meanSpec >= 0.5, "50% <= x < 80%", "< 50%"))) %>%
  filter(count > 10) %>%
  mutate(coord=row_number(), coordAll=1, coordobse=3, coordSpec=5, coordNym=8, coordPap=10, coordLyc=12, coordHes=14, coordPie=16)
library(ggforce)
tiff("circles.tiff", units="cm", width=8.3, height=8.3, res=350)
ggplot(biom.st)+
geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2+sdAll/2, color=colorAll, fill=colorAll), show.legend=FALSE)+
geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2-sdAll/2, color=colorAll), fill="white")+
geom_circle(aes(x0=coordAll, y0=coord, r=meanAll/2), color="black", fill=NA)+
geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2+sdobse/2, color=colorobs, fill=colorobs), show.legend=FALSE)+
geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2-sdobse/2, color=colorobs), fill="white")+
geom_circle(aes(x0=coordobse, y0=coord, r=meanobse/2), color="black", fill=NA)+
geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2+sdSpec/2, color=colorSpe, fill=colorSpe), show.legend=FALSE)+
geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2-sdSpec/2, color=colorSpe), fill="white")+
geom_circle(aes(x0=coordSpec, y0=coord, r=meanSpec/2), color="black", fill=NA)+
# geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2+sdHes/2), color="grey", fill="grey")+
# geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2-sdHes/2), color="grey", fill="white")+
# geom_circle(aes(x0=coordHes, y0=coord, r=meanHes/2), bolor="black", fill=NA)+
# geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2+sdLyc/2), color="grey", fill="grey")+
# geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2-sdLyc/2), color="grey", fill="white")+
# geom_circle(aes(x0=coordLyc, y0=coord, r=meanLyc/2), bolor="black", fill=NA)+
# geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2+sdNym/2), color="grey", fill="grey")+
# geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2-sdNym/2), color="grey", fill="white")+
# geom_circle(aes(x0=coordNym, y0=coord, r=meanNym/2), bolor="black", fill=NA)+
# geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2+sdPap/2), color="grey", fill="grey")+
# geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2-sdPap/2), color="grey", fill="white")+
# geom_circle(aes(x0=coordPap, y0=coord, r=meanPap/2), bolor="black", fill=NA)+
# geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2+sdPie/2), color="grey", fill="grey")+
# geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2-sdPie/2), color="grey", fill="white")+
# geom_circle(aes(x0=coordPie, y0=coord, r=meanPie/2), color="black", fill=NA)+
#geom_circle(aes(x0=c(0,1,2,3), y0=c(0,0,0,0), r=c(0.25/2, 0.5/2, 0.75/2, 1/2)))+
scale_fill_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
scale_color_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
xlim(0, 11) + ylim(0, 11)+
theme_void()+
theme(legend.position = "none") + labs(tag="(a)")
dev.off()
# WWF biome polygons, reprojected and joined to the per-biome summaries so
# each polygon carries its completeness category for the map below.
b.ply <- st_read("covariates/WWFBiomes.shp")
b.ply <- st_transform(b.ply, crs.1)
b.ply <- b.ply %>% left_join(biom.st, by=c("BIOME" = "biome"))
# Size legend for the circle figure: reference circles at r = 0.125..0.5
# (i.e. completeness 25%, 50%, 75%, 100%).
tiff("circles_legend.tiff", units="cm", width=8.3, height=8.3, res=350)
ggplot()+
  geom_circle(aes(x0=c(1,2,3,4), y0=c(1,1,1,1), r=c(0.25/2, 0.5/2, 0.75/2, 1/2)))+
  xlim(0, 11) + ylim(0, 11)+
  theme_void() + labs(tag=" ")
dev.off()
# ggplot()+
# geom_sf(data=land, fill="grey", color=NA)+
# geom_sf(b.ply, mapping=aes(fill=as.factor(BIOME)), color="grey", size=0.05)+
# scale_fill_manual(values=c("#839791", "#839791", "#839791",
# "#839791", "#839791", "#839791",
# "#F28F3B", "#F28F3B", "#F28F3B",
# "#F28F3B", "#575761", "#F4D6CC",
# "#F4D6CC", "#F4D6CC", NA, NA))+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
tiff("biome_map.tiff", units="in", width=8.3, height=8.3, res=350)
ggplot()+
geom_sf(data=land, fill="grey", color=NA)+
geom_sf(b.ply, mapping=aes(fill=colorAll), color=NA, size=0.05)+
scale_fill_manual(values=c("firebrick2", "seagreen3", "goldenrod1"))+
coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
theme_void()+labs(tag="(b)")
dev.off()
###################
# yearly analysis #
###################
# Completeness through time, computed in two-year windows from 1950 onward.
# BUG FIX: the original used `<-` inside the data.frame() call, which created
# mangled column names such as "specCom....double.."; columns are now declared
# with `=` and the clean names are used throughout this section.
yearly <- data.frame(startYear = integer(),   # first year of the window
                     endYear   = integer(),   # second year of the window
                     specCom   = double(),    # n cells >= 80% complete (specimens)
                     obsCom    = double(),    # n cells >= 80% complete (observations)
                     specAve   = double(),    # mean completeness, specimens
                     obsAve    = double())    # mean completeness, observations

grd_100c <- grd_100

# biyearly windows
i <- 1950
while (i < 2019) {
  print(paste("Processing years: ", i, "-", i+1))

  grd_100c <- grd_100  # reset the working grid each window

  yearly[i-1949, 1] <- i
  yearly[i-1949, 2] <- i+1

  # split this window's records by basis of record
  occur_spec <- filter(occur_inRM, between(year, i, i+1), basis!="HUMAN_OBSERVATION")
  occur_obse <- filter(occur_inRM, between(year, i, i+1), basis=="HUMAN_OBSERVATION")

  # richness per cell from each record type
  grd_100_biyearlyspec <- occur_spec %>% st_join(grd_100c) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
  grd_100_biyearlyobs <- occur_obse %>% st_join(grd_100c) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))

  grd_100c <- grd_100c %>% left_join(as.data.frame(grd_100_biyearlyspec), by="FID")
  grd_100c <- grd_100c %>% left_join(as.data.frame(grd_100_biyearlyobs), by="FID")

  # completeness = observed richness / range-map richness, capped at 1
  grd_100c <- grd_100c %>% mutate(logInRMSpec_Buff = n_speciesInRMSpec.y/Buff100,
                                  logInRMObse_Buff = n_speciesInRMObse.y/Buff100) %>%
    mutate(logInRMSpec_Buff = ifelse(logInRMSpec_Buff > 1, 1, logInRMSpec_Buff),
           logInRMObse_Buff = ifelse(logInRMObse_Buff > 1, 1, logInRMObse_Buff))

  yearly[i-1949, 3] <- nrow(filter(grd_100c, logInRMSpec_Buff >= 0.8))
  yearly[i-1949, 4] <- nrow(filter(grd_100c, logInRMObse_Buff >= 0.8))
  yearly[i-1949, 5] <- mean(na.omit(grd_100c$logInRMSpec_Buff))
  yearly[i-1949, 6] <- mean(na.omit(grd_100c$logInRMObse_Buff))

  i <- i + 2
}
yearly <- na.omit(yearly)

# BUG FIX: the unsampled-cell count was written as
# nrow(grd_100) - specCom + obsCom (missing parentheses); the intended
# quantity is nrow(grd_100) - (specCom + obsCom). The observation share is
# stored as percObs rather than overwriting the obsCom count, which the
# barplot below still needs.
yearly <- yearly %>% mutate(total = specCom + obsCom +
                              (nrow(grd_100) - (specCom + obsCom)),
                            percSpec = specCom/total,
                            percObs = obsCom/total,
                            unsamp = (nrow(grd_100) - (specCom + obsCom))/total,
                            ratioS = specCom/(specCom + obsCom),
                            ratioO = obsCom/(specCom + obsCom))

# ggplot()+
#   geom_line(aes(x=yearly$startYear, y=yearly$specCom), color="red4", lwd=1.1)+
#   geom_point(aes(x=yearly$startYear, y=yearly$specCom), color="red4", fill="white", pch=21, size=2)+
#   geom_line(aes(x=yearly$startYear, y=yearly$obsCom), color="royalblue4", lwd=1.1)+
#   geom_point(aes(x=yearly$startYear, y=yearly$obsCom), color="royalblue4", fill="white", pch=21, size=2)+
#   labs(y="Number of 100km Cells Over 80% Complete", x="Years of Sampling")+
#   scale_fill_manual(values=c("red4", "royalblue4"), labels=c("Specimen Data", "Observation Data"))+
#   theme_minimal()
# ggplot()+
#   geom_line(aes(x=yearly$startYear, y=yearly$specAve), color="red4", lwd=1.1)+
#   geom_point(aes(x=yearly$startYear, y=yearly$specAve), color="red4", fill="white", pch=21, size=2)+
#   geom_line(aes(x=yearly$startYear, y=yearly$obsAve), color="royalblue4", lwd=1.1)+
#   geom_point(aes(x=yearly$startYear, y=yearly$obsAve), color="royalblue4", fill="white", pch=21, size=2)+
#   labs(y="Average Sampling Completeness Across All Cells", x="Years of Sampling")+
#   theme_minimal()

# Long format for the stacked bars: counts of >= 80%-complete cells per window
yearly.long <- yearly %>% dplyr::select(startYear, specCom, obsCom) %>% reshape2::melt(id.vars="startYear")

# Supplemental 1
p1 <- ggplot()+
  geom_bar(aes(y=value, x=startYear, fill=variable), data=yearly.long, stat="identity", width = 1.8)+
  theme_minimal()+labs(x="Sampling Start Year", y="Count of Cells over 80% Complete")+
  scale_x_continuous(breaks=seq(1950, 2019, 2))+
  scale_fill_manual(values=c("red3", "royalblue3"), labels = c("Museum Specimens", "Community Observations"))+
  theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.2), panel.background = element_rect(fill = "white", colour = "grey50"),
        panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(), legend.title = element_blank(),
        legend.position = "none", axis.text=element_text(size=12)) + labs(tag="(b)")
# Harmonise basisOfRecord labels (lower-case and material-sample variants
# are folded into the two canonical classes), then tally records per year.
sup <- as.data.frame(occur_inRM) %>% mutate(basis=recode(basis, preservedspecimen="PRESERVED_SPECIMEN",
                                                         OBSERVATION="HUMAN_OBSERVATION",
                                                         MATERIAL_SAMPLE="PRESERVED_SPECIMEN"))
sup <- sup %>% filter(basis %in% c("HUMAN_OBSERVATION",
                                   "PRESERVED_SPECIMEN")) %>% group_by(year, basis) %>% tally()
# Annual record counts by basis of record
p2 <- ggplot()+
  geom_bar(aes(y=n, x=year, fill=basis), data=sup, stat="identity", width=1)+
  theme_minimal()+labs(x="Sampling Year", y="Number of Records")+
  scale_fill_manual(values=c("royalblue3", "red3"), labels = c("Museum Specimens", "Community Observations"))+
  theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.2), panel.background = element_rect(fill = "white", colour = "grey50"),
        panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(), legend.title = element_blank(),
        legend.position = "none", axis.text=element_text(size=12))+
  scale_y_continuous(label=comma) + labs(tag="(a)")
# Supplemental figure 1: record counts (a) over completeness counts (b)
tiff("Supp1.tiff", units="in", width=7, height=6, res=350)
grid.arrange(p2, p1, nrow=2)
dev.off()
library(ggsignif)
# Between family ANOVAs
# Between-family ANOVAs on completeness (overall, observation-only,
# specimen-only), each followed by Tukey HSD pairwise comparisons.
fams <- as.data.frame(grd_100) %>% dplyr::select(FID, nymRatio, papRatio, lycRatio, hesRatio, pieRatio)
fams <- fams %>% reshape2::melt(id.vars="FID")
aov.all <- aov(value~variable, data=fams)
TukeyHSD(aov.all)

fams.obs <- as.data.frame(grd_100) %>% dplyr::select(FID, nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)
fams.obs <- fams.obs %>% reshape2::melt(id.vars="FID")
aov.obs <- aov(value~variable, data=fams.obs)
TukeyHSD(aov.obs)

fams.spec <- as.data.frame(grd_100) %>% dplyr::select(FID, nymspecRatio, papspecRatio, lycspecRatio, hesspecRatio, piespecRatio)
fams.spec <- fams.spec %>% reshape2::melt(id.vars="FID")
aov.spec <- aov(value~variable, data=fams.spec)
TukeyHSD(aov.spec)

# Chi-square inputs: restrict to cells >= 50% complete overall, then count
# cells > 50% complete per family and record type.
fams.chi <- as.data.frame(grd_100) %>% filter(logInRMOccur_Buff >= 0.5) %>% dplyr::select(FID, nymspecRatio, papspecRatio, lycspecRatio, hesspecRatio, piespecRatio,
                                                                                          nymobsRatio, papobsRatio, lycobsRatio, hesobsRatio, pieobsRatio)

# report counts for chi-square
nrow(filter(fams.chi, nymspecRatio > 0.5))
nrow(filter(fams.chi, nymobsRatio > 0.5))
nrow(filter(fams.chi, papspecRatio > 0.5))
nrow(filter(fams.chi, papobsRatio > 0.5))
nrow(filter(fams.chi, lycspecRatio > 0.5))
nrow(filter(fams.chi, lycobsRatio > 0.5))
nrow(filter(fams.chi, hesspecRatio > 0.5))
nrow(filter(fams.chi, hesobsRatio > 0.5))
nrow(filter(fams.chi, piespecRatio > 0.5))
nrow(filter(fams.chi, pieobsRatio > 0.5))

#write.csv(fams.chi, "chi_raw.csv")
library(chisq.posthoc.test)
# NOTE(review): fams.chi is replaced here by a pre-built contingency table
# read from disk (presumably the counts above arranged family x record type);
# chi_raw.csv must already exist -- confirm it matches the current data.
fams.chi <- read.csv("chi_raw.csv")
rownames(fams.chi) <- fams.chi[,1]  # first column holds the row labels
fams.chi <- fams.chi[,-1]
chisq.test(fams.chi)
chisq.posthoc.test(fams.chi, method="bonferroni", round=6)
# Per-family bar charts of the chi-square counts, read from a plotting-ready
# table. Families with significant specimen-vs-observation differences
# (Nym, Pie) are coloured; the others are plain grey.
fams.chi <- read.csv("chi_plots.csv")
chi.nym <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Nym"), aes(x=Type, y=Value, col=Type, fill=Type), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+scale_fill_manual(values=c("royalblue3", "red3"))+
  scale_color_manual(values=c("royalblue3", "red3"))+ylim(0,600)+
  theme_minimal() + labs(tag=" ")
chi.pap <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Pap"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal()+ labs(tag=" ")
chi.lyc <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Lyc"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal()+ labs(tag=" ")
chi.hes <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Hes"), aes(x=Type, y=Value), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+ylim(0,600)+
  theme_minimal() + labs(tag="(d)")
chi.pie <- ggplot()+
  geom_bar(data=filter(fams.chi, Family=="Pie"), aes(x=Type, y=Value, col=Type, fill=Type), stat="identity",
           show.legend=FALSE)+xlab("")+ylab("")+scale_fill_manual(values=c("royalblue3", "red3"))+
  scale_color_manual(values=c("royalblue3", "red3"))+ylim(0,600)+
  theme_minimal() + labs(tag=" ")
library(ggsignif)
# Boxplots of completeness by family with pairwise significance bars
# (Tukey results visualised via geom_signif): (a) all records,
# (b) observations only, (c) specimens only.
# NOTE(review): each plot calls scale_x_discrete() twice; the second call
# (with labels=) replaces the first, which is redundant.
png("tukeys.png", units="cm", height = 12, width = 20, res=400)
p1 <- ggplot(fams, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymRatio", "papRatio"),
                               c("nymRatio", "lycRatio"),
                               c("nymRatio", "hesRatio"),
                               c("nymRatio", "pieRatio"),
                               c("lycRatio", "papRatio"),
                               c("hesRatio", "papRatio"),
                               c("pieRatio", "papRatio"),
                               c("hesRatio", "lycRatio"),
                               c("pieRatio", "lycRatio"),
                               c("pieRatio", "hesRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal()+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(a)")
p2 <- ggplot(fams.obs, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymobsRatio", "papobsRatio"),
                               c("nymobsRatio", "lycobsRatio"),
                               c("nymobsRatio", "hesobsRatio"),
                               c("nymobsRatio", "pieobsRatio"),
                               c("lycobsRatio", "papobsRatio"),
                               c("hesobsRatio", "papobsRatio"),
                               c("pieobsRatio", "papobsRatio"),
                               c("hesobsRatio", "lycobsRatio"),
                               c("pieobsRatio", "lycobsRatio"),
                               c("pieobsRatio", "hesobsRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal() + ylab("")+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(b)")
p3 <- ggplot(fams.spec, aes(x=variable, y=value))+
  geom_boxplot(fill="grey80", color="black")+
  scale_x_discrete() + xlab("Family")+
  ylab("Completeness")+
  geom_signif(comparisons=list(c("nymspecRatio", "papspecRatio"),
                               c("nymspecRatio", "lycspecRatio"),
                               c("nymspecRatio", "hesspecRatio"),
                               c("nymspecRatio", "piespecRatio"),
                               c("lycspecRatio", "papspecRatio"),
                               c("hesspecRatio", "papspecRatio"),
                               c("piespecRatio", "papspecRatio"),
                               c("hesspecRatio", "lycspecRatio"),
                               c("piespecRatio", "lycspecRatio"),
                               c("piespecRatio", "hesspecRatio")),
              map_signif_level = TRUE,
              tip_length=0,
              y_position = c(-0.1, -0.2, -0.3, -0.4, 1.1, 1.2, 1.3, -0.6, -0.7, 1.5)) +
  theme_minimal() + ylab("")+ scale_x_discrete(labels=c("Nym", "Pap", "Lyc", "Hes", "Pie"))+ labs(tag="(c)")
grid.arrange(p1, p2, p3, ncol=3, nrow=1)
dev.off()
# decades
# i = 1950
# while(i < 2020){
# print(paste("Processing years: ", i, "-", i+9))
#
# yearly[i-1949,]$startYear <- i
# yearly[i-1949,]$endYear <- i+1
#
# grd_100_biyearlyspec <- occur_spec %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMSpec=n_distinct(species))
# grd_100_biyearlyobs <- occur_obse %>% st_join(grd_100) %>% group_by(FID) %>% summarise(n_speciesInRMObse=n_distinct(species))
#
# yearly[i-1949,]$specCom <- mean(grd_100_biyearlyspec$n_speciesInRMSpec)
# yearly[i-1949,]$obsCom <- mean(grd_100_biyearlyspec$n_speciesInRMObse)
#
# i = i+10
# }
##################
# Visualizations #
##################
# 100 km fishnet no buffer
# km100_noBuffer <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=noBuff100), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=300)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_noBuffer.pdf", plot=km100_noBuffer)
#
# # 100 km fishnet buffer
# km100_Buffer <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_Buffer.pdf", plot=km100_Buffer)
#
# # visualize the occurrence richness values
# # 100 km all occurrences
# km100_allOccur <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesAll)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_allOccur.pdf", plot=km100_allOccur)
#
# # 100 km in range occurrences
# km100_allOccurInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRM)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_allOccurInRM.pdf", plot=km100_allOccurInRM)
#
# # 100 km in range occurrences from specimens
# km100_specimensInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRMSpec)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_specimensInRM.pdf", plot=km100_specimensInRM)
#
# # 100 km in range occurrences from human observations
# km100_observationsInRM <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(n_speciesInRMObse)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
# ggsave("km100_observationsInRM.pdf", plot=km100_observationsInRM)
#
# # 100 km all occurrences no buffer
# ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(logAllOccur_NoBuff)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # 100 km range occurrences no buffer
# ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=logInRMOccur_NoBuff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
###########################################################
##### ALL RECORDS IN RANGE MAPS WITH BUFFERING FIGURE #####
###########################################################
# Completeness maps at 100 / 200 / 400 km for composite, specimen-only, and
# observation-only data. The nine panels are identical except for the grid,
# the fill column, and the tag, so one helper builds each panel instead of
# nine copy-pasted ggplot blocks.
#
# grid:     sf grid object carrying the completeness columns
# fill_col: unquoted completeness column mapped to fill (tidy-eval embraced)
# tag:      panel tag -- "(a)"/"(b)"/"(c)" on the 100 km row, " " elsewhere
plot_completeness <- function(grid, fill_col, tag) {
  ggplot()+
    geom_sf(data=land, fill="white", color="black")+
    geom_sf(grid, mapping=aes(fill={{ fill_col }}), color=NA, alpha=0.7, show.legend=FALSE)+
    scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
    coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
    theme_void() + labs(tag=tag)
}

# 100 km resolution: composite / museum specimens / human observations
grd_100_p1 <- plot_completeness(grd_100, logInRMOccur_Buff, "(a)")
grd_100_p2 <- plot_completeness(grd_100, logInRMSpec_Buff, "(b)")
grd_100_p3 <- plot_completeness(grd_100, logInRMObse_Buff, "(c)")

# 200 km resolution
grd_200_p1 <- plot_completeness(grd_200, logInRMOccur_Buff, " ")
grd_200_p2 <- plot_completeness(grd_200, logInRMSpec_Buff, " ")
grd_200_p3 <- plot_completeness(grd_200, logInRMObse_Buff, " ")

# 400 km resolution
grd_400_p1 <- plot_completeness(grd_400, logInRMOccur_Buff, " ")
grd_400_p2 <- plot_completeness(grd_400, logInRMSpec_Buff, " ")
grd_400_p3 <- plot_completeness(grd_400, logInRMObse_Buff, " ")

# 3x3 figure: rows = resolution, columns = record basis
tiff("Basis.tiff", units="cm", width=16.6, height=18, res=400)
grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3,
             grd_200_p1, grd_200_p2, grd_200_p3,
             grd_400_p1, grd_400_p2, grd_400_p3, nrow=3, ncol=3)
dev.off()
# # 800 km resolution
# # composite data
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMOccur_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # museum specimens
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMSpec_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # human observations
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=logInRMObse_Buff), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# g4 <- grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, nrow=3)
########################################################
##### FAMILY SPECIFIC SPECIES RICHNESS FROM RANGES #####
########################################################
# 100 km resolution #
# Hesperiidae
# grd_100_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_100_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_100_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_100_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_100_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_100, mapping=aes(fill=log(Buff100_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# # 200 km resolution #
# # Hesperiidae
# grd_200_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_200_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_200_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_200_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_200_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=log(Buff200_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_200_p1, grd_200_p2, grd_200_p3, grd_200_p4, grd_200_p5, nrow=3, ncol=2)
#
# # 400 km resolution #
# # Hesperiidae
# grd_400_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_400_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_400_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_400_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_400_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=log(Buff400_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_400_p1, grd_400_p2, grd_400_p3, grd_400_p4, grd_400_p5, nrow=3, ncol=2)
#
# # 800 km resolution #
# # Hesperiidae
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_hes)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_lyc)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_nym)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_800_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_pap)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_800_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=log(Buff800_pie)), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,6), midpoint=2.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, grd_800_p4, grd_800_p5, nrow=3, ncol=2)
##### FAMILIES LEVEL COMPLETENESS #####
# One map per butterfly family showing inventory completeness (a 0-1 ratio)
# per 100 km grid cell, built three times: all records, specimens only and
# observations only. `land`, `grd_100` and the chi.* panels are created
# earlier in the script.

# Build one completeness map for column `column` of grid `grd`.
# `legend` toggles the fill legend (only the first observation map shows it);
# `tag` is the panel label printed by labs().
completeness_map <- function(grd, column, tag = " ", legend = FALSE) {
  ggplot() +
    geom_sf(data = land, fill = "white", color = "black") +
    geom_sf(data = grd, mapping = aes(fill = .data[[column]]), color = NA,
            alpha = 0.7, show.legend = legend) +
    scale_fill_gradient2(low = "#3c9ab2", mid = "#e8c927", high = "#f22300",
                         na.value = NA, limits = c(0, 1), midpoint = 0.5) +
    coord_sf(xlim = c(-3935000, 2957500), ylim = c(-2838545, 4563455)) +
    theme_void() +
    # fill = column keeps the legend title identical to the bare-column
    # aes() version (e.g. "hesobsRatio").
    labs(tag = tag, fill = column)
}

# 100 km resolution, observations only (panel column "(c)")
grd_100_p1_obse <- completeness_map(grd_100, "hesobsRatio", tag = "(c)", legend = TRUE)  # Hesperiidae
grd_100_p2_obse <- completeness_map(grd_100, "lycobsRatio")  # Lycaenidae
grd_100_p3_obse <- completeness_map(grd_100, "nymobsRatio")  # Nymphalidae
grd_100_p4_obse <- completeness_map(grd_100, "papobsRatio")  # Papilionidae
grd_100_p5_obse <- completeness_map(grd_100, "pieobsRatio")  # Pieridae
# tiff("100kmFamilyObse.tiff", units="in", width=7, height=10, res=350)
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# dev.off()

# 100 km resolution, specimens only (panel column "(b)")
grd_100_p1_spec <- completeness_map(grd_100, "hesspecRatio", tag = "(b)")  # Hesperiidae
grd_100_p2_spec <- completeness_map(grd_100, "lycspecRatio")  # Lycaenidae
grd_100_p3_spec <- completeness_map(grd_100, "nymspecRatio")  # Nymphalidae
grd_100_p4_spec <- completeness_map(grd_100, "papspecRatio")  # Papilionidae
grd_100_p5_spec <- completeness_map(grd_100, "piespecRatio")  # Pieridae
# tiff("100kmFamilySpec.tiff", units="in", width=7, height=10, res=350)
# grid.arrange(grd_100_p1, grd_100_p2, grd_100_p3, grd_100_p4, grd_100_p5, nrow=5, ncol=1)
# dev.off()

# 100 km resolution, all records (panel column "(a)")
grd_100_p1 <- completeness_map(grd_100, "hesRatio", tag = "(a)")  # Hesperiidae
grd_100_p2 <- completeness_map(grd_100, "lycRatio")  # Lycaenidae
grd_100_p3 <- completeness_map(grd_100, "nymRatio")  # Nymphalidae
grd_100_p4 <- completeness_map(grd_100, "papRatio")  # Papilionidae
grd_100_p5 <- completeness_map(grd_100, "pieRatio")  # Pieridae

# Final 5x4 figure: all / specimens / observations / chi panel, one row per family.
tiff("100kmFamilyAll_withChi.tiff", units="cm", width=19.5, height=20, res=350)
grid.arrange(grd_100_p1, grd_100_p1_spec, grd_100_p1_obse, chi.hes,
             grd_100_p2, grd_100_p2_spec, grd_100_p2_obse, chi.lyc,
             grd_100_p3, grd_100_p3_spec, grd_100_p3_obse, chi.nym,
             grd_100_p4, grd_100_p4_spec, grd_100_p4_obse, chi.pap,
             grd_100_p5, grd_100_p5_spec, grd_100_p5_obse, chi.pie,
             nrow=5, ncol=4)
dev.off()
# # 200 km resolution
# # Hesperiidae
# grd_200_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_200_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_200_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_200_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_200_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_200, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_200_p1, grd_200_p2, grd_200_p3, grd_200_p4, grd_200_p5, nrow=5, ncol=1)
#
# # 400 km resolution
# # Hesperiidae
# grd_400_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_400_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_400_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_400_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_400_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_400, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_400_p1, grd_400_p2, grd_400_p3, grd_400_p4, grd_400_p5, nrow=5, ncol=1)
#
# # 800 km resolution
# # Hesperiidae
# grd_800_p1 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=hesObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Lycaenidae
# grd_800_p2 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=lycObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Nymphalidae
# grd_800_p3 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=nymObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Papilionidae
# grd_800_p4 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=papObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# # Pieridae
# grd_800_p5 <- ggplot()+
# geom_sf(data=land, fill="white", color="black")+
# geom_sf(grd_800, mapping=aes(fill=pieObsRatio), color=NA, alpha=0.7)+
# scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
# coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
# theme_void()
#
# grid.arrange(grd_800_p1, grd_800_p2, grd_800_p3, grd_800_p4, grd_800_p5, nrow=5, ncol=1)
################
# BOREAL PLOTS #
################
# Overview map of the boreal/tundra biome extent used by the temporal maps below.
boreal <- st_read("borealtundra.shp")
boreal <- st_transform(boreal, st_crs(crs.1))
boreal <- st_crop(boreal, grd_100)
# BUG FIX: this device previously reused "100kmFamilyAll_withChi.tiff", silently
# overwriting the family-completeness figure written above; use a dedicated file.
tiff("borealExtent.tiff", units="cm", width=10, height=10, res=350)
# print() is required so the plot is actually rendered when the script is source()d.
print(
  ggplot()+
    geom_sf(data=land, fill="grey", color=NA)+
    geom_sf(data=boreal, fill="#234F1E", color=NA)+
    coord_sf(xlim=c(-3935000, 2957500), ylim=c(-2838545,4563455))+
    theme_void()+
    theme(legend.title = element_blank())
)
dev.off()
# 100km scale over time #
# Completeness per cumulative time slice (t1..t4). A cell is outlined in black
# once it has been sufficiently sampled (ratio >= 0.25) in every slice so far.
p1 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_100, mapping=aes(fill=t1Ratio, color=(t1Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
p2 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_100, mapping=aes(fill=t2Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# FIX: stray trailing comma removed from scale_fill_gradient2() (empty argument).
p3 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_100, mapping=aes(fill=t3Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# BUG FIX: the t4 sufficiency condition previously omitted t3Ratio, breaking
# the cumulative pattern established by p1-p3.
p4 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_100, mapping=aes(fill=t4Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25 & t4Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())  # added for consistency with p1-p3 (legend is hidden)
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 200 km scale over time #
# Same cumulative time-slice maps as the 100 km block, at 200 km resolution.
p1 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_200, mapping=aes(fill=t1Ratio, color=(t1Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
p2 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_200, mapping=aes(fill=t2Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# FIX: stray trailing comma removed from scale_fill_gradient2() (empty argument).
p3 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_200, mapping=aes(fill=t3Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# BUG FIX: the t4 sufficiency condition previously omitted t3Ratio, breaking
# the cumulative pattern established by p1-p3.
p4 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_200, mapping=aes(fill=t4Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25 & t4Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())  # added for consistency with p1-p3 (legend is hidden)
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 400 km scale #
# Same cumulative time-slice maps as the 100 km block, at 400 km resolution.
p1 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_400, mapping=aes(fill=t1Ratio, color=(t1Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
p2 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_400, mapping=aes(fill=t2Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# FIX: stray trailing comma removed from scale_fill_gradient2() (empty argument).
p3 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_400, mapping=aes(fill=t3Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# BUG FIX: the t4 sufficiency condition previously omitted t3Ratio, breaking
# the cumulative pattern established by p1-p3.
p4 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_400, mapping=aes(fill=t4Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25 & t4Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())  # added for consistency with p1-p3 (legend is hidden)
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
# 800 km scale #
# Same cumulative time-slice maps as the 100 km block, at 800 km resolution.
p1 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_800, mapping=aes(fill=t1Ratio, color=(t1Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
p2 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_800, mapping=aes(fill=t2Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# FIX: stray trailing comma removed from scale_fill_gradient2() (empty argument).
p3 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_800, mapping=aes(fill=t3Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())
# BUG FIX: the t4 sufficiency condition previously omitted t3Ratio, breaking
# the cumulative pattern established by p1-p3.
p4 <- ggplot()+
  geom_sf(data=land, fill="white", color="black")+
  geom_sf(data=boreal, fill="grey", color=NA)+
  geom_sf(grd_800, mapping=aes(fill=t4Ratio, color=(t1Ratio>=0.25 & t2Ratio>=0.25 & t3Ratio>=0.25 & t4Ratio>=0.25)), alpha=0.7,
          show.legend=FALSE)+
  scale_fill_gradient2(low="#3c9ab2", mid="#e8c927", high="#f22300", na.value=NA, limits=c(0,1), midpoint=0.5)+
  scale_color_manual(name="T1 Sufficiently Sampled", values=setNames(c("black", "white"), c(TRUE, FALSE)))+
  coord_sf(xlim=c(-3935000, 2957500), ylim=c(-260000,4563455))+
  theme_void()+
  theme(legend.title = element_blank())  # added for consistency with p1-p3 (legend is hidden)
grid.arrange(p1, p2, p3, p4, nrow=1, ncol=4)
|
# Weighted habitat replaceability index: weights each indicator record by the
# number of polygons of `geometry` it falls inside, then delegates to mhri()
# with the weighted mean / weighted covariance of the indicator columns.
#
# habitat     habitat data, passed through to mhri()
# geometry    SpatialPolygons used to derive weights; NULL -> plain mhri()
# indicators  SpatialPointsDataFrame of indicator values
# forecast    if TRUE (and indicators2 is NULL) the columns of `indicators`
#             come in value/variance pairs, so only the first half are used
# indicators2 optional second indicator set (only affects the column count)
# ...         forwarded to mhri()
whri <- function(habitat, geometry = NULL, indicators, forecast = FALSE, indicators2 = NULL,
    ...) {
  indispdf <- indicators
  if (inherits(geometry, "SpatialPolygons")) {
    # Weight = number of polygons each indicator point intersects.
    w2 <- over(indicators, geometry, returnList = TRUE)
    indispdf$weight <- sapply(w2, length)
  } else if (inherits(geometry, "SpatialPoints")) {
    # NOTE(review): after this message execution falls through with no
    # weights set, so cov.wt() below will error — pre-existing behavior kept.
    print("points not implemented yet")
  } else if (is.null(geometry)) {
    # BUG FIX: the original forwarded meanPoly/covPoly here, but those are
    # only computed further down, so this branch always errored under lazy
    # evaluation. Fall back to plain mhri() and let it derive them itself.
    return(mhri(indicators, habitat, forecast = forecast, ...))
  } else stop(paste("weighted eHab not possible, class of geometry:", class(geometry)))
  ndim <- dim(indicators)[2]
  # Forecast mode stores value/variance pairs -> only half the columns are indicators.
  nvar <- if (forecast && is.null(indicators2)) ndim / 2 else ndim
  meanPoly <- sapply(indispdf@data[, 1:nvar], weight = indispdf$weight,
      FUN = function(x, weight) weighted.mean(x, weight))
  covPoly <- cov.wt(indispdf@data[, 1:nvar], indispdf$weight)$cov
  mhri(indicators, habitat, meanPoly = meanPoly, covPoly = covPoly, forecast = forecast, ...)
}
#whri2(pa, mammr, indispdf1)
|
/old/R/whri.R
|
no_license
|
javimarlop/eHabitat
|
R
| false
| false
| 1,012
|
r
|
# Weighted habitat replaceability index: weights each indicator record by the
# number of polygons of `geometry` it falls inside, then delegates to mhri()
# with the weighted mean / weighted covariance of the indicator columns.
#
# habitat     habitat data, passed through to mhri()
# geometry    SpatialPolygons used to derive weights; NULL -> plain mhri()
# indicators  SpatialPointsDataFrame of indicator values
# forecast    if TRUE (and indicators2 is NULL) the columns of `indicators`
#             come in value/variance pairs, so only the first half are used
# indicators2 optional second indicator set (only affects the column count)
# ...         forwarded to mhri()
whri <- function(habitat, geometry = NULL, indicators, forecast = FALSE, indicators2 = NULL,
    ...) {
  indispdf <- indicators
  if (inherits(geometry, "SpatialPolygons")) {
    # Weight = number of polygons each indicator point intersects.
    w2 <- over(indicators, geometry, returnList = TRUE)
    indispdf$weight <- sapply(w2, length)
  } else if (inherits(geometry, "SpatialPoints")) {
    # NOTE(review): after this message execution falls through with no
    # weights set, so cov.wt() below will error — pre-existing behavior kept.
    print("points not implemented yet")
  } else if (is.null(geometry)) {
    # BUG FIX: the original forwarded meanPoly/covPoly here, but those are
    # only computed further down, so this branch always errored under lazy
    # evaluation. Fall back to plain mhri() and let it derive them itself.
    return(mhri(indicators, habitat, forecast = forecast, ...))
  } else stop(paste("weighted eHab not possible, class of geometry:", class(geometry)))
  ndim <- dim(indicators)[2]
  # Forecast mode stores value/variance pairs -> only half the columns are indicators.
  nvar <- if (forecast && is.null(indicators2)) ndim / 2 else ndim
  meanPoly <- sapply(indispdf@data[, 1:nvar], weight = indispdf$weight,
      FUN = function(x, weight) weighted.mean(x, weight))
  covPoly <- cov.wt(indispdf@data[, 1:nvar], indispdf$weight)$cov
  mhri(indicators, habitat, meanPoly = meanPoly, covPoly = covPoly, forecast = forecast, ...)
}
#whri2(pa, mammr, indispdf1)
|
# Arithmetic regression tests for memuse `mu` objects mixed with plain
# numerics and base-R object_size values.
library(memuse)

# Abort if two values are not (numerically) equal.
same <- function(x, y) stopifnot(all.equal(x, y))

# Fixtures: two SI-prefixed memuse sizes plus a 100-byte object_size.
x <- mu(5000, prefix = "SI")
y <- mu(4001, prefix = "SI")
z <- 100
class(z) <- "object_size"

# Addition
same(mu.size(x + y), 9.001)
same(mu.size(x + 1000), 6)
same(mu.size(1000 + x), 6)
same(mu.size(x + z), 5.1)
same(mu.size(z + x), 5.1)

# Subtraction
same(mu.size(x - y), 999)
same(mu.size(x - 1000), 4)
same(mu.size(10000 - x), 5)
same(mu.size(x - z), 4.9)
same(mu.size(51 * z - x), 100)

# Multiplication
same(mu.size(x * y), 20.005)
same(mu.size(x * 2), 10)
same(mu.size(2 * x), 10)
same(mu.size(x * z), 500)
same(mu.size(z * x), 500)

# Division
same(x / y, 5 / 4.001)
same(mu.size(x / 2), 2.5)
same(10000 / x, 2)
same(x / z, 50)
same(z / x, 0.02)

# Powers and summation
same(mu.size(x^2), 25)
same(mu.size(x^x), Inf)
same(mu.size(sum(x, y, 999)), 10)
|
/tests/arithmetic.R
|
permissive
|
shinra-dev/memuse
|
R
| false
| false
| 704
|
r
|
# Arithmetic regression tests for memuse `mu` objects mixed with plain
# numerics and base-R object_size values.
library(memuse)
# Abort if two values are not (numerically) equal.
same = function(x, y) stopifnot(all.equal(x, y))
# Fixtures: two SI-prefixed memuse sizes plus a 100-byte object_size.
x = mu(5000, prefix="SI")
y = mu(4001, prefix="SI")
z = 100
class(z) = "object_size"
# Addition: mu+mu, mu+numeric (both orders), mu+object_size (both orders)
same(mu.size(x+y), 9.001)
same(mu.size(x+1000), 6)
same(mu.size(1000+x), 6)
same(mu.size(x+z), 5.1)
same(mu.size(z+x), 5.1)
# Subtraction
same(mu.size(x-y), 999)
same(mu.size(x-1000), 4)
same(mu.size(10000-x), 5)
same(mu.size(x-z), 4.9)
same(mu.size(51*z - x), 100)
# Multiplication
same(mu.size(x*y), 20.005)
same(mu.size(x*2), 10)
same(mu.size(2*x), 10)
same(mu.size(x*z), 500)
same(mu.size(z*x), 500)
# Division (mu/mu and numeric/mu yield plain numerics)
same(x/y, 5/4.001)
same(mu.size(x/2), 2.5)
same(10000/x, 2)
same(x/z, 50)
same(z/x, 0.02)
# Powers and summation (x^x overflows to Inf)
same(mu.size(x^2), 25)
same(mu.size(x^x), Inf)
same(mu.size(sum(x, y, 999)), 10)
|
## Coursera "Data Analysis" assignment 2: activity recognition on the
## Samsung / UCI HAR accelerometer data with random forests.
## NOTE(review): setwd() makes this script machine-specific, and
## setInternet2() is Windows-only (removed in R >= 3.3) — confirm environment.
setwd("/GitHub/dataanalysis_001/a2")
setInternet2(TRUE)
url <- "https://spark-public.s3.amazonaws.com/dataanalysis/samsungData.rda"
dest <- "raw/samsungData.rda"
download.file(url, dest)
dateDownloaded <- date()  # record when the raw data was fetched
load("raw/samsungData.rda")  # creates `samsungData`
sam <- samsungData
## Data exploration
table(is.na(sam))
colnames(sam)
lapply(sam, class)
## Data munging: factor outcome; rename the 561 features to x1..x561
sam$activity <- as.factor(sam$activity)
names(sam)[1:561] <- c(1:561)
colnames(sam) <- paste0("x", colnames(sam))
names(sam)[562:563] <- c("subject", "activity")
## Load Libraries
library(tree)
library(randomForest)
library(ipred)
library(boot)
## Split data by subject into train / validate / test.
## BUG FIX: `sam$subject == c(27,28,29,30)` recycles the RHS and keeps only
## every 4th matching row; %in% selects all rows of those subjects.
t <- sam[sam$subject %in% c(27, 28, 29, 30), ]   # held-out test subjects
nont <- sam[!sam$subject %in% t$subject, ]
test <- sam[!sam$subject %in% nont$subject, ]
## BUG FIX: the original built this mask from `sam$subject` (wrong length for
## indexing `nont`) and had the same `==` recycling bug.
t <- nont[nont$subject %in% c(22, 23, 25, 26), ]  # validation subjects
train <- nont[!nont$subject %in% t$subject, ]
validate <- nont[!nont$subject %in% train$subject, ]
trainless <- train[, c(1:561, 563)]  # training set without the subject column
## Define Error Rate
## True Positive?
## Predictive features
## Predictive function: forests with and without the subject column
forest <- randomForest(activity ~ ., data = train, proximity = TRUE)
forest2 <- randomForest(activity ~ ., data = trainless, proximity = TRUE)
## Validate: misclassification fraction on the validation subjects
p1 <- predict(forest, validate)
p2 <- predict(forest2, validate)
sum(p1 != validate$activity) / length(validate$activity)
sum(p2 != validate$activity) / length(validate$activity)
## Graphs
par(mfrow = c(1, 2))
act <- as.numeric(as.factor(train$activity))
svd1 <- svd(scale(train[, -c(562, 563)]))
# max.1 is 296
# max.2 is 249
plot(svd1$v[, 2], col = act, pch = 19, cex = 1)
hist(act)
## Restore the original descriptive feature names for the heatmap
names(sam) <- names(samsungData)
names(train) <- names(sam)
# heatmap(as.matrix(train[,-c(562,563)]))
## Per-subject activity counts for the training subjects.
## NOTE(review): seeding `heat` with table(c("subject", ...)) and rbind-ing
## tabulate() vectors of a different shape looks fragile — verify the output.
heat <- table(c("subject", train[train$subject == 1, ]$activity))
for (i in c(3, 5, 6, 7, 8, 11, 14, 15, 16, 17, 19, 21)) {
  heat <- rbind(heat, c(tabulate(train[train$subject == i, ]$activity), i))
}
heat <- as.data.frame(heat)
row.names(heat) <- heat$subject
heat <- heat[, 1:6]
colnames(heat) <- names(table(train$activity))
heatmap(as.matrix(heat), scale = "column", Rowv = NA, Colv = NA, ylab = "Subject",
        main = "Heatmap of Each Subject's Activity (Training Set)", col = cm.colors(256))
mtext(text = "Activity", side = 1, line = 4.2)
## Test: misclassification fraction on the held-out test subjects
p1 <- predict(forest, test)
p2 <- predict(forest2, test)
sum(p1 != test$activity) / length(test$activity)
sum(p2 != test$activity) / length(test$activity)
# Exploratory 3D scatterplots kept for reference:
# library(scatterplot3d)
# attach(train)
# scatterplot3d(train[,294],train[,295],train[,296])
# train$pcolor[train$activity=="laying"] <- "green4"
# train$pcolor[train$activity=="sitting"] <- "green3"
# train$pcolor[train$activity=="standing"] <- "green2"
# train$pcolor[train$activity=="walk"] <- "green1"
# train$pcolor[train$activity=="walkdown"] <- "green"
# train$pcolor[train$activity=="walkup"] <- "greenyellow"
# scatterplot3d(train[,294],train[,295],train[,296],color=train$pcolor)
# scatterplot3d(train[,282],train[,283],train[,284],color=train$pcolor)
# scatterplot3d(train[,559],train[,560],train[,561],color=train$pcolor, pch=19,type="h")
|
/a2/assign2.R
|
no_license
|
as3923/dataanalysis_001
|
R
| false
| false
| 2,951
|
r
|
## assign2.R -- Samsung smartphone activity recognition.
## Downloads the samsungData set, splits it by subject into train /
## validate / test partitions, fits two random forests, and reports
## misclassification rates plus a few exploratory plots.
# NOTE(review): machine-specific working directory; adjust before running.
setwd("/GitHub/dataanalysis_001/a2")
setInternet2(TRUE)  # Windows-only HTTPS shim for old R; defunct in R >= 3.3
url <- "https://spark-public.s3.amazonaws.com/dataanalysis/samsungData.rda"
dest <- "raw/samsungData.rda"
download.file(url, dest)
dateDownloaded <- date()
load("raw/samsungData.rda")
sam <- samsungData
## Data exploration (printed, not stored)
table(is.na(sam))
colnames(sam)
lapply(sam, class)
## Data munging: outcome as factor, syntactic feature names x1..x561
sam$activity <- as.factor(sam$activity)
names(sam)[1:561] <- c(1:561)
colnames(sam) <- paste0("x", colnames(sam))
names(sam)[562:563] <- c("subject", "activity")
## Load Libraries
library(tree)
library(randomForest)
library(ipred)
library(boot)
## Split data (train, test, validate) by subject, so that no subject
## appears in more than one partition.
# BUGFIX: the original used `sam$subject == c(27,28,29,30)`, which
# recycles the 4-vector against the whole column and silently keeps only
# the rows where position and value happen to line up; %in% is the
# correct set-membership test.
held <- sam[sam$subject %in% c(27, 28, 29, 30), ]
nont <- sam[!sam$subject %in% held$subject, ]
test <- sam[!sam$subject %in% nont$subject, ]
# BUGFIX: the original indexed `nont` with a mask built from `sam`
# (different number of rows) and again used == instead of %in%.
held <- nont[nont$subject %in% c(22, 23, 25, 26), ]
train <- nont[!nont$subject %in% held$subject, ]
validate <- nont[!nont$subject %in% train$subject, ]
trainless <- train[, c(1:561, 563)]  # features + activity, subject dropped
## Define Error Rate
## True Positive?
## Predictive features
## Predictive function
forest <- randomForest(activity ~ ., data = train, prox = TRUE)
forest2 <- randomForest(activity ~ ., data = trainless, prox = TRUE)
## Validate: misclassification rate of each model on the validation set
p1 <- predict(forest, validate)
p2 <- predict(forest2, validate)
sum(p1 != validate$activity) / length(validate$activity)
sum(p2 != validate$activity) / length(validate$activity)
## Graphs
par(mfrow = c(1, 2))
act <- as.numeric(as.factor(train$activity))
svd1 <- svd(scale(train[, -c(562, 563)]))
# max.1 is 296
# max.2 is 249
plot(svd1$v[, 2], col = act, pch = 19, cex = 1)
hist(act)
## Restore the original variable names before building the heatmap table
names(sam) <- names(samsungData)
names(train) <- names(sam)
# heatmap(as.matrix(train[,-c(562,563)]))
# NOTE(review): seeds the counts table from subject 1's activities with a
# literal "subject" entry; presumably a header trick -- verify intent.
heat <- table(c("subject", train[train$subject == 1, ]$activity))
for (i in c(3, 5, 6, 7, 8, 11, 14, 15, 16, 17, 19, 21)) {
  heat <- rbind(heat, c(tabulate(train[train$subject == i, ]$activity), i))
}
heat <- as.data.frame(heat)
row.names(heat) <- heat$subject
heat <- heat[, 1:6]
colnames(heat) <- names(table(train$activity))
heatmap(as.matrix(heat), scale = "column", Rowv = NA, Colv = NA, ylab = "Subject", main = "Heatmap of Each Subject's Activity (Training Set)", col = cm.colors(256))
mtext(text = "Activity", side = 1, line = 4.2)
## Test: misclassification rate of each model on the held-out test set
p1 <- predict(forest, test)
p2 <- predict(forest2, test)
sum(p1 != test$activity) / length(test$activity)
sum(p2 != test$activity) / length(test$activity)
# Exploratory 3-D scatterplots left disabled by the original author.
# library(scatterplot3d)
# attach(train)
# scatterplot3d(train[,294],train[,295],train[,296])
# train$pcolor[train$activity=="laying"] <- "green4"
# train$pcolor[train$activity=="sitting"] <- "green3"
# train$pcolor[train$activity=="standing"] <- "green2"
# train$pcolor[train$activity=="walk"] <- "green1"
# train$pcolor[train$activity=="walkdown"] <- "green"
# train$pcolor[train$activity=="walkup"] <- "greenyellow"
# scatterplot3d(train[,294],train[,295],train[,296],color=train$pcolor)
# scatterplot3d(train[,282],train[,283],train[,284],color=train$pcolor)
# scatterplot3d(train[,559],train[,560],train[,561],color=train$pcolor, pch=19,type="h")
|
# R scripts for extracting and plotting data stored in Mplus graphic
# information in GH5 files. Uses the rhdf5 package for loading the
# the GH5 file.
#
# Version history:
# 2013-09-13 File Version 3 for Mplus Version 7.3
# 2014-04-30 Fix for sample and estimated means.
# 2014-10-07 Fix IRT ICC and IIC functions, turning properties into integers
# 2014-10-08 Add functions for Discrete survival curves
# 2014-11-20 Fix estimated probabilities function, turning categories into integers.
# 2014-11-21 Add legend to plot of estimated probabilities.
#
#
# Written by: Thuy Nguyen
# Muthen & Muthen
#
# Reference:
#
# Bernd Fischer and Gregoire Pau (). rhdf5: HDF5 interface to R. R
# package version 2.4.0.
#
# Make sure the rhdf5 package is available; if it is missing, try to
# install it from Bioconductor and fail hard when that also fails.
have_rhdf5 <- require(rhdf5, quietly = TRUE)
if (have_rhdf5) {
  print("Loaded rhdf5 package")
} else {
  print("trying to install rhdf5 from bioconductor.org")
  source("http://bioconductor.org/biocLite.R")
  biocLite("rhdf5")
  if (require(rhdf5)) {
    print("Loaded missing rhdf5 package ")
  } else {
    stop("could not install rhdf5")
  }
}
##########################################################################
#
# mplus.view.plots - loads the file and lists all available plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.view.plots('ex.gh5')
#
mplus.view.plots <- function(file) {
  # Convenience alias: delegates to mplus.load(), which prints the
  # available plot / extraction functions for the given GH5 file and
  # returns the file name invisibly.
  invisible(mplus.load(file))
}
##########################################################################
#
# mplus.load - loads the file and lists all available plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.load('ex.gh5')
#
# Load a GH5 file and print every plot / data-extraction function that is
# applicable to its contents. Returns the file name invisibly.
#
# Fixes relative to the original:
#  - a missing file now stops (the original only printed and then let
#    h5dump fail with a less helpful error; every sibling function stops)
#  - the pvalue_type listing printed the wrong function name (copy-paste)
#  - help-string typos: "survar" -> "survvar", "clasnum" -> "classnum"
mplus.load <- function(file) {
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  cat(c("\nPlot functions:\n"))
  # Raw individual-level data: histograms and scatterplots.
  if ("individual_data" %in% names(gh5)) {
    if (exists("mplus.plot.histogram",mode="function")) {
      cat(c("  - mplus.plot.histogram('"),file,"',variable,bins)\n",sep="")
    }
    if (exists("mplus.plot.scatterplot",mode="function")) {
      cat(c("  - mplus.plot.scatterplot('"),file,"',xvar,yvar)\n",sep="")
    }
  }
  # Series (process) data: the 'properties' attribute encodes the series
  # type -- 1: observed outcomes, 2: latent outcomes, 3: categorical.
  if ("process_data" %in% names(gh5) && "means_and_variances_data" %in% names(gh5)) {
    np <- length(attr(gh5$process_data,"names"))
    for (i in c(1:np)) {
      cstr <- paste(c("process"), as.character(i), sep="")
      proc <- gh5$process_data[[cstr]]
      # Replace the line below with series of low-level function calls
      cstr2 <- paste(c("process_data"),"/",cstr,"", sep="")
      prop <- mplus.get.group.attribute(file, cstr2, 'properties')
      values <- attr(gh5$means_and_variances_data,"names")
      if (prop[1] == 1) {
        sm_ind <- pmatch("y_observed_means",values,nomatch=0)
        if (sm_ind > 0 && exists("mplus.plot.sample_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.sample_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        if (sm_ind>0 && em_ind>0 && exists("mplus.plot.sample_and_estimated_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.sample_and_estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_modes",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_medians",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 2) {
        em_ind <- pmatch("e_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_modes",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_medians",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 3) {
        em_ind <- pmatch("observed_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.sample_proportions",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.sample_proportions('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("estimated_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_probabilities",mode="function")) {
          cstr2 <- paste(c("  - mplus.plot.estimated_probabilities('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
      } else {
        cstr2 <- paste(c("'"),cstr,"' has unknown series type.\n")
        cat(cstr2)
      }
    }
  }
  if ("loop_data" %in% names(gh5)) {
    if (exists("mplus.list.loop.labels",mode="function")) {
      cat(c("  - mplus.list.loop.labels('"),file,"')\n",sep="")
    }
    if (exists("mplus.plot.loop",mode="function")) {
      cat(c("  - mplus.plot.loop('"),file,"')\n",sep="")
    }
  }
  if ("irt_data" %in% names(gh5)) {
    if (exists("mplus.list.irt.variables",mode="function")) {
      cat(c("  - mplus.list.irt.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.list.irt.xvariables",mode="function")) {
      cat(c("  - mplus.list.irt.xvariables('"),file,"')\n",sep="")
    }
    if (exists("mplus.plot.irt.icc",mode="function")) {
      cat(c("  - mplus.plot.irt.icc('"),file,"',group,xvar,uvar,cat,cat2,covariates,xrange,xstep,lloc)\n",sep="")
    }
    if (exists("mplus.plot.irt.iic",mode="function")) {
      cat(c("  - mplus.plot.irt.iic('"),file,"',group,xvar,uvar,covariates,xrange,xstep,lloc)\n",sep="")
    }
    if (exists("mplus.plot.irt.tic",mode="function")) {
      cat(c("  - mplus.plot.irt.tic('"),file,"',group,xvar,uvar,covariates,xrange,xstep)\n",sep="")
    }
  }
  if ("survival_data" %in% names(gh5)) {
    if (exists("mplus.plot.survival.kaplanmeier",mode="function")) {
      cat(c("  - mplus.plot.survival.kaplanmeier('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.baseline",mode="function")) {
      cat(c("  - mplus.plot.survival.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.basehazard",mode="function")) {
      cat(c("  - mplus.plot.survival.basehazard('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.sample.logcumulative",mode="function")) {
      cat(c("  - mplus.plot.survival.sample.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.estimated.logcumulative",mode="function")) {
      # typo fix: was "survar"
      cat(c("  - mplus.plot.survival.estimated.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.kaplanmeier.vs.baseline",mode="function")) {
      cat(c("  - mplus.plot.survival.kaplanmeier.vs.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.sample.vs.estimated.logcumulative",mode="function")) {
      cat(c("  - mplus.plot.survival.sample.vs.estimated.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
  }
  if ("discrete_survival_data" %in% names(gh5)) {
    if (exists("mplus.plot.discrete.survival.kaplanmeier",mode="function")) {
      cat(c("  - mplus.plot.discrete.survival.kaplanmeier('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.discrete.survival.baseline",mode="function")) {
      cat(c("  - mplus.plot.discrete.survival.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.discrete.survival.kaplanmeier.vs.baseline",mode="function")) {
      cat(c("  - mplus.plot.discrete.survival.kaplanmeier.vs.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
  }
  if ("bayesian_data" %in% names(gh5)) {
    if ("parameters_autocorr" %in% names(gh5$bayesian_data)) {
      if ("parameters" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.list.bayesian.parameters",mode="function")) {
          cat(c("  - mplus.list.bayesian.parameters('"),file,"',parameter)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.traceplot",mode="function")) {
          cat(c("  - mplus.plot.bayesian.traceplot('"),file,"',parameter)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.distribution",mode="function")) {
          cat(c("  - mplus.plot.bayesian.distribution('"),file,"',parameter,bins)\n",sep="")
        }
      }
      if ("priors" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.plot.bayesian.prior.distribution",mode="function")) {
          cat(c("  - mplus.plot.bayesian.prior.distribution('"),file,"',parameter,bins)\n",sep="")
        }
      }
      if ("autocorrelation" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.plot.bayesian.autocorrelation",mode="function")) {
          cat(c("  - mplus.plot.bayesian.autocorrelation('"),file,"',parameter,chain)\n",sep="")
        }
      }
    }
    if ("predictive" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.list.bayesian.predictive.labels",mode="function")) {
        cat(c("  - mplus.list.bayesian.predictive.labels('"),file,"')\n",sep="")
      }
      if ("observed" %in% names(gh5$bayesian_data$predictive) && "replicated" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.plot.bayesian.predictive.scatterplot",mode="function")) {
          cat(c("  - mplus.plot.bayesian.predictive.scatterplot('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.predictive.distribution",mode="function")) {
          cat(c("  - mplus.plot.bayesian.predictive.distribution('"),file,"',plabel,bins)\n",sep="")
        }
      }
    }
    if ("plausible" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.list.bayesian.plausible.labels",mode="function")) {
        cat(c("  - mplus.list.bayesian.plausible.labels('"),file,"')\n",sep="")
      }
      if (exists("mplus.plot.bayesian.plausible.distribution",mode="function")) {
        cat(c("  - mplus.plot.bayesian.plausible.distribution('"),file,"',plauslabel,obs,bins)\n",sep="")
      }
    }
  }
  cat(c("\nPlot data extraction functions:\n"))
  if ("individual_data" %in% names(gh5)) {
    if (exists("mplus.list.variables",mode="function")) {
      cat(c("  - mplus.list.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.data",mode="function")) {
      cat(c("  - mplus.get.data('"),file,"',variable)\n",sep="")
    }
  }
  if ("process_data" %in% names(gh5)) {
    if (exists("mplus.list.processes",mode="function")) {
      cat(c("  - mplus.list.processes('"),file,"')\n",sep="")
    }
  }
  if ("loop_data" %in% names(gh5)) {
    if (exists("mplus.get.loop.estimates",mode="function")) {
      cat(c("  - mplus.get.loop.estimates('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.lowerci",mode="function")) {
      cat(c("  - mplus.get.loop.lowerci('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.upperci",mode="function")) {
      cat(c("  - mplus.get.loop.upperci('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.xvalues",mode="function")) {
      cat(c("  - mplus.get.loop.xvalues('"),file,"')\n",sep="")
    }
  }
  if ("irt_data" %in% names(gh5)) {
    if (exists("mplus.compute.irt.icc",mode="function")) {
      cat(c("  - mplus.compute.irt.icc('"),file,"',group,xvar,uvar,cat,xvector,covariates)\n",sep="")
    }
    if (exists("mplus.compute.irt.iic",mode="function")) {
      cat(c("  - mplus.compute.irt.iic('"),file,"',group,xvar,uvar,xvector,covariates)\n",sep="")
    }
  }
  # Second pass over processes: data-extraction counterparts of the
  # plotting functions listed above, keyed by the same series types.
  if ("process_data" %in% names(gh5) && "means_and_variances_data" %in% names(gh5)) {
    np <- length(attr(gh5$process_data,"names"))
    for (i in c(1:np)) {
      cstr <- paste(c("process"), as.character(i), sep="")
      proc <- gh5$process_data[[cstr]]
      # Replace the line below with series of low-level function calls
      cstr2 <- paste(c("process_data"),"/",cstr,"", sep="")
      prop <- mplus.get.group.attribute(file, cstr2, 'properties')
      values <- attr(gh5$means_and_variances_data,"names")
      if (prop[1] == 1) {
        sm_ind <- pmatch("y_observed_means",values,nomatch=0)
        if (sm_ind > 0 && exists("mplus.get.sample_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.sample_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_modes",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_medians",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 2) {
        em_ind <- pmatch("e_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_means",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_modes",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_medians",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 3) {
        em_ind <- pmatch("observed_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.sample_proportions",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.sample_proportions('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("estimated_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_probabilities",mode="function")) {
          cstr2 <- paste(c("  - mplus.get.estimated_probabilities('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
      } else {
        cstr2 <- paste(c("'"),cstr,"' has unknown series type.\n")
        cat(cstr2)
      }
    }
  }
  if ("survival_data" %in% names(gh5)) {
    if (exists("mplus.list.survival.variables",mode="function")) {
      cat(c("  - mplus.list.survival.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.survival.kaplanmeier.values",mode="function")) {
      cat(c("  - mplus.get.survival.kaplanmeier.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.compute.survival.sample.logcumulative.values",mode="function")) {
      cat(c("  - mplus.compute.survival.sample.logcumulative.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.survival.baseline.values",mode="function")) {
      # typo fix: was "clasnum"
      cat(c("  - mplus.get.survival.baseline.values('"),file,"',survvar,survvar2,classnum,time)\n",sep="")
    }
    if (exists("mplus.compute.survival.estimated.logcumulative.values",mode="function")) {
      cat(c("  - mplus.compute.survival.estimated.logcumulative.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.survival.basehazard.values",mode="function")) {
      cat(c("  - mplus.get.survival.basehazard.values('"),file,"',file,survvar,classnum,time)\n",sep="")
    }
  }
  if ("discrete_survival_data" %in% names(gh5)) {
    if (exists("mplus.list.discrete.survival.variables",mode="function")) {
      cat(c("  - mplus.list.discrete.survival.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.discrete.survival.kaplanmeier.values",mode="function")) {
      cat(c("  - mplus.get.discrete.survival.kaplanmeier.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.discrete.survival.baseline.values",mode="function")) {
      # typo fix: was "clasnum"
      cat(c("  - mplus.get.discrete.survival.baseline.values('"),file,"',survvar,survvar2,classnum,time)\n",sep="")
    }
  }
  if ("bayesian_data" %in% names(gh5)) {
    if ("parameters_autocorr" %in% names(gh5$bayesian_data)) {
      if ("parameters" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.parameter.data",mode="function")) {
          cat(c("  - mplus.get.bayesian.parameter.data('"),file,"',parameter,chain)\n",sep="")
        }
      }
      if ("priors" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.prior.parameter.data",mode="function")) {
          cat(c("  - mplus.get.bayesian.prior.parameter.data('"),file,"',parameter)\n",sep="")
        }
      }
      if ("autocorrelation" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.autocorrelation",mode="function")) {
          cat(c("  - mplus.get.bayesian.autocorrelation('"),file,"',parameter,chain)\n",sep="")
        }
      }
    }
    if ("predictive" %in% names(gh5$bayesian_data)) {
      if ("observed" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.observed",mode="function")) {
          cat(c("  - mplus.get.bayesian.predictive.observed('"),file,"',plabel)\n",sep="")
        }
      }
      if ("replicated" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.replicated",mode="function")) {
          cat(c("  - mplus.get.bayesian.predictive.replicated('"),file,"',plabel)\n",sep="")
        }
      }
      if ("pvalues" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.lowerci",mode="function")) {
          cat(c("  - mplus.get.bayesian.predictive.lowerci('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.upperci",mode="function")) {
          cat(c("  - mplus.get.bayesian.predictive.upperci('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.pvalue",mode="function")) {
          cat(c("  - mplus.get.bayesian.predictive.pvalue('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.pvalue_type",mode="function")) {
          # BUGFIX: the original printed ...predictive.pvalue here (copy-paste)
          cat(c("  - mplus.get.bayesian.predictive.pvalue_type('"),file,"',plabel)\n",sep="")
        }
      }
    }
    if ("plausible" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.get.bayesian.plausible.data",mode="function")) {
        cat(c("  - mplus.get.bayesian.plausible.data('"),file,"',plauslabel,obs)\n",sep="")
      }
    }
  }
  # Return invisibly so interactive calls don't echo the file name.
  invisible(file)
}
##########################################################################
#
# mplus.clear - clears all mplus-related data from a previous mplus_load
#
# arguments: none
#
# eg. mplus.clear()
#
#mplus.clear <- function() {
# cat(c("\nRemoved the following:\n"))
#
# if (exists("matrix_data",)) {
# rm(matrix_data, inherits=TRUE)
# cat(c(" - matrix_data\n"))
# }
# if (exists("process_data",)) {
# rm(process_data, inherits=TRUE)
# cat(c(" - process_data\n"))
# }
# if (exists("class_data")) {
# rm(class_data, inherits=TRUE)
# cat(c(" - class_data\n"))
# }
# if (exists("categorical_data")) {
# rm(categorical_data, inherits=TRUE)
# cat(c(" - categorical_data\n"))
# }
# if (exists("individual_data")) {
# rm(individual_data, inherits=TRUE)
# cat(c(" - individual_data\n"))
# }
# if (exists("means_and_variances_data")) {
# rm(means_and_variances_data, inherits=TRUE)
# cat(c(" - means_and_variances_data\n"))
# }
#}
##########################################################################
#
# mplus.list.processes - list all available processes
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.processes('ex8.1.gh5')
#
# List the process (series) names stored in a GH5 file, together with
# the functions that accept a process name.
#
# arguments:
#   file - the quoted name of an existing GH5 file
#
# returns a character vector of process names.
#
# Fix relative to the original: the error message spelled the function
# name "mplus.list.proceses".
mplus.list.processes <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # check that the series exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("mplus.list.processes requires series information.\n\nUse the SERIES option in Mplus to specify series information.\n")
  }
  cat(c("\nList of process names to use in the following functions:\n"))
  cat(c(" - mplus.plot.sample_means\n"))
  cat(c(" - mplus.plot.estimated_means\n"))
  cat(c(" - mplus.plot.sample_and_estimated_means\n"))
  cat(c(" - mplus.plot.sample_proportions\n"))
  cat(c(" - mplus.plot.estimated_probabilities\n"))
  cat(c(" - mplus.get.sample_means\n"))
  cat(c(" - mplus.get.estimated_means\n"))
  cat(c(" - mplus.get.sample_proportions\n"))
  cat(c(" - mplus.get.estimated_probabilities\n"))
  cat(c("\nProcesses:\n"))
  allpnames <- attr(gh5$process_data,"names")
  allpnames
}
##########################################################################
#
# mplus.plot.estimated_means - plot estimated means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_means('ex8.1.gh5','process1')
#
# Plot the estimated means of a process, one line per class.
#
# arguments:
#   file    - the quoted name of an existing GH5 file
#   procstr - the quoted name of a series (default 'process1')
#   ptype   - plot type passed to lines() (default 'o')
#
# Fix relative to the original: the plotting-symbol vector was sized by
# dims[1] (number of time points) instead of dims[2] (number of classes),
# causing NA symbols when classes outnumber time points; the sibling
# function mplus.plot.sample_and_estimated_means already used dims[2].
mplus.plot.estimated_means <- function(file,procstr='process1',ptype='o') {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check that the series exists
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # series type 1 (observed) or 2 (latent) carries estimated means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated means:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores on x; dims[1] = time points, dims[2] = classes
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_means(file,procstr)
  # plot the means
  cstr <- paste("Estimated means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))  # one symbol per class
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # legend: one entry per class, matching line colors/symbols
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] <- 1
    lwd[i] <- 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_modes - plot estimated modes for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_modes('ex8.1.gh5','process1')
#
# Plot the estimated modes of a process, one line per class.
#
# arguments:
#   file    - the quoted name of an existing GH5 file
#   procstr - the quoted name of a series (default 'process1')
#   ptype   - plot type passed to lines() (default 'o')
#
# Fix relative to the original: the plotting-symbol vector was sized by
# dims[1] (time points) instead of dims[2] (classes); see
# mplus.plot.sample_and_estimated_means for the correct sizing.
mplus.plot.estimated_modes <- function(file,procstr='process1',ptype='o') {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check that the series exists
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated modes.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # series type 1 (observed) or 2 (latent) carries estimated modes
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated modes:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores on x; dims[1] = time points, dims[2] = classes
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_modes(file,procstr)
  # plot the modes
  cstr <- paste("Estimated modes for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))  # one symbol per class
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # legend: one entry per class, matching line colors/symbols
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] <- 1
    lwd[i] <- 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_medians - plot estimated medians for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_medians('ex8.1.gh5','process1')
#
# Plot the estimated medians of a process, one line per class.
#
# arguments:
#   file    - the quoted name of an existing GH5 file
#   procstr - the quoted name of a series (default 'process1')
#   ptype   - plot type passed to lines() (default 'o')
#
# Fix relative to the original: the plotting-symbol vector was sized by
# dims[1] (time points) instead of dims[2] (classes); see
# mplus.plot.sample_and_estimated_means for the correct sizing.
mplus.plot.estimated_medians <- function(file,procstr='process1',ptype='o') {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check that the series exists
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated medians.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # series type 1 (observed) or 2 (latent) carries estimated medians
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated medians:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores on x; dims[1] = time points, dims[2] = classes
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_medians(file,procstr)
  # plot the medians
  cstr <- paste("Estimated medians for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))  # one symbol per class
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # legend: one entry per class, matching line colors/symbols
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] <- 1
    lwd[i] <- 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_means - plot sample means for the quoted process
#
# arguments:
# procstr - the quoted name of a series
#
# eg. mplus.plot.sample_means('ex6.1.gh5','process1')
#
# Plot the sample means of a process, one line per class.
#
# arguments:
#   file    - the quoted name of an existing GH5 file
#   procstr - the quoted name of a series (default 'process1')
#   ptype   - plot type passed to lines() (default 'o')
#
# Fix relative to the original: the plotting-symbol vector was sized by
# dims[1] (time points) instead of dims[2] (classes); see
# mplus.plot.sample_and_estimated_means for the correct sizing.
mplus.plot.sample_means <- function(file,procstr='process1',ptype='o') {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check that the series exists
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # only series type 1 (observed outcomes) carries sample means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1) ) {
    cstr <- paste("- process does not have sample means:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores on x; dims[1] = time points, dims[2] = classes
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.sample_means(file,procstr)
  # plot the means
  cstr <- paste("Sample means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))  # one symbol per class
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # legend: one entry per class, matching line colors/symbols
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] <- 1
    lwd[i] <- 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_and_estimated_means - plot sample and estimated means for the
# quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.sample_and_estimated_means('process1')
#
# Plot sample and estimated means of a process on one set of axes:
# two lines per class (sample, estimated), interleaved in the arrays as
# columns 2j-1 (sample) and 2j (estimated).
#
# arguments:
#   file    - the quoted name of an existing GH5 file
#   procstr - the quoted name of a series (default 'process1')
#   ptype   - plot type passed to lines() (default 'o')
#
# Fix relative to the original: in the legend-building loop the color of
# the sample-means entry was assigned to lcol[2*i] instead of
# lcol[2*i-1], leaving the odd (sample) legend slots at color 0.
mplus.plot.sample_and_estimated_means <- function(file,procstr='process1',ptype='o') {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check that the series exists
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample and estimated means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # only series type 1 (observed outcomes) carries sample means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1) ) {
    cstr <- paste("- process does not have sample means:",procstr,"\n\n")
    stop(cstr)
  }
  # dims[1] = time points, dims[2] = classes; build arrays twice as wide
  # so sample (odd columns) and estimated (even columns) interleave
  dims <- attr(proc$time_scores,"dim")
  xx <- array(0, c(dims[1],2*dims[2]))
  yy <- array(0, c(dims[1],2*dims[2]))
  samp <- mplus.get.sample_means(file,procstr)
  emean <- mplus.get.estimated_means(file,procstr)
  for (i in c(1:dims[1])) {
    for (j in c(1:dims[2])) {
      # set the time scores and pick up sample means
      xx[i,2*j-1] <- proc$time_scores[i,j]
      yy[i,2*j-1] <- samp[i,j]
      # set the time scores and pick up estimated means
      xx[i,2*j] <- proc$time_scores[i,j]
      yy[i,2*j] <- emean[i,j]
    }
  }
  # plot both curves for every class
  cstr <- paste("Sample and estimated means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,2*i-1],yy[,2*i-1],type=ptype,pch=symb[i],col=colors[i])
    lines(xx[,2*i],yy[,2*i],type=ptype,pch=symb[i],col=colors[i])
  }
  # legend: two entries per class (sample, estimated)
  ldesc <- array(0,c(2*dims[2]))
  lty <- array(0,c(2*dims[2]))
  lwd <- array(0,c(2*dims[2]))
  lcol <- array(0,c(2*dims[2]))
  lsymb <- array(0,c(2*dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[2*i-1] <- sprintf("Sample means, Class %d", i)
    lty[2*i-1] <- 1
    lwd[2*i-1] <- 2.5
    lsymb[2*i-1] <- symb[i]
    lcol[2*i-1] <- colors[i]  # BUGFIX: was lcol[2*i], leaving this slot 0
    ldesc[2*i] <- sprintf("Estimated means, Class %d", i)
    lty[2*i] <- 1
    lwd[2*i] <- 2.5
    lcol[2*i] <- colors[i]
    lsymb[2*i] <- symb[i]
  }
  legend('bottomright',col=lcol,pch=lsymb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_probabilities - plot estimated probabilities for the
# quoted process, summing up probabilities of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.plot.estimated_probabilities('ex8.4.gh5','process1',1,1)
#
mplus.plot.estimated_probabilities <- function(file, procstr='process1', cat1=1, cat2=1, ptype='o') {
  # Plot estimated probabilities for the quoted process, summing the
  # probabilities of categories cat1 through cat2; one curve per class.
  #
  # file    - quoted name of an existing GH5 file (required)
  # procstr - quoted name of a series
  # cat1    - first category to include
  # cat2    - last category to include (defaults to cat1 when omitted)
  # ptype   - plot type passed to lines()
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated probabilities.\n")
  }
  # if only cat1 is given, plot that single category
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 3 indicates categorical outcomes with probabilities
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 3)) {
    cstr <- paste("- process does not have estimated probabilities:", procstr, "\n\n")
    stop(cstr)
  }
  xx <- proc$time_scores
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  yy <- mplus.get.estimated_probabilities(file, procstr, cat1, cat2)
  cstr <- paste("Estimated probabilities for", procstr)
  plot(xx, yy, xlab="", ylab="", main=cstr, type='n', ylim=c(0:1))
  # bug fix: symbols are indexed per class, so the symbol array must be
  # sized by the number of classes (dims[2]), not time points (dims[1])
  symb <- array(c(21, 22, 23, 24, 25), c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[, i], yy[, i], type=ptype, pch=symb[i], col=colors[i])
  }
  ldesc <- array(0, c(dims[2]))
  lty <- array(0, c(dims[2]))
  lwd <- array(0, c(dims[2]))
  lcol <- array(0, c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] <- 1
    lwd[i] <- 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright', col=lcol, pch=symb, ldesc, lty=lty, lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_proportions - plot sample proportions for the
# quoted process, summing up proportions of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.plot.sample_proportions('ex8.4.gh5','process1',1,1)
#
mplus.plot.sample_proportions <- function(file, procstr='process1', cat1=1, cat2=1, ptype='o') {
  # Plot sample proportions for the quoted process, summing the proportions
  # of categories cat1 through cat2; one curve per class.
  #
  # file    - quoted name of an existing GH5 file (required)
  # procstr - quoted name of a series
  # cat1    - first category to include
  # cat2    - last category to include (defaults to cat1 when omitted)
  # ptype   - plot type passed to lines()
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample proportions.\n")
  }
  # if only cat1 is given, plot that single category
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    # consistency fix: raise an error like every sibling function instead of
    # printing the message with cat() and returning it invisibly
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 3 indicates categorical outcomes with proportions
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 3)) {
    cstr <- paste("- process does not have sample proportions:", procstr, "\n\n")
    stop(cstr)
  }
  xx <- proc$time_scores
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  yy <- mplus.get.sample_proportions(file, procstr, cat1, cat2)
  cstr <- paste("Sample proportions for", procstr)
  plot(xx, yy, xlab="", ylab="", main=cstr, type='n', ylim=c(0:1))
  # bug fix: symbols are indexed per class, so the symbol array must be
  # sized by the number of classes (dims[2]), not time points (dims[1])
  symb <- array(c(21, 22, 23, 24, 25), c(dims[2]))
  for (k in c(1:dims[2])) {
    lines(xx[, k], yy[, k], type=ptype, pch=symb[k])
  }
}
##########################################################################
#
# mplus.get.estimated_means - return estimated means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_means('ex8.1.gh5','process1',3)
#
mplus.get.estimated_means <- function(file, procstr='process1', classidx=0) {
  # Return estimated means for the quoted process.
  #
  # file     - quoted name of an existing GH5 file (required)
  # procstr  - quoted name of a series, defaults to 'process1'
  # classidx - class index; 0 (the default) returns all classes
  #
  # Returns a matrix with one row per time point and one column per class
  # (a single column when classidx > 0).
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated means.\n")
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 1 = observed variables, 2 = latent variables
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 1 || series_type == 2)) {
    cstr <- paste("- process does not have estimated means:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  if (classidx == 0) {
    yy <- array(0, c(dims[1], dims[2]))
  } else {
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ", dims[2], ": ", classidx, "\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1], 1))
  }
  # map the series variables onto the stored means by name
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/y_estimated_means', 'variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/e_estimated_means', 'variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated means for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$y_estimated_means$values[var_indices[j], i]
        }
      }
    } else {
      # bug fix: the single-class branch previously indexed yy[j,i] with an
      # undefined loop variable i; the single output column is yy[j,1]
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$y_estimated_means$values[var_indices[j], classidx]
      }
    }
  }
  # type 2 is estimated means for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$e_estimated_means$values[var_indices[j], i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$e_estimated_means$values[var_indices[j], classidx]
      }
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.get.sample_means - return sample means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.sample_means('ex8.1.gh5','process1',3)
#
mplus.get.sample_means <- function(file, procstr='process1', classidx=0) {
  # Return sample (observed) means for the quoted process.
  #
  # file     - quoted name of an existing GH5 file (required)
  # procstr  - quoted name of a series, defaults to 'process1'
  # classidx - class index; 0 (the default) returns all classes
  #
  # Returns a matrix with one row per time point and one column per class
  # (a single column when classidx > 0).
  if (missing(file)) {
    stop("- the name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information.\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # only series type 1 (observed variables) has sample means
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 1)) {
    cstr <- paste("- process does not have sample means:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  if (classidx == 0) {
    yy <- array(0, c(dims[1], dims[2]))
  } else {
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ", dims[2], ": ", classidx, "\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1], 1))
  }
  # map the series variables onto the stored observed means by name
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/y_observed_means', 'variables')
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  if (classidx == 0) {
    for (i in c(1:dims[2])) {
      for (j in c(1:dims[1])) {
        yy[j, i] <- gh5$means_and_variances_data$y_observed_means$values[var_indices[j], i]
      }
    }
  } else {
    # bug fix: the single-class branch previously indexed yy[j,i] with an
    # undefined loop variable i; the single output column is yy[j,1]
    for (j in c(1:dims[1])) {
      yy[j, 1] <- gh5$means_and_variances_data$y_observed_means$values[var_indices[j], classidx]
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.get.estimated_modes - return estimated modes for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_modes('ex8.1.gh5','process1',3)
#
mplus.get.estimated_modes <- function(file, procstr='process1', classidx=0) {
  # Return estimated modes for the quoted process.
  #
  # file     - quoted name of an existing GH5 file (required)
  # procstr  - quoted name of a series, defaults to 'process1'
  # classidx - class index; 0 (the default) returns all classes
  #
  # Returns a matrix with one row per time point and one column per class
  # (a single column when classidx > 0).
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated modes.\n")
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 1 = observed variables, 2 = latent variables
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 1 || series_type == 2)) {
    cstr <- paste("- process does not have estimated modes:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  if (classidx == 0) {
    yy <- array(0, c(dims[1], dims[2]))
  } else {
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ", dims[2], ": ", classidx, "\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1], 1))
  }
  # map the series variables onto the stored modes by name
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/y_estimated_modes', 'variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/e_estimated_modes', 'variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated modes for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$y_estimated_modes$values[var_indices[j], i]
        }
      }
    } else {
      # bug fix: the single-class branch previously indexed yy[j,i] with an
      # undefined loop variable i; the single output column is yy[j,1]
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$y_estimated_modes$values[var_indices[j], classidx]
      }
    }
  }
  # type 2 is estimated modes for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$e_estimated_modes$values[var_indices[j], i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$e_estimated_modes$values[var_indices[j], classidx]
      }
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.get.estimated_medians - return estimated medians for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_medians('ex8.1.gh5','process1',3)
#
mplus.get.estimated_medians <- function(file, procstr='process1', classidx=0) {
  # Return estimated medians for the quoted process.
  #
  # file     - quoted name of an existing GH5 file (required)
  # procstr  - quoted name of a series, defaults to 'process1'
  # classidx - class index; 0 (the default) returns all classes
  #
  # Returns a matrix with one row per time point and one column per class
  # (a single column when classidx > 0).
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated medians.\n")
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 1 = observed variables, 2 = latent variables
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 1 || series_type == 2)) {
    cstr <- paste("- process does not have estimated medians:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  if (classidx == 0) {
    yy <- array(0, c(dims[1], dims[2]))
  } else {
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ", dims[2], ": ", classidx, "\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1], 1))
  }
  # map the series variables onto the stored medians by name
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/y_estimated_medians', 'variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file, 'means_and_variances_data/e_estimated_medians', 'variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated medians for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$y_estimated_medians$values[var_indices[j], i]
        }
      }
    } else {
      # bug fix: the single-class branch previously indexed yy[j,i] with an
      # undefined loop variable i; the single output column is yy[j,1]
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$y_estimated_medians$values[var_indices[j], classidx]
      }
    }
  }
  # type 2 is estimated medians for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j, i] <- gh5$means_and_variances_data$e_estimated_medians$values[var_indices[j], i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j, 1] <- gh5$means_and_variances_data$e_estimated_medians$values[var_indices[j], classidx]
      }
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.get.time_scores - return time scores for the quoted process
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	procstr - the quoted name of a series
#
# eg. mplus.get.time_scores('ex6.1.gh5', 'process1')
#
mplus.get.time_scores <- function(file, procstr='process1') {
  # Return the time-score matrix stored for the quoted process.
  #
  # file    - quoted name of an existing GH5 file (required)
  # procstr - quoted name of a series, defaults to 'process1'
  if (missing(file)) {
    stop("- the name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # series information must be present in the GH5 file
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information.\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  pind <- pmatch(procstr, attr(gh5$process_data, "names"), nomatch=0)
  if (pind == 0) {
    stop(paste("- process does not exist:", procstr, "\n\n"))
  }
  proc <- gh5$process_data[[procstr]]
  # read the series properties (retained for its validation side effect)
  grp <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, grp, 'properties')
  series_type <- prop[1]
  proc$time_scores
}
##########################################################################
#
# mplus.get.estimated_probabilities - return estimated probabilities for the
# quoted process, summing up probabilities of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.get.estimated_probabilities('ex8.4.gh5','process1',1,1)
#
mplus.get.estimated_probabilities <- function(file, procstr='process1', cat1=1, cat2=1) {
  # Return estimated probabilities for the quoted process, summing the
  # probabilities of categories cat1 through cat2.
  #
  # file    - quoted name of an existing GH5 file (required)
  # procstr - quoted name of a series
  # cat1    - first category to include
  # cat2    - last category to include (defaults to cat1 when omitted)
  #
  # Returns a matrix with one row per time point and one column per class.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated probabilities.\n")
  }
  # if only cat1 is given, use that single category
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 3 indicates categorical outcomes with probabilities
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 3)) {
    cstr <- paste("- process does not have estimated probabilities:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  yy <- array(0, c(dims[1], dims[2]))
  # get indices and names of the variables in the series
  var_indices <- mplus.get.group.attribute(file, cstr2, 'var_indices')
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  # look up the process variables in categorical_data so we know how many
  # categories precede each variable in the stacked probability values
  categories <- mplus.get.group.attribute(file, 'categorical_data', 'categories')
  catvars <- mplus.get.group.attribute(file, 'categorical_data', 'var_names')
  cat_indices <- pmatch(var_names, catvars, nomatch=0)
  for (i in c(1:dims[1])) {
    for (j in c(1:dims[2])) {
      # robustness fix: seq_len() makes the "categories before variable i"
      # range explicit; the old categories[1:cat_indices[i]-1] relied on
      # operator precedence ((1:k)-1) and silent dropping of index 0
      start_index <- sum(as.integer(categories[seq_len(cat_indices[i] - 1)]))
      startk <- cat1 + start_index
      endk <- cat2 + start_index
      yy[i, j] <- sum(gh5$means_and_variances_data$estimated_probs$values[startk:endk, j])
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.get.sample_proportions - return sample proportions for the
# quoted process, summing up proportions of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.get.sample_proportions('ex8.4.gh5','process1',1,1)
#
mplus.get.sample_proportions <- function(file, procstr='process1', cat1=1, cat2=1) {
  # Return sample proportions for the quoted process, summing the
  # proportions of categories cat1 through cat2.
  #
  # file    - quoted name of an existing GH5 file (required)
  # procstr - quoted name of a series
  # cat1    - first category to include
  # cat2    - last category to include (defaults to cat1 when omitted)
  #
  # Returns a matrix with one row per time point and one column per class.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample proportions.\n")
  }
  # bug fix: the previous missing(procstr) check errored even when the caller
  # relied on the documented 'process1' default; the default makes the
  # argument optional, so no such check is performed here
  # if only cat1 is given, use that single category
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data, "names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:", procstr, "\n\n")
    stop(cstr)
  }
  proc <- gh5$process_data[[procstr]]
  # series type 3 indicates categorical outcomes with proportions
  cstr2 <- paste(c("process_data"), "/", procstr, "", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if (!(series_type == 3)) {
    cstr <- paste("- process does not have sample proportions:", procstr, "\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores, "dim")
  yy <- array(0, c(dims[1], dims[2]))
  # get indices and names of the variables in the series
  var_indices <- mplus.get.group.attribute(file, cstr2, 'var_indices')
  var_names <- mplus.get.group.attribute(file, cstr2, 'var_names')
  # look up the process variables in categorical_data so we know how many
  # categories precede each variable in the stacked proportion values
  categories <- mplus.get.group.attribute(file, 'categorical_data', 'categories')
  catvars <- mplus.get.group.attribute(file, 'categorical_data', 'var_names')
  cat_indices <- pmatch(var_names, catvars, nomatch=0)
  for (i in c(1:dims[1])) {
    for (j in c(1:dims[2])) {
      # consistency fix: coerce the category counts with as.integer() like
      # mplus.get.estimated_probabilities; seq_len() replaces the fragile
      # 1:cat_indices[i]-1 precedence idiom
      start_index <- sum(as.integer(categories[seq_len(cat_indices[i] - 1)]))
      startk <- cat1 + start_index
      endk <- cat2 + start_index
      yy[i, j] <- sum(gh5$means_and_variances_data$observed_probs$values[startk:endk, j])
    }
  }
  return(yy)
}
##########################################################################
#
# mplus.list.variables - list the variables in individual data
#
# arguments: none
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.variables('ex8.1.gh5')
#
mplus.list.variables <- function(file) {
  # Print a usage note and return the variable names stored with
  # individual data in the GH5 file.
  #
  # file - quoted name of an existing GH5 file (required)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # individual data must have been stored (TYPE=PLOT1 or PLOT3)
  if (!("individual_data" %in% names(gh5))) {
    stop("mplus.list.variables requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data.")
  }
  header <- c(
    "\nList of variable names to use in the following functions:\n",
    " - mplus.plot.histogram\n",
    " - mplus.plot.scatterplot\n",
    " - mplus.get.data\n",
    "\nVariables:\n"
  )
  for (line in header) {
    cat(c(line))
  }
  # names are stored padded; strip surrounding whitespace before returning
  var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
  var_names <- gsub("(^\\s+|\\s+$)", "", var_names, perl=TRUE)
  var_names
}
##########################################################################
#
# mplus.get.data - return the individual data for the quoted variable
#
# arguments:
# file - the quoted name of an existing GH5 file
# v - name of variable to plot
#
# eg. mplus.get.data('ex8.1.gh5','y1')
#
mplus.get.data <- function(file, v) {
  # Return the individual-data values recorded for the named variable.
  #
  # file - quoted name of an existing GH5 file (required)
  # v    - name of the variable to extract (case-insensitive)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # individual data must have been stored (TYPE=PLOT1 or PLOT3)
  if (!("individual_data" %in% names(gh5))) {
    stop("mplus.get.data requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data.")
  }
  if (missing(v)) {
    stop("mplus.get.data requires the name of a variable.\n\nUse mplus.list.variables() to get the list of variable names.")
  }
  # variable names are stored uppercased
  target <- toupper(v)
  var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
  idx <- pmatch(target, var_names, nomatch=0)
  if (idx == 0) {
    stop(paste(c("Unknown variable:"), target, "\n"))
  }
  # rows of raw_data are variables; columns are observations
  gh5$individual_data$raw_data[idx, ]
}
##########################################################################
#
# mplus.plot.scatterplot - plot the scatterplot for the 2 quoted variables
#
# arguments:
# file - the quoted name of an existing GH5 file
# xv - name of variable on the x-axis
# yv - name of variable on the y-axis
#
# eg. mplus.plot.scatterplot('ex8.1.gh5','y1','y2')
#
mplus.plot.scatterplot <- function(file, xv, yv) {
  # Draw a scatterplot of one individual-data variable against another.
  #
  # file - quoted name of an existing GH5 file (required)
  # xv   - variable plotted on the x-axis
  # yv   - variable plotted on the y-axis
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # individual data must have been stored (TYPE=PLOT1 or PLOT3)
  if (!("individual_data" %in% names(gh5))) {
    stop("mplus.plot.scatterplot requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data\nfor scatterplots.")
  }
  if (missing(xv) || missing(yv)) {
    stop("mplus.plot.scatterplot requires the names of two variables.\n\nUse mplus.list.variables() to get the list of variable names.")
  }
  # variable names are stored uppercased
  xvar <- toupper(xv)
  yvar <- toupper(yv)
  var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
  xindex <- pmatch(xvar, var_names, nomatch=0)
  yindex <- pmatch(yvar, var_names, nomatch=0)
  if (xindex == 0) {
    stop(paste(c("Unknown x-variable:"), xvar, "\n"))
  }
  if (yindex == 0) {
    stop(paste(c("Unknown y-variable:"), yvar, "\n"))
  }
  # fetch both columns and plot one against the other
  xx <- mplus.get.data(file, xvar)
  yy <- mplus.get.data(file, yvar)
  plot(xx, yy, xlab=xvar, ylab=yvar)
}
##########################################################################
#
# mplus.plot.histogram - plot the histogram for the quoted variable, using the
# specified number of bins (the default is 10 bins)
#
# arguments:
# file - the quoted name of an existing GH5 file
# v - name of variable to plot
# bins - the number of bins to use
#
# eg. mplus.plot.histogram('ex8.1.gh5','y1',5)
#
mplus.plot.histogram <- function(file, v, bins=10) {
  # Plot a histogram of the quoted variable using equally spaced bins.
  #
  # file - quoted name of an existing GH5 file (required)
  # v    - name of the variable to plot (case-insensitive)
  # bins - number of bins to use (default 10, must be > 0)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # individual data must have been stored (TYPE=PLOT1 or PLOT3)
  if (!("individual_data" %in% names(gh5))) {
    stop("mplus.plot.histogram requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data\nfor histograms.")
  }
  if (missing(v)) {
    stop("mplus.plot.histogram requires the name of a variable.\n\nUse mplus.list.variables() to get the list of variable names.")
  }
  if (bins <= 0) {
    stop("The number of bins should be greater than 0.")
  }
  # variable names are stored uppercased
  var <- toupper(v)
  var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
  index <- pmatch(var, var_names, nomatch=0)
  if (index == 0) {
    cstr <- paste(c("Unknown variable:"), var, "\n")
    stop(cstr)
  }
  xx <- mplus.get.data(file, v)
  rng <- range(xx)
  # robustness fix: when every value is identical, seq() cannot produce
  # strictly increasing breaks and hist() fails with an opaque error;
  # report the problem clearly instead
  if (rng[1] == rng[2]) {
    cstr <- paste(c("All values are identical for variable:"), var, "\n")
    stop(cstr)
  }
  cstr <- paste(c("Histogram of"), var)
  hist(xx, breaks=seq(rng[1], rng[2], length=bins+1), col="red", main=cstr, xlab=var, right=TRUE)
}
######################################################################################################
# Functions for BAYESIAN plots
######################################################################################################
#=========================================================================
#
# mplus.list.bayesian.parameters - list the parameters in bayesian data
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.parameters('ex8.1.gh5')
#
mplus.list.bayesian.parameters <- function(file) {
  # Print the list of Bayesian parameter statements, one "[i] statement"
  # per line, and invisibly return the trimmed statement vector.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("mplus.list.bayesian.parameters requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  cat(c("\nList of parameters to use in the following functions:\n"))
  cat(c(" - mplus.plot.bayesian.trace_plots\n"))
  cat(c("\nParameters:\n"))
  # fetch the parameter statements and strip surrounding whitespace
  statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
  for (k in seq_along(statements)) {
    cat(sprintf("[%d] %s", k, statements[k]), sep="\n")
  }
  invisible(statements)
}
#=========================================================================
#
# mplus.get.bayesian.parameter.data - get the bayesian data for the given parameter/chain
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted name of a parameter or the parameter index
# chainnum - the chain number
#
# eg. mplus.get.bayesian.parameter.data('ex8.1.gh5','parameter 1',1)
#
#
# Retrieve the posterior draws for one parameter in one MCMC chain.
#
# paramstr may be a quoted parameter label (matched case-insensitively by
# prefix) or a 1-based parameter index.  Returns the vector of draws for
# the requested chain.
#
mplus.get.bayesian.parameter.data <- function(file,paramstr,chainnum) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  # dimensions of the parameters array:
  #   [1] number of parameters, [2] number of iterations, [3] number of chains
  dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
  if (is.character(paramstr)) {
    # resolve the label to an index via case-insensitive partial matching
    statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
    statements <- tolower(statements)
    paramstr <- tolower(paramstr)
    paramidx <- pmatch(paramstr, statements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("Unknown parameter:"),paramstr,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- paramstr
    if (paramidx < 1 || paramidx > dims[1]) {
      cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  # validate the chain number instead of silently indexing out of range
  # (consistent with mplus.get.bayesian.autocorrelation)
  if (chainnum < 1 || chainnum > dims[3]) {
    cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
    stop(cstr)
  }
  gh5$bayesian_data$parameters_autocorr$parameters[paramidx,,chainnum]
}
#=========================================================================
#
# mplus.get.bayesian.prior.parameter.data - get the prior data for the given parameter
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted parameter label or the parameter index
#
# eg. mplus.get.bayesian.prior.parameter.data('ex8.1.gh5',1)
#
#
# Retrieve the prior-distribution draws for one parameter.
#
# paramstr may be a quoted parameter label (matched case-insensitively by
# prefix) or a 1-based parameter index.  Returns the vector of prior draws.
#
mplus.get.bayesian.prior.parameter.data <- function(file,paramstr) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  # priors array dims: [1] number of parameters, [2] number of prior draws
  # (computed once here; the original computed it twice, redundantly)
  dims <- attr(gh5$bayesian_data$parameters_autocorr$priors,"dim")
  if (is.character(paramstr)) {
    # resolve the label to an index via case-insensitive partial matching
    statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
    statements <- tolower(statements)
    paramstr <- tolower(paramstr)
    paramidx <- pmatch(paramstr, statements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- paramstr
    if (paramidx < 1 || paramidx > dims[1]) {
      cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  gh5$bayesian_data$parameters_autocorr$priors[,paramidx]
}
#=========================================================================
#
# mplus.get.bayesian.autocorrelation - get the autocorrelation data for the given parameter
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramidx - the quoted parameter label
# chainnum - the chain number
#
# eg. mplus.get.bayesian.autocorrelation('ex8.1.gh5','parameter 1',1)
#
#
# Retrieve the autocorrelation values for one parameter in one MCMC chain.
#
# paramstr may be a quoted parameter label (matched case-insensitively by
# prefix) or a 1-based parameter index.  Returns the vector of
# autocorrelations (one value per lag) for the requested chain.
#
mplus.get.bayesian.autocorrelation <- function(file,paramstr,chainnum=1) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  # autocorrelation array dims:
  #   [1] number of lags, [2] number of parameters, [3] number of chains
  dims <- attr(gh5$bayesian_data$parameters_autocorr$autocorrelation,"dim")
  if (is.character(paramstr)) {
    # resolve the label to an index via case-insensitive partial matching
    statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
    statements <- tolower(statements)
    paramstr <- tolower(paramstr)
    paramidx <- pmatch(paramstr, statements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("Unknown parameter:"),paramstr,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- paramstr
    # bounds-check against dims[2], the number of parameters; the original
    # compared against dims[1], which is the number of lags
    if (paramidx < 1 || paramidx > dims[2]) {
      cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  # reject an out-of-range chain; the original used && here, so the test
  # could never be TRUE and invalid chain numbers slipped through
  if (chainnum < 1 || chainnum > dims[3]) {
    cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
    stop(cstr)
  }
  gh5$bayesian_data$parameters_autocorr$autocorrelation[,paramidx,chainnum]
}
#=========================================================================
#
# mplus.plot.bayesian.traceplot - list the parameters in bayesian data
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted name of a parameter
#
# eg. mplus.plot.bayesian.traceplot('ex8.1.gh5','parameter 1')
#
mplus.plot.bayesian.traceplot <- function(file,paramstr) {
# paramstr may be a quoted parameter label or a 1-based parameter index
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if bayesian data exists
if ( !("bayesian_data" %in% names(gh5)) ) {
stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
}
# get the dimensions of parameters array
# first dimension is the number of parameters
# second dimension is the number of iterations
# third dimension is the number of chains
dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
if (is.character(paramstr)) {
# resolve the label to an index via case-insensitive partial matching
lcstatements <- tolower(statements)
paramstr <- tolower(paramstr)
paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
if (paramidx == 0) {
cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
stop(cstr)
}
} else {
paramidx <- paramstr
if (paramidx < 1 || paramidx > dims[1]) {
cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
stop(cstr)
}
}
# plot label: the matched statement, trimmed of surrounding whitespace
label <- statements[paramidx]
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
# yy[iteration, chain] holds the draws; xx[iteration, chain] the iteration number
xx <- array(0, c(dims[2],dims[3]))
yy <- array(0, c(dims[2],dims[3]))
for (i in c(1:dims[3])) {
yy[,i] <- mplus.get.bayesian.parameter.data(file, paramidx, i)
}
for (i in c(1:dims[2])) {
xx[i,] <- i
}
colors <- rainbow(dims[3])
# 'ndistribution' is presumably the iteration at which the posterior
# distribution begins (end of burn-in) -- TODO confirm against Mplus docs
ndist <- mplus.get.dataset.attribute(file, 'bayesian_data/parameters_autocorr/parameters', 'ndistribution')
# plot the traceplot: empty frame first, then one colored line per chain
cstr <- paste("Trace plot of:",label)
plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
for (i in c(1:dims[3])) {
lines(xx[,i],yy[,i],col=colors[i])
}
# vertical red marker at the ndist iteration
abline(v=ndist,untf=FALSE,col='red')
}
#=========================================================================
#
# mplus.plot.bayesian.distribution - plot the histogram for the parameter, using the
# specified number of bins (the default is 100 bins)
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted name of the parameter
# bins - the number of bins to use
#
# eg. mplus.plot.bayesian.distribution('bayes.gh5','parameter 1',50)
#
mplus.plot.bayesian.distribution <- function(file,paramstr,bins=100) {
# paramstr may be a quoted parameter label or a 1-based parameter index
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if bayesian data exists
if ( !("bayesian_data" %in% names(gh5)) ) {
stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
}
if (missing(paramstr)) {
stop("- requires the parameter label or index.\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
}
# the number of bins should be greater than 0
if (bins <= 0) {
stop("The number of bins should be greater than 0.")
}
# get the dimensions of parameters array
# first dimension is the number of parameters
# second dimension is the number of iterations
# third dimension is the number of chains
dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
if (is.character(paramstr)) {
# resolve the label to an index via case-insensitive partial matching
lcstatements <- tolower(statements)
paramstr <- tolower(paramstr)
paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
if (paramidx == 0) {
cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
stop(cstr)
}
} else {
paramidx <- paramstr
if (paramidx < 1 || paramidx > dims[1]) {
cstr <- paste("Parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
stop(cstr)
}
}
label <- statements[paramidx]
# 'ndistribution' is presumably the number of leading (burn-in) iterations
# to exclude from the distribution -- TODO confirm against Mplus docs
ndist <- mplus.get.dataset.attribute(file, 'bayesian_data/parameters_autocorr/parameters', 'ndistribution')
# xxc[iteration, chain]: draws per chain; xx: post-ndist draws pooled
# across all chains into one flat vector
xxc <- array(0, c(dims[2],dims[3]))
xx <- array(0, c((dims[2]-ndist)*dims[3]))
for (i in c(1:dims[3])) {
xxc[,i] <- mplus.get.bayesian.parameter.data(file, paramidx, i)
}
# copy iterations ndist+1..dims[2] of every chain into xx
start <- 0
#print(dims)
for (i in c(1:dims[3])) {
for (j in c((ndist+1):dims[2])) {
start <- start + 1
#cstr <- paste(start, j, i)
#print(cstr)
#print(xxc[j,i])
xx[start] <- xxc[j,i]
}
}
# histogram of the pooled draws with equal-width bins over their range
cstr <- paste(c("Distribution of:"),label)
h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
# NOTE(review): xxmode can have length > 1 when bins tie for the maximum
# count; abline and sprintf below then recycle over all modes
xxmode <- h$mids[h$counts == max(h$counts)]
xxmean <- mean(xx)
xxsd <- sd(xx)
xxmedian <- median(xx)
left <- quantile(xx, 0.025)
right <- quantile(xx, 0.975)
# reference lines: mode (green), mean (brown), median (purple), 95% CI (blue)
abline(v=xxmode,untf=FALSE,col='green')
abline(v=xxmean,untf=FALSE,col='brown')
abline(v=xxmedian,untf=FALSE,col='purple')
abline(v=left,untf=FALSE,col='blue')
abline(v=right,untf=FALSE,col='blue')
modestr <- sprintf("Mode = %0.5f", xxmode)
meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
medianstr <- sprintf("Median = %0.5f", xxmedian)
lowci <- sprintf("95%% Lower CI = %0.5f", left)
uppci <- sprintf("95%% Upper CI = %0.5f", right)
ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
lcol <- c('brown','purple','green','blue','blue')
legend("topright",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
#invisible(xx)
}
#=========================================================================
#
# mplus.plot.bayesian.prior.distribution - plot the histogram for the parameter, using the
# specified number of bins (the default is 100 bins)
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted name of the parameter
# bins - the number of bins to use
#
# eg. mplus.plot.bayesian.prior.distribution('bayes.gh5','parameter 1',50)
#
#
# Plot the prior distribution histogram for one parameter, with mode/mean/
# median and 95% CI reference lines; invisibly returns the prior draws.
#
# paramstr may be a quoted parameter label (case-insensitive prefix match)
# or a 1-based parameter index.
#
mplus.plot.bayesian.prior.distribution <- function(file,paramstr,bins=100) {
  # guard against a missing file argument -- every sibling
  # mplus.plot.bayesian.* function performs this check; this one did not
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(paramstr)) {
    stop("- requires the parameter index\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  # the number of bins should be greater than 0
  if (bins <= 0) {
    stop("- the number of bins should be greater than 0")
  }
  statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
  # priors array dims: [1] number of parameters, [2] number of prior draws
  dims <- attr(gh5$bayesian_data$parameters_autocorr$priors,"dim")
  if (is.character(paramstr)) {
    # resolve the label to an index via case-insensitive partial matching
    lcstatements <- tolower(statements)
    paramstr <- tolower(paramstr)
    paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- paramstr
    if (paramidx < 1 || paramidx > dims[1]) {
      cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  plabel <- statements[paramidx]
  xx <- mplus.get.bayesian.prior.parameter.data(file, paramidx)
  # 999/998 are sentinel values: 999 marks an improper prior, 998 marks a
  # prior for which no draws are available
  if (min(xx) == 999 && max(xx) == 999) {
    stop("- prior distributions for this parameter cannot be displayed because the prior is improper")
  } else if (min(xx) == 998 && max(xx) == 998) {
    stop("- prior distributions for this parameter are not available")
  }
  cstr <- paste(c("Prior distribution of:"),plabel)
  h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
  # NOTE(review): xxmode can have length > 1 when bins tie for the maximum
  # count; abline and sprintf below then recycle over all modes
  xxmode <- h$mids[h$counts == max(h$counts)]
  xxmean <- mean(xx)
  xxsd <- sd(xx)
  xxmedian <- median(xx)
  left <- quantile(xx, 0.025)
  right <- quantile(xx, 0.975)
  # reference lines: mode (green), mean (brown), median (purple), 95% CI (blue)
  abline(v=xxmode,untf=FALSE,col='green')
  abline(v=xxmean,untf=FALSE,col='brown')
  abline(v=xxmedian,untf=FALSE,col='purple')
  abline(v=left,untf=FALSE,col='blue')
  abline(v=right,untf=FALSE,col='blue')
  modestr <- sprintf("Mode = %0.5f", xxmode)
  meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
  medianstr <- sprintf("Median = %0.5f", xxmedian)
  lowci <- sprintf("95%% Lower CI = %0.5f", left)
  uppci <- sprintf("95%% Upper CI = %0.5f", right)
  ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
  lcol <- c('brown','purple','green','blue','blue')
  legend("topright",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
  invisible(xx)
}
#=========================================================================
#
# mplus.plot.bayesian.autocorrelation - plot the autocorrelation histogram for the parameter
# for the given chain
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - the quoted name of the parameter
# chainnum - the chain number
#
# eg. mplus.plot.bayesian.autocorrelation('bayes.gh5','parameter 1',1)
#
#
# Plot the autocorrelation bar chart for one parameter in one MCMC chain;
# invisibly returns the lag labels.
#
# paramstr may be a quoted parameter label (case-insensitive prefix match)
# or a 1-based parameter index.
#
mplus.plot.bayesian.autocorrelation <- function(file,paramstr,chainnum=1) {
  # guard against a missing file argument (consistent with siblings)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists ("bayesian dat." typo in message fixed)
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(paramstr)) {
    stop("- requires the parameter label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
  # autocorrelation array dims:
  #   [1] number of lags, [2] number of parameters, [3] number of chains
  dims <- attr(gh5$bayesian_data$parameters_autocorr$autocorrelation,"dim")
  if (is.character(paramstr)) {
    # resolve the label to an index via case-insensitive partial matching
    lcstatements <- tolower(statements)
    paramstr <- tolower(paramstr)
    paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- paramstr
    # bounds-check against dims[2], the number of parameters; the original
    # compared against dims[1], which is the number of lags
    if (paramidx < 1 || paramidx > dims[2]) {
      cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  plabel <- statements[paramidx]
  # reject an out-of-range chain; the original used && here, so the test
  # could never be TRUE and invalid chain numbers slipped through
  if (chainnum < 1 || chainnum > dims[3]) {
    cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
    stop(cstr)
  }
  # bar chart of autocorrelations by lag, fixed to the [-1, 1] range
  yy <- mplus.get.bayesian.autocorrelation(file,paramidx,chainnum)
  xx <- as.character(1:dims[1])
  cstr <- paste(c("Autocorrelation (chain "),format(chainnum),c("): "),plabel)
  barplot(yy,ylim=c(-1,1),names.arg=xx,col='red',main=cstr)
  # NOTE(review): returns the lag labels rather than the autocorrelation
  # values; kept as-is for backward compatibility with existing callers
  invisible(xx)
}
#=========================================================================
#
# mplus.list.bayesian.predictive.labels - list the parameters in bayesian data
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.predictive.labels('ex8.1.gh5')
#
mplus.list.bayesian.predictive.labels <- function(file) {
  # Print usage hints and return the trimmed predictive-check labels.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  cat(c("\nList of parameters to use in the following functions:\n"))
  cat(c(" - mplus.plot.bayesian.predictive.scatterplots\n"))
  cat(c(" - mplus.plot.bayesian.predictive.distribution\n"))
  cat(c("\nPredictive labels:\n"))
  # fetch the labels, strip surrounding whitespace, and return them
  labels <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
  gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
}
#=========================================================================
#
# mplus.get.bayesian.predictive.observed - get the predictive observed data
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.observed('bayes.gh5','parameter 1')
#
#
# Return the observed values of a posterior-predictive-checking statistic.
#
# plabel may be a quoted predictive label (case-insensitive prefix match)
# or a 1-based index into the label list.
#
mplus.get.bayesian.predictive.observed <- function(file,plabel) {
  # guard against a missing file argument -- the sibling getters that take
  # a file perform this check; this one did not
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if bayesian data exists
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  if (is.character(plabel)) {
    # resolve the label to an index via case-insensitive partial matching
    statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
    statements <- tolower(statements)
    plabel <- tolower(plabel)
    paramidx <- pmatch(plabel, statements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
      stop(cstr)
    }
  } else {
    # observed is (draws x labels); bounds-check the numeric label index
    dims <- attr(gh5$bayesian_data$predictive$observed,"dim")
    paramidx <- plabel
    if (paramidx < 1 || paramidx > dims[2]) {
      cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  gh5$bayesian_data$predictive$observed[,paramidx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.replicated - get the predictive replicated data
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.replicated('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.replicated <- function(file,plabel) {
  # Return the replicated (posterior predictive) values for one statistic.
  # plabel is a quoted predictive label or a 1-based label index.
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  if (is.character(plabel)) {
    # case-insensitive prefix match of the label against the stored labels
    known <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
    idx <- pmatch(tolower(plabel), known, nomatch=0)
    if (idx == 0) {
      stop(paste(c("- unknown predictive label:"),tolower(plabel),"\n"))
    }
  } else {
    # replicated is (draws x labels); bounds-check the numeric index
    nlab <- attr(gh5$bayesian_data$predictive$replicated,"dim")[2]
    idx <- plabel
    if (idx < 1 || idx > nlab) {
      stop(paste("- predictive label index is out of range: ",idx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
    }
  }
  gh5$bayesian_data$predictive$replicated[,idx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.lowerci - get the predictive lower CI
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.lowerci('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.lowerci <- function(file,plabel) {
  # Return the lower 95% CI bound of the predictive check (row 1 of the
  # pvalues array).  plabel is a quoted label or a 1-based label index.
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  if (is.character(plabel)) {
    # case-insensitive prefix match of the label against the stored labels
    known <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
    idx <- pmatch(tolower(plabel), known, nomatch=0)
    if (idx == 0) {
      stop(paste(c("- unknown predictive label:"),tolower(plabel),"\n"))
    }
  } else {
    # pvalues is (statistics x labels); bounds-check the numeric index
    nlab <- attr(gh5$bayesian_data$predictive$pvalues,"dim")[2]
    idx <- plabel
    if (idx < 1 || idx > nlab) {
      stop(paste("- predictive label index is out of range: ",idx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
    }
  }
  gh5$bayesian_data$predictive$pvalues[1,idx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.upperci - get the predictive upper CI
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.upperci('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.upperci <- function(file,plabel) {
  # Return the upper 95% CI bound of the predictive check (row 2 of the
  # pvalues array).  plabel is a quoted label or a 1-based label index.
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  if (is.character(plabel)) {
    # case-insensitive prefix match of the label against the stored labels
    known <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
    idx <- pmatch(tolower(plabel), known, nomatch=0)
    if (idx == 0) {
      stop(paste(c("- unknown predictive label:"),tolower(plabel),"\n"))
    }
  } else {
    # pvalues is (statistics x labels); bounds-check the numeric index
    nlab <- attr(gh5$bayesian_data$predictive$pvalues,"dim")[2]
    idx <- plabel
    if (idx < 1 || idx > nlab) {
      stop(paste("- predictive label index is out of range: ",idx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
    }
  }
  gh5$bayesian_data$predictive$pvalues[2,idx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.pvalue - get the predictive pvalue
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.pvalue('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.pvalue <- function(file,plabel) {
  # Return the posterior predictive p-value (row 3 of the pvalues array).
  # plabel is a quoted predictive label or a 1-based label index.
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  gh5 <- h5dump(file, load=TRUE)
  # Bayesian results must be present in the file
  if (!("bayesian_data" %in% names(gh5))) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
  }
  if (is.character(plabel)) {
    # case-insensitive prefix match of the label against the stored labels
    known <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
    idx <- pmatch(tolower(plabel), known, nomatch=0)
    if (idx == 0) {
      stop(paste(c("- unknown predictive label:"),tolower(plabel),"\n"))
    }
  } else {
    # pvalues is (statistics x labels); bounds-check the numeric index
    nlab <- attr(gh5$bayesian_data$predictive$pvalues,"dim")[2]
    idx <- plabel
    if (idx < 1 || idx > nlab) {
      stop(paste("- predictive label index is out of range: ",idx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
    }
  }
  gh5$bayesian_data$predictive$pvalues[3,idx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.pvalue_type - get the predictive pvalue type
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.pvalue_type('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.pvalue_type <- function(file,plabel) {
# Return the p-value "type" flag for one predictive label.  The flag is
# consumed by mplus.plot.bayesian.predictive.scatterplot, where -1 selects
# the lower-right half-plane wording, +1 the upper-left, and anything else
# the two-sided wording.
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if bayesian data exists
if ( !("bayesian_data" %in% names(gh5)) ) {
stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
}
if (missing(plabel)) {
stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
}
ptypes <- mplus.get.group.attribute(file,'/bayesian_data/predictive','types')
if (is.character(plabel)) {
# resolve the label to an index via case-insensitive partial matching
statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
statements <- tolower(statements)
plabel <- tolower(plabel)
paramidx <- pmatch(plabel, statements, nomatch=0)
if (paramidx == 0) {
cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
stop(cstr)
}
} else {
# get the dimensions of parameters array
# first dimension is the number of pvalues
# NOTE(review): attr(ptypes,"dim") is NULL for a plain vector; confirm the
# attribute read from the GH5 file actually carries a dim attribute,
# otherwise this bounds check errors on dims[1]
dims <- attr(ptypes,"dim")
paramidx <- plabel
if (paramidx < 1 || paramidx > dims[1]) {
cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
stop(cstr)
}
}
ptypes[paramidx]
}
#=========================================================================
#
# mplus.plot.bayesian.predictive.scatterplot - plot the predictive checking scatterplot
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the predictive label
#
# eg. mplus.plot.bayesian.predictive.scatterplot('bayes.gh5','label 1')
#
mplus.plot.bayesian.predictive.scatterplot <- function(file,plabel) {
# Scatterplot of observed vs replicated values for a predictive check,
# with a y = x reference line and a legend showing the 95% CI of the
# difference and the posterior predictive p-value.
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if bayesian data exists
if ( !("bayesian_data" %in% names(gh5)) ) {
stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
}
if (missing(plabel)) {
stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
}
statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
# NOTE(review): attr on a plain character vector returns NULL for "dim";
# confirm the attribute read from the GH5 file carries a dim attribute,
# otherwise the numeric-index bounds check below errors on dims[1]
dims <- attr(statements,"dim")
if (is.character(plabel)) {
# resolve the label to an index via case-insensitive partial matching
lcstatements <- tolower(statements)
plabel <- tolower(plabel)
paramidx <- pmatch(plabel, lcstatements, nomatch=0)
if (paramidx == 0) {
cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
stop(cstr)
}
} else {
paramidx <- plabel
if (paramidx < 1 || paramidx > dims[1]) {
cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
stop(cstr)
}
}
# fetch both series and compute a common range so the plot is square
rep <- mplus.get.bayesian.predictive.replicated(file,paramidx)
obs <- mplus.get.bayesian.predictive.observed(file,paramidx)
omin <- min(obs)
omax <- max(obs)
rmin <- min(rep)
rmax <- max(rep)
if (omin < rmin) {
rmin <- omin
}
if (omax > rmax) {
rmax <- omax
}
plot(obs,rep,xlab='Observed',ylab='Replicated',xlim=c(rmin,rmax),ylim=c(rmin,rmax))
# print(rmin)
# print(rmax)
# green y = x reference line across the shared range
xx=c(rmin,rmax)
yy=c(rmin,rmax)
lines(xx,yy,col='green')
#text(50,50,"test")
# summary statistics for the legend
lowci <- mplus.get.bayesian.predictive.lowerci(file,paramidx)
uppci <- mplus.get.bayesian.predictive.upperci(file,paramidx)
pval <- mplus.get.bayesian.predictive.pvalue(file,paramidx)
ptype <- mplus.get.bayesian.predictive.pvalue_type(file,paramidx)
# pick the legend wording for the p-value based on the type flag
if (ptype == -1) {
text2 <- "(Proportion of Points in the Lower Right Half)";
}
else if (ptype == 1) {
text2 <- "(Proportion of Points in the Upper Left Half)";
} else {
text2 <- "(Smallest Proportion of Points in the Upper versus Lower Halves)";
}
#ldesc <- sprintf("95%% Confidence Interval for the Difference\n%0.3f %0.3f\nPosterior Predictive P-Value %0.3f\n%s",
# lowci, uppci, pval, text2)
#mtext(ldesc, side=3)
line1 <- sprintf("95%% Confidence Interval for the Difference")
line2 <- sprintf(" %0.3f %0.3f ", lowci, uppci)
line3 <- sprintf("")
line4 <- sprintf(" Posterior Predictive P-Value %0.3f ", pval)
line5 <- sprintf("")
line6 <- text2
ldesc <- c(line1,line2,line3,line4,line5,line6)
legend('topleft',ldesc,xjust=1)
title(statements[paramidx])
}
#=========================================================================
#
# mplus.plot.bayesian.predictive.distribution - plot the predictive checking distribution
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the predictive label
# bins - the number of bins, default is 10
#
# eg. mplus.plot.bayesian.predictive.distribution('bayes.gh5','label 1')
#
# Plot the posterior predictive checking distribution: a histogram of
# observed - replicated values for the given predictive label, annotated
# with the 95% CI of the difference and the posterior predictive p-value.
#
# Args:
#   file   - path to an existing GH5 file
#   plabel - predictive label (string, matched case-insensitively) or 1-based index
#   bins   - number of histogram bins (default 100)
mplus.plot.bayesian.predictive.distribution <- function(file,plabel,bins=100) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # Predictive data is only written for Bayesian runs with TYPE=PLOT2.
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
  }
  if (missing(plabel)) {
    stop("- requires the index of the predictive label\n\nUse mplus.list.bayesian.predictive.labels() to get the list of parameters.")
  }
  statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
  dims <- attr(statements,"dim")
  # Resolve plabel to a 1-based index: partial case-insensitive match for
  # strings, range check for numeric indices.
  if (is.character(plabel)) {
    lcstatements <- tolower(statements)
    plabel <- tolower(plabel)
    paramidx <- pmatch(plabel, lcstatements, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- plabel
    if (paramidx < 1 || paramidx > dims[1]) {
      cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
      stop(cstr)
    }
  }
  rep <- mplus.get.bayesian.predictive.replicated(file,paramidx)
  obs <- mplus.get.bayesian.predictive.observed(file,paramidx)
  # Common range over observed and replicated values (only consumed by the
  # leftover diagonal-line code below).
  rmin <- min(min(rep), min(obs))
  rmax <- max(max(rep), max(obs))
  # Bug fix: vals was previously built by a growing loop seeded with the
  # mis-initialized array(c(npred)); the vectorized difference is identical.
  vals <- obs - rep
  hist(vals,breaks=seq(min(vals),max(vals),length=bins+1),col="red",main=statements[paramidx],xlab='Observed - Replicated',ylab='Count')
  abline(v=median(vals),untf=FALSE,col='purple')
  # NOTE(review): this diagonal line appears to be left over from the
  # scatterplot variant of this function -- it draws in obs/rep coordinates
  # over a histogram of differences. Kept for output compatibility; confirm
  # whether it should be removed.
  lines(c(rmin,rmax),c(rmin,rmax),col='green')
  lowci <- mplus.get.bayesian.predictive.lowerci(file,paramidx)
  uppci <- mplus.get.bayesian.predictive.upperci(file,paramidx)
  pval <- mplus.get.bayesian.predictive.pvalue(file,paramidx)
  # Legend text assembled line by line (spacing preserved from the original).
  line1 <- sprintf("95%% Confidence Interval for the Difference")
  line2 <- sprintf("         %0.3f          %0.3f         ", lowci, uppci)
  line3 <- sprintf("")
  line4 <- sprintf("         Posterior Predictive P-Value %0.3f         ", pval)
  ldesc <- c(line1,line2,line3,line4)
  legend('topleft',ldesc,xjust=1)
}
#=========================================================================
#
# mplus.list.bayesian.plausible.labels - list the plausible labels in bayesian data
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.plausible.labels('ex8.1.gh5')
#
# Print and (invisibly) return the plausible-value labels stored in the
# Bayesian section of a GH5 file.
#
# Args:
#   file - path to an existing GH5 file produced by Mplus.
# Returns (invisibly): character vector of trimmed plausible labels.
mplus.list.bayesian.plausible.labels <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load = TRUE)
  # Both checks guard against runs made without TYPE=PLOT3 / FACTORS output.
  if (!("bayesian_data" %in% names(gh5))) {
    stop("mplus.list.bayesian.plausible.labels requires bayesian data and factor scores.\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
  }
  if (!("plausible" %in% names(gh5$bayesian_data))) {
    stop("mplus.list.bayesian.plausible.labels requires bayesian data factor scores.\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
  }
  cat(c("\nList of labels to use in the following functions:\n"))
  cat(c(" - mplus.plot.bayesian.plausible.distribution\n"))
  cat(c("\nPlausible labels:\n"))
  # Trim the fixed-width padding from the stored label strings before printing.
  statements <- mplus.get.group.attribute(file, 'bayesian_data/plausible', 'plauslabels')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl = TRUE)
  for (pos in seq_along(statements)) {
    cat(sprintf("[%d] %s", pos, statements[pos]), sep = "\n")
  }
  invisible(statements)
}
#=========================================================================
#
# mplus.get.bayesian.plausible.data - get plausible data for the given plausible label
#
# arguments:
# file - the quoted name of an existing GH5 file
# plauslabel - the plausible label or the index of the plausible label
# obs - the observation index or 0 for overall
#
# eg. mplus.get.bayesian.plausible.data('ex8.1.gh5',1,obs)
#
# Get the plausible values for the given plausible label.
#
# Args:
#   file       - path to an existing GH5 file
#   plauslabel - plausible label (string, matched case-insensitively) or
#                1-based index
#   obs        - observation index, or 0 (default) to pool all observations
# Returns: numeric vector of plausible values (all imputations for one
#          observation, or all observations x imputations when obs == 0).
mplus.get.bayesian.plausible.data <- function(file,plauslabel,obs=0) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # Plausible values are only written with TYPE=PLOT3 plus FACTORS.
  if ( !("bayesian_data" %in% names(gh5)) ) {
    stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
  }
  if ( !("plausible" %in% names(gh5$bayesian_data)) ) {
    stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
  }
  if (missing(plauslabel)) {
    stop("- requires the plausible label\n\nUse mplus.list.bayesian.plausible.labels() to get the list of plausible labels.")
  }
  # Dimensions of the plausible array:
  #   [1] observations, [2] imputations, [3] plausible labels.
  # Bug fix: computed up front because the obs == 0 branch below needs it
  # regardless of whether plauslabel was given as a name or an index
  # (previously it was only defined in the numeric-index branch).
  dims <- attr(gh5$bayesian_data$plausible$plausible,"dim")
  if (is.character(plauslabel)) {
    labels <- mplus.get.group.attribute(file,'bayesian_data/plausible','plauslabels')
    labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
    labels <- tolower(labels)
    plauslabel <- tolower(plauslabel)
    paramidx <- pmatch(plauslabel, labels, nomatch=0)
    if (paramidx == 0) {
      cstr <- paste(c("- unknown plausible label:"),plauslabel,"\n")
      stop(cstr)
    }
  } else {
    paramidx <- plauslabel
    if (paramidx < 1 || paramidx > dims[3]) {
      cstr <- paste("- plausible label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.plausible.labels to see the list of plausible labels.\n")
      stop(cstr)
    }
  }
  if (obs == 0) {
    # Pool everything: concatenate each observation's imputations in order.
    xx <- array(0, c(dims[1]*dims[2]))
    start <- 0
    for (i in c(1:dims[1])) {
      for (j in c(1:dims[2])) {
        start <- start + 1
        xx[start] <- gh5$bayesian_data$plausible$plausible[i,j,paramidx]
      }
    }
  } else {
    xx <- gh5$bayesian_data$plausible$plausible[obs,,paramidx]
  }
  xx
}
#=========================================================================
#
# mplus.plot.bayesian.plausible.distribution - plot the histogram for the plausible label, using the
# specified number of bins (the default is 100 bins for overall and 10 for a specific observation)
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - name or index of variable to plot
# obs - the observation number or 0
# bins - the number of bins to use
#
# eg. mplus.plot.bayesian.plausible.distribution('bayes.gh5',1,0)
#
# Plot the distribution (histogram) of plausible values for the given
# plausible label, either pooled over all observations (obs == 0) or for a
# single observation, with mode/mean/median and 95% interval reference lines.
#
# Args:
#   file       - path to an existing GH5 file
#   plauslabel - plausible label (string, matched case-insensitively) or
#                1-based index
#   obs        - observation number, or 0 for the overall distribution
#   bins       - number of histogram bins; defaults to 100 overall and 10
#                for a single observation
# Returns (invisibly): the vector of plausible values that was plotted.
mplus.plot.bayesian.plausible.distribution <- function(file,plauslabel,obs=0,bins=100) {
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if bayesian data exists
if ( !("bayesian_data" %in% names(gh5)) ) {
stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
}
# check if plausible exists
if ( !("plausible" %in% names(gh5$bayesian_data)) ) {
stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
}
if (missing(plauslabel)) {
stop("- requires the index of the plausible label\n\nUse mplus.list.bayesian.plausible.labels() to get the list of plausible labels.")
}
# Default bin count depends on whether a single observation was requested.
if (missing(bins)) {
if (obs == 0) {
bins = 100
} else {
bins = 10
}
}
# the number of bins should be greater than 0
if (bins <= 0) {
stop("- the number of bins should be greater than 0")
}
labels <- mplus.get.group.attribute(file,'bayesian_data/plausible','plauslabels')
labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
adim <- attr(labels,'dim')
# Resolve plauslabel to a 1-based index: case-insensitive partial match for
# strings, range check for numeric indices.
if (is.character(plauslabel)) {
lclabels <- tolower(labels)
plauslabel <- tolower(plauslabel)
paramidx <- pmatch(plauslabel, lclabels, nomatch=0)
if (paramidx == 0) {
cstr <- paste(c("- unknown plausible label:"),plauslabel,"\n")
stop(cstr)
}
} else {
paramidx <- plauslabel
if (paramidx < 1 || paramidx > adim[1]) {
cstr <- paste("- plausible index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.plausible.labels to see the list of plausible labels.\n")
stop(cstr)
}
}
xx <- mplus.get.bayesian.plausible.data(file,paramidx,obs)
xxmax <- max(xx)
xxmin <- min(xx)
# print(xxmax)
# print(xxmin)
if (obs == 0) {
cstr <- paste(c("Overall distribution of"),labels[paramidx])
} else {
cstr <- sprintf("Distribution of %s for Individual %d", labels[paramidx], obs)
}
# Draw the histogram and capture it so the modal bin can be located below.
h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
# Mode = midpoint of the tallest bin (can be a vector when bins tie).
xxmode <- h$mids[h$counts == max(h$counts)]
xxmean <- mean(xx)
xxsd <- sd(xx)
xxmedian <- median(xx)
# Empirical 95% interval using type=3 (nearest-observation) quantiles.
left <- quantile(xx, 0.025,type=3)
right <- quantile(xx, 0.975,type=3)
# Vertical reference lines: mode (green), mean (brown), median (purple),
# 95% interval bounds (blue).
abline(v=xxmode,untf=FALSE,col='green')
abline(v=xxmean,untf=FALSE,col='brown')
abline(v=xxmedian,untf=FALSE,col='purple')
abline(v=left,untf=FALSE,col='blue')
abline(v=right,untf=FALSE,col='blue')
modestr <- sprintf("Mode = %0.5f", xxmode)
meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
medianstr <- sprintf("Median = %0.5f", xxmedian)
lowci <- sprintf("95%% Lower CI = %0.5f", left)
uppci <- sprintf("95%% Upper CI = %0.5f", right)
ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
lcol <- c('brown','purple','green','blue','blue')
legend("topleft",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
invisible(xx)
}
######################################################################################################
# Functions for LOOP PLOT
######################################################################################################
#========================================================================
#
# mplus.list.loop.labels - list the loop variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.loop.labels('ex8.1.gh5')
#
# Print and (invisibly) return the loop-plot labels stored in a GH5 file.
#
# Args:
#   file - path to an existing GH5 file produced by Mplus.
# Returns (invisibly): character vector of trimmed loop labels.
mplus.list.loop.labels <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load = TRUE)
  # Loop data only exists for runs with TYPE=PLOT2 plus PLOT/LOOP (or MOD).
  if (!("loop_data" %in% names(gh5))) {
    stop("mplus.list.loop.labels requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT or use the MOD keyword in MODEL INDIRECT.")
  }
  cat(c("\nList of loop labels to use in the following functions:\n"))
  cat(c(" - mplus.plot.loop\n"))
  cat(c(" - mplus.get.loop.estimates\n"))
  cat(c(" - mplus.get.loop.lowerci\n"))
  cat(c(" - mplus.get.loop.upperci\n"))
  cat(c(" - mplus.get.loop.xvalues\n"))
  cat(c("\nLoop labels:\n"))
  # Trim the fixed-width padding before printing the numbered list.
  statements <- mplus.get.group.attribute(file, 'loop_data', 'labels')
  statements <- gsub("(^\\s+|\\s+$)", "", statements, perl = TRUE)
  for (pos in seq_along(statements)) {
    cat(sprintf("[%d] %s", pos, statements[pos]), sep = "\n")
  }
  invisible(statements)
}
#========================================================================
#
# mplus.get.loop.estimates - get the estimates for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.estimates('ex8.1.gh5','indirect')
#
# Return the loop-plot estimates for the given loop label.
#
# Args:
#   file    - path to an existing GH5 file
#   loopstr - loop label (string, matched case-insensitively) or 1-based
#             index (default 1)
# Returns: numeric vector of estimates, one per x point.
mplus.get.loop.estimates <- function(file,loopstr=1) {
  if (missing(file)) {
    stop("- - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste(" - file does not exist:",file)
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if loop data exists
  if ( !("loop_data" %in% names(gh5)) ) {
    stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
  }
  if (missing(loopstr)) {
    loopstr=1
  }
  if (is.character(loopstr)) {
    labels <- mplus.get.group.attribute(file,'loop_data','labels')
    labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
    labels <- tolower(labels)
    # Bug fix: the label was previously upper-cased (toupper) while the
    # stored labels are lower-cased, so a character loopstr could never
    # match. The sibling lowerci/upperci functions already use tolower here.
    loopstr <- tolower(loopstr)
    loopidx <- pmatch(loopstr, labels, nomatch=0)
    if (loopidx == 0) {
      cstr <- paste(c("- unknown loop label:"),loopstr,"\n")
      stop(cstr)
    }
  } else {
    # get the dimensions of the estimates dataset
    # first dimension is the number of loop labels
    # second dimension is the number of x points
    dims <- attr(gh5$loop_data$estimates,'dim')
    loopidx <- loopstr
    if (loopidx <= 0 || loopidx > dims[1]) {
      cstr <- paste(" - loop index is out of range: ",loopidx,"\n\nUse mplus.list.loop.labels to see the list of loop labels.\n")
      stop(cstr)
    }
  }
  gh5$loop_data$estimates[loopidx,]
}
#========================================================================
#
# mplus.get.loop.lowerci - get the lower CI values for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.lowerci('ex8.1.gh5','indirect')
#
# Return the lower confidence-band values for the given loop label.
#
# Args:
#   file    - path to an existing GH5 file
#   loopstr - loop label (string, matched case-insensitively) or 1-based
#             index (default 1)
# Returns: numeric vector of lower CI values, one per x point.
mplus.get.loop.lowerci <- function(file, loopstr = 1) {
  if (missing(file)) {
    stop("- - name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste(" - file does not exist:", file))
  }
  gh5 <- h5dump(file, load = TRUE)
  # Loop data is only written with TYPE=PLOT2 plus the PLOT/LOOP keywords.
  if (!("loop_data" %in% names(gh5))) {
    stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
  }
  if (missing(loopstr)) {
    loopstr <- 1
  }
  if (is.character(loopstr)) {
    # Case-insensitive partial match of the label against the stored names.
    stored <- mplus.get.group.attribute(file, 'loop_data', 'labels')
    stored <- tolower(gsub("(^\\s+|\\s+$)", "", stored, perl = TRUE))
    loopstr <- tolower(loopstr)
    loopidx <- pmatch(loopstr, stored, nomatch = 0)
    if (loopidx == 0) {
      stop(paste(c("- unknown loop label:"), loopstr, "\n"))
    }
  } else {
    # estimates is (number of loop labels) x (number of x points).
    dims <- attr(gh5$loop_data$estimates, 'dim')
    loopidx <- loopstr
    if (loopidx <= 0 || loopidx > dims[1]) {
      stop(paste(" - loop index is out of range: ", loopidx, "\n\nUse mplus.list.loop.labels to see the list of loop labels.\n"))
    }
  }
  gh5$loop_data$lowerci[loopidx, ]
}
#========================================================================
#
# mplus.get.loop.upperci - get the upper CI values for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.upperci('ex8.1.gh5','indirect')
#
# Return the upper confidence-band values for the given loop label.
#
# Args:
#   file    - path to an existing GH5 file
#   loopstr - loop label (string, matched case-insensitively) or 1-based
#             index (default 1)
# Returns: numeric vector of upper CI values, one per x point.
mplus.get.loop.upperci <- function(file, loopstr = 1) {
  if (missing(file)) {
    stop("- - name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste(" - file does not exist:", file))
  }
  gh5 <- h5dump(file, load = TRUE)
  # Loop data is only written with TYPE=PLOT2 plus the PLOT/LOOP keywords.
  if (!("loop_data" %in% names(gh5))) {
    stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
  }
  if (missing(loopstr)) {
    loopstr <- 1
  }
  if (is.character(loopstr)) {
    # Case-insensitive partial match of the label against the stored names.
    stored <- mplus.get.group.attribute(file, 'loop_data', 'labels')
    stored <- tolower(gsub("(^\\s+|\\s+$)", "", stored, perl = TRUE))
    loopstr <- tolower(loopstr)
    loopidx <- pmatch(loopstr, stored, nomatch = 0)
    if (loopidx == 0) {
      stop(paste(c("- unknown loop label:"), loopstr, "\n"))
    }
  } else {
    # estimates is (number of loop labels) x (number of x points).
    dims <- attr(gh5$loop_data$estimates, 'dim')
    loopidx <- loopstr
    if (loopidx <= 0 || loopidx > dims[1]) {
      stop(paste(" - loop index is out of range: ", loopidx, "\n\nUse mplus.list.loop.labels to see the list of loop labels.\n"))
    }
  }
  gh5$loop_data$upperci[loopidx, ]
}
#========================================================================
#
# mplus.get.loop.xvalues - get the x points for the loop plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.get.loop.xvalues('ex8.1.gh5')
#
# Return the x-axis points shared by all loop plots in the GH5 file.
#
# Args:
#   file - path to an existing GH5 file
# Returns: numeric vector of x values.
mplus.get.loop.xvalues <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if loop data exists
  # Bug fix: the message previously named mplus.get.loop.upperci (copy-paste).
  if ( !("loop_data" %in% names(gh5)) ) {
    stop("mplus.get.loop.xvalues requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
  }
  gh5$loop_data$xvalues
}
#========================================================================
#
# mplus.plot.loop - plot the loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopvar - the index of the loop label
#
# eg. mplus.plot.loop('ex8.1.gh5',1)
#
# Plot the loop plot for the given loop label: estimate (red) with its
# lower/upper confidence band (blue) over the loop variable on the x-axis.
#
# Args:
#   file    - path to an existing GH5 file
#   loopstr - loop label (string, matched case-insensitively) or 1-based
#             index (default 1)
mplus.plot.loop <- function(file,loopstr=1) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if loop data exists
  if ( !("loop_data" %in% names(gh5)) ) {
    stop("requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
  }
  if (missing(loopstr)) {
    loopstr=1
  }
  # properties[1] = number of loop labels, properties[2] = number of x points
  props <- mplus.get.group.attribute(file,'loop_data','properties')
  if (is.character(loopstr)) {
    # Case-insensitive partial match of the label.
    labels <- mplus.get.group.attribute(file, 'loop_data', 'labels')
    labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
    labels <- tolower(labels)
    loopstr <- tolower(loopstr)
    loopidx <- pmatch(loopstr, labels, nomatch=0)
    if (loopidx == 0) {
      cstr <- paste(c("Unknown loop label:"),loopstr,"\n")
      stop(cstr)
    }
  } else {
    if (loopstr <= 0 || loopstr > props[1]) {
      # Bug fix: the message previously referenced 'loopvar', which is not
      # defined until later in this function, so this stop() itself errored.
      cstr <- paste("Loop index is out of range: ",loopstr,"\n\nUse mplus.list.loop.labels to see the list of loop labels.\n")
      stop(cstr)
    }
    loopidx <- loopstr
  }
  labels <- mplus.get.group.attribute(file,'loop_data','labels')
  labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
  loopvar <- mplus.get.group.attribute(file,'loop_data','loop_variable')
  loopvar <- gsub("(^\\s+|\\s+$)", "", loopvar, perl=TRUE)
  # Row 1: estimates; rows 2-3: lower/upper CI. All rows share the x values.
  xx <- array(0,c(3,props[2]))
  xx[1,] <- mplus.get.loop.xvalues(file)
  xx[2,] <- mplus.get.loop.xvalues(file)
  xx[3,] <- mplus.get.loop.xvalues(file)
  yy <- array(0,c(3,props[2]))
  yy[1,] <- mplus.get.loop.estimates(file,loopidx)
  yy[2,] <- mplus.get.loop.lowerci(file,loopidx)
  yy[3,] <- mplus.get.loop.upperci(file,loopidx)
  # Set up empty axes covering all three series, then draw each line.
  cstr <- paste("Loop plot for",labels[loopidx])
  plot(xx,yy,xlab=loopvar,ylab=labels[loopidx],main=cstr,type='n')
  lines(xx[1,],yy[1,],col='red')
  lines(xx[2,],yy[2,],col='blue')
  lines(xx[3,],yy[3,],col='blue')
  grid(NULL, NULL, lty=6, col='cornsilk2')
}
######################################################################################################
# Functions for IRT plots
######################################################################################################
#========================================================================
# mplus.list.irt.variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.irt.variables('ex7.27.gh5')
#
# Print and (invisibly) return the indicator (uvar) variable names available
# for the IRT plotting functions.
#
# Args:
#   file - path to an existing GH5 file produced by Mplus.
# Returns (invisibly): character vector of trimmed indicator labels.
mplus.list.irt.variables <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load = TRUE)
  # IRT data only exists for runs made with TYPE=PLOT2.
  if (!("irt_data" %in% names(gh5))) {
    stop("IRT data is required.\n\nUse TYPE=PLOT2.")
  }
  cat(c("\nList of variables to use in the following functions:\n"))
  cat(c(" - mplus.compute.irt.icc\n"))
  cat(c(" - mplus.plot.irt.icc\n"))
  cat(c("\nVariables for 'uvar' argument:\n"))
  ulabels <- mplus.get.group.attribute(file, 'irt_data', 'ulabels')
  ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl = TRUE)
  for (pos in seq_along(ulabels)) {
    cat(sprintf("[%d] %s", pos, ulabels[pos]), sep = "\n")
  }
  invisible(ulabels)
}
#========================================================================
# mplus.list.irt.xvariables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.irt.xvariables('ex7.27.gh5')
#
# Print and (invisibly) return the factor (xvar) variable names available
# for the IRT plotting functions.
#
# Args:
#   file - path to an existing GH5 file produced by Mplus.
# Returns (invisibly): character vector of trimmed factor labels.
mplus.list.irt.xvariables <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!file.exists(file)) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  gh5 <- h5dump(file, load = TRUE)
  # IRT data only exists for runs made with TYPE=PLOT2.
  if (!("irt_data" %in% names(gh5))) {
    stop("IRT data is required.\n\nUse TYPE=PLOT2.")
  }
  cat(c("\nList of variables to use in the following functions:\n"))
  cat(c(" - mplus.compute.irt.icc\n"))
  cat(c(" - mplus.plot.irt.icc\n"))
  cat(c("\nVariables for the 'xvar' argument:\n"))
  flabels <- mplus.get.group.attribute(file, 'irt_data', 'flabels')
  flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl = TRUE)
  for (pos in seq_along(flabels)) {
    cat(sprintf("[%d] %s", pos, flabels[pos]), sep = "\n")
  }
  invisible(flabels)
}
#========================================================================
# mplus.compute.irt.icc
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (required)
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (required)
# uvar - the indicator variable, can be the variable index or the quoted variable name (required)
# cat - the category number (required)
# xvector -> the vector containing x values to use (required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
#
# eg. mplus.compute.irt.icc('ex7.27.gh5',1,'F','U1',1,seq(-3,3,0.2))
#
# Compute item characteristic curve (ICC) probabilities for one category of
# one indicator, evaluated at each value in xvector.
#
# Args:
#   file       - path to an existing GH5 file (required)
#   group      - group/class number (required)
#   xvar       - x-axis factor: 1-based index or quoted name (required)
#   uvar       - indicator variable: 1-based index or quoted name (required)
#   cat        - category number of the indicator (required)
#   xvector    - x values at which to evaluate (required)
#   covariates - values for the remaining factors/covariates; the group's
#                sample means are used when omitted
# Returns: numeric vector of probabilities, one per element of xvector.
mplus.compute.irt.icc <- function(file,group,xvar,uvar,cat,xvector,covariates) {
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if irt data exists
if ( !("irt_data" %in% names(gh5)) ) {
stop("IRT data is required.\n\nUse TYPE=PLOT2.")
}
# properties[1] - number of factors
# properties[2] - number of factors/covariates
# properties[3] - number of indicators
# properties[4] - number of classes
# properties[5] - maximum number of categories
props <- mplus.get.group.attribute(file,'irt_data','properties')
num_fx <- as.integer(props[2])
num_r <- as.integer(props[3])
max_num_cat <- as.integer(props[5])
# Factor and indicator labels, trimmed of fixed-width padding; used to
# resolve character xvar/uvar arguments below.
flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
# Resolve xvar to the factor index fidx (partial name match, upper-cased,
# or a range-checked numeric index).
if (missing(xvar)) {
stop("The x-axis variable (xvar) is required.")
} else {
if (is.character(xvar)) {
xvar <- toupper(xvar)
index <- pmatch(xvar, flabels, nomatch=0)
if (index == 0) {
cstr <- sprintf("Unknown x-variable: %s\n", xvar)
stop(cstr)
}
fidx = index
} else {
if (xvar <= 0 || xvar > num_fx) {
stop("The index for the x-variable (xvar) is out of range.")
}
fidx = xvar
}
}
# Resolve uvar to the indicator index ridx, same scheme as xvar.
if (missing(uvar)) {
stop("The indicator variable (uvar) is required.")
} else {
if (is.character(uvar)) {
uvar <- toupper(uvar)
index <- pmatch(uvar, ulabels, nomatch=0)
if (index == 0) {
cstr <- sprintf("Unknown indicator: %s\n", uvar)
stop(cstr)
}
ridx = index
} else {
if (uvar <= 0 || uvar > num_r) {
stop("The index for the indicator (uvar) is out of range.")
}
ridx = uvar
}
}
if (missing(group)) {
stop("The group index (group) is required.")
} else {
if (group <= 0 || group > props[4]) {
stop("The group index (group) is out of range.")
}
}
if (missing(xvector)) {
stop("The vector (xvector) containing values for the x-axis is required.")
}
# Default covariate values: the sample means for the requested group.
if (missing(covariates)) {
means <- mplus.get.group.dataset(file,'irt_data','mean')
covariates <- means[,group]
} else {
if (length(covariates) != num_fx) {
cstr <- sprintf("The length of the covariates vector should be %d.\nFound: %d", num_fx, length(covariates))
stop(cstr)
}
}
# Per-indicator link codes consumed by lin() -- presumably probit vs logit;
# confirm against the lin() definition.
links <- mplus.get.group.attribute(file,'categorical_data','link')
# Constant shift contributed by the covariates not on the x-axis.
# NOTE(review): loading is indexed [ridx,i,group] here but [fidx,ridx,group]
# below -- the index order looks inconsistent; confirm the layout of
# irt_data$loading against the GH5 writer.
shift <- 0.0
for (i in c(1:num_fx)) {
if (i != fidx) {
shift <- shift + covariates[i]*gh5$irt_data$loading[ridx,i,group]
}
}
# Category probability at each x:
#   first category:  F(tau_1 - eta)
#   last category:   1 - F(tau_{cat-1} - eta)
#   middle category: F(tau_cat - eta) - F(tau_{cat-1} - eta)
# where F is the link function (lin) applied to the scaled linear predictor.
prob <- array(0,c(length(xvector)))
for (i in c(1:length(xvector))) {
x <- xvector[i]
if (cat == 1) {
p <- gh5$irt_data$tau[cat,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p <- p * gh5$irt_data$scale[ridx,group]
prob[i] <- lin(p,links[ridx])
} else if (cat == max_num_cat) {
p = gh5$irt_data$tau[cat-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p = p * gh5$irt_data$scale[ridx,group]
prob[i] = 1.0 - lin(p,links[ridx])
} else {
p = gh5$irt_data$tau[cat,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p = p * gh5$irt_data$scale[ridx,group]
p2 = gh5$irt_data$tau[cat-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p2 = p2 * gh5$irt_data$scale[ridx,group]
prob[i] = lin(p,links[ridx]) - lin(p2,links[ridx])
}
}
prob
}
#========================================================================
# mplus.compute.irt.iic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (required)
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (required)
# uvar - the indicator variable, can be the variable index or the quoted variable name (required)
# xvector -> the vector containing x values to use (required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
#
# eg. mplus.compute.irt.iic('ex7.27.gh5',1,'F','U1',seq(-3,3,0.2))
#
# Compute the item information curve (IIC) for one indicator, evaluated at
# each value in xvector.
#
# Args:
#   file       - path to an existing GH5 file (required)
#   group      - group/class number (required)
#   xvar       - x-axis factor: 1-based index or quoted name (required)
#   uvar       - indicator variable: 1-based index or quoted name (required)
#   xvector    - x values at which to evaluate (required)
#   covariates - values for the remaining factors/covariates; the group's
#                sample means are used when omitted
# Returns: numeric vector of information values, one per element of xvector.
mplus.compute.irt.iic <- function(file,group,xvar,uvar,xvector,covariates) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if irt data exists
  if ( !("irt_data" %in% names(gh5)) ) {
    stop("IRT data is required.\n\nUse TYPE=PLOT2.")
  }
  # properties[1] - number of factors
  # properties[2] - number of factors/covariates
  # properties[3] - number of indicators
  # properties[4] - number of classes
  # properties[5] - maximum number of categories
  props <- mplus.get.group.attribute(file,'irt_data','properties')
  num_fx <- as.integer(props[2])
  num_r <- as.integer(props[3])
  max_num_cat <- as.integer(props[5])
  # Bug fix: flabels/ulabels were referenced below but never fetched, so any
  # character xvar/uvar argument failed with "object not found". Fetch and
  # trim them exactly as mplus.compute.irt.icc does (including the toupper
  # before matching).
  flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
  flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
  ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
  ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
  if (missing(xvar)) {
    stop("The x-axis variable (xvar) is required.")
  } else {
    if (is.character(xvar)) {
      xvar <- toupper(xvar)
      index <- pmatch(xvar, flabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown x-variable: %s\n", xvar)
        stop(cstr)
      }
      fidx = index
    } else {
      if (xvar <= 0 || xvar > num_fx) {
        stop("The index for the x-variable (xvar) is out of range.")
      }
      fidx = xvar
    }
  }
  if (missing(uvar)) {
    stop("The indicator variable (uvar) is required.")
  } else {
    if (is.character(uvar)) {
      uvar <- toupper(uvar)
      index <- pmatch(uvar, ulabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown indicator: %s\n", uvar)
        stop(cstr)
      }
      ridx = index
    } else {
      if (uvar <= 0 || uvar > num_r) {
        stop("The index for the indicator (uvar) is out of range.")
      }
      ridx = uvar
    }
  }
  if (missing(group)) {
    stop("The group index (group) is required.")
  } else {
    if (group <= 0 || group > props[4]) {
      stop("The group index (group) is out of range.")
    }
  }
  if (missing(xvector)) {
    stop("The vector (xvector) containing values for the x-axis is required.")
  }
  if (missing(covariates)) {
    # Bug fix: select the means column for the requested group (mirrors
    # mplus.compute.irt.icc); previously the whole means matrix was kept,
    # so covariates[i] indexed the wrong values for any group > 1.
    means <- mplus.get.group.dataset(file,'irt_data','mean')
    covariates <- means[,group]
  } else {
    if (length(covariates) != num_fx) {
      cstr <- sprintf("The length of the covariates vector should be %d.\nFound: %d", num_fx, length(covariates))
      stop(cstr)
    }
  }
  categories <- mplus.get.group.attribute(file,'irt_data','categories')
  links <- mplus.get.group.attribute(file,'categorical_data','link')
  # Constant shift contributed by the covariates not on the x-axis.
  # NOTE(review): loading is indexed [ridx,i,group] here but [fidx,ridx,group]
  # below, mirroring mplus.compute.irt.icc -- confirm the array layout.
  shift <- 0.0
  for (i in c(1:num_fx)) {
    if (i != fidx) {
      shift <- shift + covariates[i]*gh5$irt_data$loading[ridx,i,group]
    }
  }
  categories <- as.numeric(categories)
  # Cumulative category probabilities per x value: column 1 is 0, the last
  # column is 1, interior columns come from the link function.
  probvec <- array(0, c(length(xvector),categories[ridx]+1))
  for (i in c(1:length(xvector))) {
    x <- xvector[i]
    # (Was "probvec[1] <- 0" -- a no-op typo since the array is zero-filled.)
    probvec[i,1] <- 0
    for (j in c(2:c(categories[ridx]))) {
      fp = gh5$irt_data$tau[j-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
      fp = fp * gh5$irt_data$scale[ridx,group]
      dp = lin(fp,links[ridx])
      probvec[i,j] <- dp
    }
    probvec[i,categories[ridx]+1]=1.0
  }
  # Accumulate each category interval's information contribution, flooring
  # the interval probability at 1e-10 to avoid division by zero.
  prob <- array(0,c(length(xvector)))
  for (i in c(1:length(xvector))) {
    x <- xvector[i]
    for (j in c(2:c(categories[ridx]+1))) {
      r <- 10**(-10)
      ep = probvec[i,j] - probvec[i,j-1]
      if (ep < r) { ep <- r }
      dp = gh5$irt_data$scale[ridx,group] * gh5$irt_data$loading[fidx,ridx,group] * gh5$irt_data$scale[ridx,group] * gh5$irt_data$loading[fidx,ridx,group];
      p = (probvec[i,j] * (1-probvec[i,j])) - (probvec[i,j-1] * (1-probvec[i,j-1]))
      prob[i] <- prob[i] + p * p * dp / ep
    }
  }
  prob
}
#========================================================================
# mplus.plot.irt.icc
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required) -- 1 if not specified
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables but cat must be given (not required)
# cat - the category number
# - if not given, assume all categories for the given indicator variables
# - required if uvar not given
# cat2 - the second category number if range of categories is desired (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.icc('ex7.27.gh5',1,'F','U1',)
#
mplus.plot.irt.icc <- function(file,group=1,xvar=1,uvar,cat,cat2,covariates,xrange=3,xstep=7,lloc="top") {
	# Plot item characteristic curves (ICC) from the IRT data in a GH5 file.
	#
	# file       - quoted name of an existing GH5 file (required)
	# group      - class/group number (default 1)
	# xvar       - factor for the x-axis: index or quoted label (default 1)
	# uvar       - indicator(s): index/label or a vector of them; if missing,
	#              curves for all indicators are plotted (cat is then required)
	# cat, cat2  - category number, and optional upper category; when cat2 is
	#              given, probabilities for categories cat..cat2 are summed
	# covariates - values for all factors/covariates (defaults to sample means)
	# xrange     - x-axis range type 1..6 (+/- xrange s.d. around the mean)
	# xstep      - x-axis step type 1..11 (default 7 = 1/5 s.d.)
	# lloc       - legend location keyword passed to legend() (default "top")
	#
	# Returns the vector of x-axis values at which the curves were evaluated.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)

	# check if irt data exists (written only with TYPE=PLOT2)
	if ( !("irt_data" %in% names(gh5)) ) {
		stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
	}

	# properties[1] - number of factors
	# properties[2] - number of factors/covariates
	# properties[3] - number of indicators
	# properties[4] - number of classes
	# properties[5] - maximum number of categories
	props <- mplus.get.group.attribute(file,'irt_data','properties')

	flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
	flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
	ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
	ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)

	num_fx <- as.integer(props[2])
	num_r <- as.integer(props[3])
	max_num_cat <- as.integer(props[5])

	# Resolve the x-axis factor.  Labels are stored in upper case, so
	# character input is upper-cased before partial matching.
	if (is.character(xvar)) {
		xvar <- toupper(xvar)
		index <- pmatch(xvar, flabels, nomatch=0)
		if (index == 0) {
			cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
			stop(cstr)
		}
		fidx = index
	} else {
		if (xvar <= 0 || xvar > num_fx) {
			stop("The index for the x-variable (xvar) is out of range.")
		}
		fidx = xvar
	}

	# Resolve the indicator(s) into ridx (missing uvar means all indicators).
	if (missing(uvar)) {
	} else if (length(uvar) > 1) {
		ridx <- vector()
		for (r in c(1:length(uvar))) {
			var <- uvar[r]
			if (is.character(var)) {
				var <- toupper(var)
				index <- pmatch(var, ulabels, nomatch=0)
				if (index == 0) {
					cstr <- sprintf("Unknown indicator: %s\n", var)
					stop(cstr)
				}
				ridx[r] = index
			} else {
				if (var <= 0 || var > num_r) {
					stop("The index for the indicator in uvar is out of range.")
				}
				ridx[r] = var
			}
		}
	} else {
		if (is.character(uvar)) {
			uvar <- toupper(uvar)
			index <- pmatch(uvar, ulabels, nomatch=0)
			if (index == 0) {
				cstr <- sprintf("Unknown indicator: %s\n", uvar)
				stop(cstr)
			}
			ridx <- index
		} else {
			if (uvar <= 0 || uvar > num_r) {
				stop("The index for the indicator (uvar) is out of range.")
			}
			ridx <- uvar
		}
	}

	if (group <= 0 || group > props[4]) {
		stop("The group index (group) is out of range.")
	}

	# Default the covariate vector to the sample means for the class.
	if (missing(covariates)) {
		xmean <- mplus.get.group.dataset(file,'irt_data','mean')
		covariates <- xmean[,group]
	} else {
		if (length(covariates) != num_fx) {
			cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
			stop(cstr)
		}
	}

	categories <- mplus.get.group.attribute(file,'irt_data','categories')

	# Validate the category arguments for each of the three uvar cases.
	if (missing(uvar)) {
		# case 1: uvar not specified, we plot ICC for all variables. The category number must be given.
		if (missing(cat)) {
			stop("The category number (cat) is required when plotting ICCs for all variables.")
		}
		for (i in c(1:num_r)) {
			if (cat <= 0 || cat > categories[i]) {
				cstr <- sprintf("The category number (cat) is out of range for variable %s.", ulabels[i])
				stop(cstr)
			}
		}
		if (!(missing(cat2))) {
			if (cat > cat2) {
				cstr <- sprintf("The first category number (cat=%d) must be smaller than the second category number (cat2=%d).", cat, cat2)
				stop(cstr)
			}
			for (i in c(1:num_r)) {
				if (cat2 <= 0 || cat2 > categories[i]) {
					cstr <- sprintf("The second category number (cat2) is out of range for variable %s.", ulabels[i])
					stop(cstr)
				}
			}
		}
	} else if (length(uvar) > 1) {
		# case 2: a vector of indicators was given.
		for (r in c(1:length(ridx))) {
			if (!(missing(cat))) {
				if (cat <= 0 || cat > categories[ridx[r]]) {
					cstr <- sprintf("The category (cat) is out of range for variable %s.", ulabels[ridx[r]])
					stop(cstr)
				}
			} else {
				# cat is missing but cat2 isn't!
				if (!(missing(cat2))) {
					stop("The first category (cat) is required if the second category (cat2) is given.")
				}
			}
			if (!(missing(cat2))) {
				if (cat2 <= 0 || cat2 > categories[ridx[r]]) {
					cstr <- sprintf("The category (cat2) is out of range for variable %s.", ulabels[ridx[r]])
					stop(cstr)
				}
				if (cat > cat2) {
					cstr <- sprintf("The first category (cat=%d) must be smaller than the second category (cat2=%d).", cat, cat2)
					stop(cstr)
				}
			}
		}
	} else {
		# case 3: a single indicator; cat may be omitted to plot all categories.
		if (!(missing(cat))) {
			if (cat <= 0 || cat > categories[ridx]) {
				cstr <- sprintf("The category (cat) is out of range for variable %s.", ulabels[ridx])
				stop(cstr)
			}
		} else {
			# cat is missing but cat2 isn't!
			if (!(missing(cat2))) {
				stop("The first category (cat) is required if the second category (cat2) is given.")
			}
		}
		if (!(missing(cat2))) {
			if (cat2 <= 0 || cat2 > categories[ridx]) {
				cstr <- sprintf("The category (cat2) is out of range for variable %s.", ulabels[ridx])
				stop(cstr)
			}
			if (cat > cat2) {
				cstr <- sprintf("The first category (cat=%d) must be smaller than the second category (cat2=%d).", cat, cat2)
				stop(cstr)
			}
		}
	}

	if (!(missing(xrange))) {
		if (xrange <= 0 || xrange > 6) {
			stop("The xrange type should be between 1 and 6.")
		}
	}
	if (!(missing(xstep))) {
		if (xstep <= 0 || xstep > 11) {
			stop("The xstep type should be between 1 and 11.")
		}
	}

	# Build the x-axis grid: mean +/- xmult s.d. in increments of vstep.
	variances <- mplus.get.group.dataset(file,'irt_data','variance')
	means <- mplus.get.group.dataset(file,'irt_data','mean')
	fsd = sqrt(variances[fidx])
	xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
	vmin = means[fidx] + (-1) * xmult * fsd
	vmax = means[fidx] + xmult * fsd
	vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
	steps <- seq(vmin,vmax,by=vstep)

	if (missing(uvar)) {
		# All indicators at category cat (or summed over cat..cat2).
		prob <- array(0,c(num_r,length(steps)))
		xx <- array(0,c(num_r,length(steps)))
		if (missing(cat2)) {
			for (r in c(1:num_r)) {
				prob[r,] <- mplus.compute.irt.icc(file,group,fidx,r,cat,xvector=steps,covariates=covariates)
				xx[r,] <- steps
			}
		} else {
			for (r in c(1:num_r)) {
				for (c in c(cat:cat2)) {
					prob[r,] <- prob[r,] + mplus.compute.irt.icc(file,group,fidx,r,c,xvector=steps,covariates=covariates)
				}
				xx[r,] <- steps
			}
		}
		# plot the icc
		cstr <- sprintf("Item characteristic curves as a function of %s, Class %d", flabels[fidx], group)
		colors <- rainbow(num_r)
		plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n')
		for (i in c(1:num_r)) {
			lines(xx[i,],prob[i,],col=colors[i])
		}
		ldesc <- array(0,c(num_r))
		lty <- array(0,c(num_r))
		lwd <- array(0,c(num_r))
		for (i in c(1:num_r)) {
			if (missing(cat2)) {
				ldesc[i] <- sprintf("%s, Category %d", ulabels[i], cat)
			} else {
				ldesc[i] <- sprintf("%s, Cat %d to %d", ulabels[i], cat, cat2)
			}
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
	} else if (length(ridx) > 1) {
		# Selected indicators.  A specific category is required here; the
		# "all categories" form is only defined for a single indicator.
		if (missing(cat)) {
			stop("The category number (cat) is required when uvar lists more than one indicator.")
		}
		prob <- array(0,c(length(ridx),length(steps)))
		xx <- array(0,c(length(ridx),length(steps)))
		if (missing(cat2)) {
			for (r in c(1:length(ridx))) {
				prob[r,] <- mplus.compute.irt.icc(file,group,fidx,ridx[r],cat,xvector=steps,covariates=covariates)
				xx[r,] <- steps
			}
		} else {
			for (r in c(1:length(ridx))) {
				for (c in c(cat:cat2)) {
					prob[r,] <- prob[r,] + mplus.compute.irt.icc(file,group,fidx,ridx[r],c,xvector=steps,covariates=covariates)
				}
				xx[r,] <- steps
			}
		}
		# plot the icc
		cstr <- sprintf("Item characteristic curves as a function of %s, Class %d", flabels[fidx], group)
		colors <- rainbow(length(ridx))
		plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n')
		for (i in c(1:length(ridx))) {
			lines(xx[i,],prob[i,],col=colors[i])
		}
		ldesc <- array(0,c(length(ridx)))
		lty <- array(0,c(length(ridx)))
		lwd <- array(0,c(length(ridx)))
		for (i in c(1:length(ridx))) {
			if (missing(cat2)) {
				ldesc[i] <- sprintf("%s, Category %d", ulabels[ridx[i]], cat)
			} else {
				ldesc[i] <- sprintf("%s, Cat %d to %d", ulabels[ridx[i]], cat, cat2)
			}
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
	} else if (missing(cat)) {
		# Single indicator, one curve per category.
		prob <- array(0,c(categories[ridx],length(steps)))
		xx <- array(0,c(categories[ridx],length(steps)))
		for (j in c(1:categories[ridx])) {
			prob[j,] <- mplus.compute.irt.icc(file,group,fidx,ridx,j,steps,covariates)
			xx[j,] <- steps
		}
		# plot the icc
		cstr <- sprintf("Item characteristic curve for %s (all categories)\n as a function of %s, Class %d", ulabels[ridx], flabels[fidx], group)
		colors <- rainbow(categories[ridx])
		plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		for (i in c(1:categories[ridx])) {
			lines(xx[i,],prob[i,],col=colors[i])
		}
		ldesc <- vector()
		for (i in c(1:categories[ridx])) {
			ldesc[i] <- sprintf("%s, Category %d", ulabels[ridx], i)
		}
		# style vectors sized to the number of categories (was fixed at 5)
		legend(lloc,ldesc,col=colors,lty=rep(1,categories[ridx]),lwd=rep(2.5,categories[ridx]))
	} else if (missing(cat2)) {
		# Single indicator, single category.
		prob <- mplus.compute.irt.icc(file,group,fidx,ridx,cat,steps,covariates)
		# plot the icc
		cstr <- sprintf("Item characteristic curve for %s (category %d)\n as a function of %s, Class %d", ulabels[ridx], cat, flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(steps,prob,col='red')
	} else {
		# Single indicator, probabilities summed from cat to cat2.
		prob <- array(0,c(length(steps)))
		for (c in c(cat:cat2)) {
			prob <- prob + mplus.compute.irt.icc(file,group,fidx,ridx,c,steps,covariates)
		}
		# plot the icc
		cstr <- sprintf("Item characteristic curve for %s\n(sum from category %d to category %d)\nas a function of %s, Class %d", ulabels[ridx], cat, cat2, flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(steps,prob,col='red')
	}
	steps
}
#========================================================================
# mplus.plot.irt.iic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required)
# - if not given, group=1 will be used
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.iic('ex7.27.gh5',1,'F','U1')
#
mplus.plot.irt.iic <- function(file,group=1,xvar=1,uvar,covariates,xrange=3,xstep=7,lloc="top") {
	# Plot item information curves (IIC) from the IRT data in a GH5 file.
	#
	# file       - quoted name of an existing GH5 file (required)
	# group      - class/group number (default 1)
	# xvar       - factor for the x-axis: index or quoted label (default 1)
	# uvar       - indicator(s): index/label or a vector of them; if missing,
	#              curves for all indicators are plotted
	# covariates - values for all factors/covariates (defaults to sample means)
	# xrange     - x-axis range type 1..6 (+/- xrange s.d. around the mean)
	# xstep      - x-axis step type 1..11 (default 7 = 1/5 s.d.)
	# lloc       - legend location keyword passed to legend() (default "top")
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)

	# check if irt data exists (written only with TYPE=PLOT2)
	if ( !("irt_data" %in% names(gh5)) ) {
		stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
	}

	# properties[1] - number of factors
	# properties[2] - number of factors/covariates
	# properties[3] - number of indicators
	# properties[4] - number of classes
	# properties[5] - maximum number of categories
	props <- mplus.get.group.attribute(file,'irt_data','properties')

	# Labels are normalized to lower case here, so all character input
	# must also be lower-cased before matching.
	flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
	flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
	flabels <- tolower(flabels)
	ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
	ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
	ulabels <- tolower(ulabels)

	num_fx <- as.integer(props[2])
	num_r <- as.integer(props[3])
	max_num_cat <- as.integer(props[5])

	# Resolve the x-axis factor.
	if (is.character(xvar)) {
		xvar <- tolower(xvar)
		index <- pmatch(xvar, flabels, nomatch=0)
		if (index == 0) {
			cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
			stop(cstr)
		}
		fidx = index
	} else {
		if (xvar <= 0 || xvar > num_fx) {
			stop("The index for the x-variable (xvar) is out of range.")
		}
		fidx = xvar
	}

	# Resolve the indicator(s) into ridx (missing uvar means all indicators).
	if (missing(uvar)) {
	} else if (length(uvar) > 1) {
		ridx <- vector()
		for (r in c(1:length(uvar))) {
			var <- uvar[r]
			if (is.character(var)) {
				# lower-case before matching the lower-cased labels
				# (previously omitted, so upper-case names failed to match)
				var <- tolower(var)
				index <- pmatch(var, ulabels, nomatch=0)
				if (index == 0) {
					cstr <- sprintf("Unknown indicator: %s\n", var)
					stop(cstr)
				}
				ridx[r] = index
			} else {
				if (var <= 0 || var > num_r) {
					stop("The index for the indicator in uvar is out of range.")
				}
				ridx[r] = var
			}
		}
	} else {
		if (is.character(uvar)) {
			uvar <- tolower(uvar)
			index <- pmatch(uvar, ulabels, nomatch=0)
			if (index == 0) {
				cstr <- sprintf("Unknown indicator: %s\n", uvar)
				stop(cstr)
			}
			ridx = index
		} else {
			if (uvar <= 0 || uvar > num_r) {
				stop("The index for the indicator (uvar) is out of range.")
			}
			ridx = uvar
		}
	}

	if (group <= 0 || group > props[4]) {
		stop("The group index (group) is out of range.")
	}

	# Default the covariate vector to the sample means for the class.
	if (missing(covariates)) {
		xmean <- mplus.get.group.dataset(file,'irt_data','mean')
		covariates <- xmean[,group]
	} else {
		if (length(covariates) != num_fx) {
			cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
			stop(cstr)
		}
	}

	categories <- mplus.get.group.attribute(file,'irt_data','categories')

	if (!(missing(xrange))) {
		if (xrange <= 0 || xrange > 6) {
			stop("The xrange type should be between 1 and 6.")
		}
	}
	if (!(missing(xstep))) {
		if (xstep <= 0 || xstep > 11) {
			stop("The xstep type should be between 1 and 11.")
		}
	}

	# Build the x-axis grid: mean +/- xmult s.d. in increments of vstep.
	variances <- mplus.get.group.dataset(file,'irt_data','variance')
	means <- mplus.get.group.dataset(file,'irt_data','mean')
	fsd = sqrt(variances[fidx])
	xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
	vmin = means[fidx] + (-1) * xmult * fsd
	vmax = means[fidx] + xmult * fsd
	vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
	steps <- seq(vmin,vmax,by=vstep)

	if (missing(uvar)) {
		# Information curves for all indicators.
		prob <- array(0,c(num_r,length(steps)))
		xx <- array(0,c(num_r,length(steps)))
		for (r in c(1:num_r)) {
			prob[r,] <- mplus.compute.irt.iic(file,group,fidx,r,xvector=steps,covariates=covariates)
			xx[r,] <- steps
		}
		# plot the iic
		cstr <- sprintf("Item information curves as a function of %s, Class %d", flabels[fidx], group)
		colors <- rainbow(num_r)
		plot(xx,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		for (i in c(1:num_r)) {
			lines(xx[i,],prob[i,],col=colors[i])
		}
		ldesc <- array(0,c(num_r))
		lty <- array(0,c(num_r))
		lwd <- array(0,c(num_r))
		for (i in c(1:num_r)) {
			ldesc[i] <- sprintf("%s", ulabels[i])
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
	} else if (length(ridx) > 1) {
		# Information curves for the selected indicators.
		prob <- array(0,c(length(ridx),length(steps)))
		xx <- array(0,c(length(ridx),length(steps)))
		for (r in c(1:length(ridx))) {
			prob[r,] <- mplus.compute.irt.iic(file,group,fidx,ridx[r],xvector=steps,covariates=covariates)
			xx[r,] <- steps
		}
		# plot the iic
		cstr <- sprintf("Item information curves as a function of %s, Class %d", flabels[fidx], group)
		colors <- rainbow(length(ridx))
		plot(xx,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		for (i in c(1:length(ridx))) {
			lines(xx[i,],prob[i,],col=colors[i])
		}
		ldesc <- array(0,c(length(ridx)))
		lty <- array(0,c(length(ridx)))
		lwd <- array(0,c(length(ridx)))
		for (i in c(1:length(ridx))) {
			ldesc[i] <- sprintf("%s", ulabels[ridx[i]])
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		# Information curve for a single indicator.
		prob <- mplus.compute.irt.iic(file,group,fidx,ridx,steps,covariates)
		# plot the iic
		cstr <- sprintf("Item information curve for %s as a function of %s, Class %d", ulabels[ridx], flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	}
}
#========================================================================
# mplus.plot.irt.tic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required)
# - if not given, group=1 will be shown
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.tic('ex7.27.gh5',1,'F','U1')
#
mplus.plot.irt.tic <- function(file,group=1,xvar=1,uvar,covariates,xrange=3,xstep=7) {
	# Plot the total (or partial total) information curve from the IRT data
	# in a GH5 file.  With uvar missing, the curve sums the information of
	# all indicators; otherwise it sums only the listed indicators.
	#
	# file       - quoted name of an existing GH5 file (required)
	# group      - class/group number (default 1)
	# xvar       - factor for the x-axis: index or quoted label (default 1)
	# uvar       - indicator(s): index/label or a vector of them (optional)
	# covariates - values for all factors/covariates (defaults to sample means)
	# xrange     - x-axis range type 1..6 (+/- xrange s.d. around the mean)
	# xstep      - x-axis step type 1..11 (default 7 = 1/5 s.d.)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)

	# check if irt data exists (written only with TYPE=PLOT2)
	if ( !("irt_data" %in% names(gh5)) ) {
		stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
	}

	# properties[1] - number of factors
	# properties[2] - number of factors/covariates
	# properties[3] - number of indicators
	# properties[4] - number of classes
	# properties[5] - maximum number of categories
	props <- mplus.get.group.attribute(file,'irt_data','properties')

	# Labels are normalized to lower case here, so all character input
	# must also be lower-cased before matching.
	flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
	flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
	flabels <- tolower(flabels)
	ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
	ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
	ulabels <- tolower(ulabels)

	num_fx <- as.integer(props[2])
	num_r <- as.integer(props[3])
	max_num_cat <- as.integer(props[5])

	# Resolve the x-axis factor.  Lower-casing was previously omitted here,
	# so upper-case variable names failed to match the lower-cased labels.
	if (is.character(xvar)) {
		xvar <- tolower(xvar)
		index <- pmatch(xvar, flabels, nomatch=0)
		if (index == 0) {
			cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
			stop(cstr)
		}
		fidx = index
	} else {
		if (xvar <= 0 || xvar > num_fx) {
			stop("The index for the x-variable (xvar) is out of range.")
		}
		fidx = xvar
	}

	# Resolve the indicator(s) into ridx (missing uvar means all indicators).
	if (missing(uvar)) {
	} else if (length(uvar) > 1) {
		ridx <- vector()
		for (r in c(1:length(uvar))) {
			var <- uvar[r]
			if (is.character(var)) {
				var <- tolower(var)
				index <- pmatch(var, ulabels, nomatch=0)
				if (index == 0) {
					cstr <- sprintf("Unknown indicator: %s\n", var)
					stop(cstr)
				}
				ridx[r] = index
			} else {
				if (var <= 0 || var > num_r) {
					stop("The index for the indicator in uvar is out of range.")
				}
				ridx[r] = var
			}
		}
	} else {
		if (is.character(uvar)) {
			uvar <- tolower(uvar)
			index <- pmatch(uvar, ulabels, nomatch=0)
			if (index == 0) {
				cstr <- sprintf("Unknown indicator: %s\n", uvar)
				stop(cstr)
			}
			ridx = index
		} else {
			if (uvar <= 0 || uvar > num_r) {
				stop("The index for the indicator (uvar) is out of range.")
			}
			ridx = uvar
		}
	}

	if (group <= 0 || group > props[4]) {
		stop("The group index (group) is out of range.")
	}

	# Default the covariate vector to the sample means for the class.
	if (missing(covariates)) {
		xmean <- mplus.get.group.dataset(file,'irt_data','mean')
		covariates <- xmean[,group]
	} else {
		if (length(covariates) != num_fx) {
			cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
			stop(cstr)
		}
	}

	categories <- mplus.get.group.attribute(file,'irt_data','categories')

	if (!(missing(xrange))) {
		if (xrange <= 0 || xrange > 6) {
			stop("The xrange type should be between 1 and 6.")
		}
	}
	if (!(missing(xstep))) {
		if (xstep <= 0 || xstep > 11) {
			stop("The xstep type should be between 1 and 11.")
		}
	}

	# Build the x-axis grid: mean +/- xmult s.d. in increments of vstep.
	variances <- mplus.get.group.dataset(file,'irt_data','variance')
	means <- mplus.get.group.dataset(file,'irt_data','mean')
	fsd = sqrt(variances[fidx])
	xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
	vmin = means[fidx] + (-1) * xmult * fsd
	vmax = means[fidx] + xmult * fsd
	vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
	steps <- seq(vmin,vmax,by=vstep)

	if (missing(uvar)) {
		# Total information: sum over every indicator.
		prob <- array(0,c(length(steps)))
		for (r in c(1:num_r)) {
			prob <- prob + mplus.compute.irt.iic(file,group,fidx,r,xvector=steps,covariates=covariates)
		}
		# Add the factor-variance term 1/psi.  NOTE(review): this indexes the
		# variance matrix by [fidx, group] while the s.d. above uses
		# variances[fidx] only -- confirm the intended indexing is consistent.
		prob <- prob + 1 / gh5$irt_data$variance[fidx,group]
		# plot the tic
		cstr <- sprintf("Total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	} else if (length(ridx) > 1) {
		# Partial total information: sum over the selected indicators only.
		prob <- array(0,c(length(steps)))
		for (r in c(1:length(ridx))) {
			prob <- prob + mplus.compute.irt.iic(file,group,fidx,ridx[r],xvector=steps,covariates=covariates)
		}
		# plot the tic
		cstr <- sprintf("Partial total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	} else {
		# Partial total information for a single indicator.
		prob <- mplus.compute.irt.iic(file,group,fidx,ridx,steps,covariates)
		# plot the tic
		cstr <- sprintf("Partial total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	}
}
######################################################################################################
# Functions for Survival plots
######################################################################################################
#========================================================================
# mplus.list.survival.variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.survival.variables('ex6.21.gh5')
#
mplus.list.survival.variables <- function(file) {
	# Print the names of the survival variables stored in the GH5 file's
	# survival_data group (one label per line).
	#
	# file - quoted name of an existing GH5 file (required)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load=TRUE)

	# survival information is only written with TYPE=PLOT2
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}

	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')

	cat(c("\nList of survival variables:\n"))
	for (idx in seq_len(props[1])) {
		grpname <- sprintf("survival_data/survival%d", idx)
		lbl <- mplus.get.group.attribute(file, grpname, 'label')
		lbl <- sprintf("%s", lbl)
		lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
		print(lbl)
	}
}
#========================================================================
# mplus.get.survival.kaplanmeier.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.survival.kaplanmeier.values('ex6.21.gh5','T')
#
mplus.get.survival.kaplanmeier.values <- function(file,survvar,classnum,time) {
	# Extract the Kaplan-Meier estimates for a survival variable.
	#
	# file     - quoted name of an existing GH5 file (required)
	# survvar  - survival variable: quoted label or index (required)
	# classnum - class/group number (optional; defaults to the first class)
	# time     - if supplied (any value), return the time axis instead of
	#            the survival probabilities
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load=TRUE)

	# survival information is only written with TYPE=PLOT2
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}

	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')

	# Resolve survvar (label or index) to its position.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (k in seq_len(props[1])) {
			grpname <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grpname, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}

	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "kaplan_meier1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		dims <- attr(classes, 'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("kaplan_meier%d", classnum)
	}

	kmvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 1 = time axis, column 2 = survival probability
	if (missing(time)) {
		kmvals[, 2]
	} else {
		kmvals[, 1]
	}
}
#========================================================================
# mplus.compute.survival.sample.logcumulative.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.compute.survival.sample.logcumulative.values('ex6.21.gh5','T')
#
mplus.compute.survival.sample.logcumulative.values <- function(file,survvar,classnum,time) {
	# Compute the sample log-cumulative hazard, log(-log(S(t))), from the
	# Kaplan-Meier survival estimates stored in the GH5 file.
	#
	# file     - quoted name of an existing GH5 file (required)
	# survvar  - survival variable: quoted label or index (required)
	# classnum - class/group number (optional; defaults to the first class)
	# time     - accepted for interface symmetry with the other survival
	#            accessors; NOTE(review): not used in this function.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load=TRUE)

	# survival information is only written with TYPE=PLOT2
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}

	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')

	# Resolve survvar (label or index) to its position.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (k in seq_len(props[1])) {
			grpname <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grpname, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}

	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "kaplan_meier1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		dims <- attr(classes, 'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("kaplan_meier%d", classnum)
	}

	kmvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 2 holds the survival probabilities S(t)
	log(-log(kmvals[, 2]))
}
#========================================================================
# mplus.get.survival.baseline.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# survvar2 - ending survival variable for getting sequential time
# classnum - the group number (not required)
#
# eg. mplus.get.survival.baseline.values('ex6.21.gh5','T')
#
mplus.get.survival.baseline.values <- function(file,survvar,survvar2,classnum,time) {
	# Extract the estimated baseline survival values for a survival variable.
	# When survvar2 is given, the curves for survvar..survvar2 are linked
	# sequentially on a common time axis.
	#
	# file     - quoted name of an existing GH5 file (required)
	# survvar  - survival variable: quoted label or index (required)
	# survvar2 - ending survival variable for sequential linking (optional)
	# classnum - class/group number (optional)
	# time     - if supplied (any value), return the time axis instead of
	#            the survival values
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)

	# check if survival data exists (written only with TYPE=PLOT2)
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}

	props <- mplus.get.group.attribute(file,'survival_data','properties')

	# Resolve survvar (label or index) to its position.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}

	# Resolve survvar2 the same way when given.
	if (!(missing(survvar2))) {
		if (is.character(survvar2)) {
			surv_idx2 <- 0
			for (i in c(1:props[1])) {
				cstr <- sprintf("survival_data/survival%d", i)
				label <- mplus.get.group.attribute(file,cstr,'label')
				label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
				if (label == survvar2) {
					surv_idx2 = i
					break
				}
			}
			if (surv_idx2 == 0) {
				stop("- unknown survival variable: ", survvar2)
			}
		} else {
			if (survvar2 <= 0 || survvar2 > props[1]) {
				stop("- index for the survival variable is out of range")
			}
			surv_idx2 = survvar2
		}
	}

	if (missing(survvar2)) {
		groupstr <- sprintf("survival_data/survival%d", surv_idx)
		if (missing(classnum)) {
			datastr <- sprintf("estimated_survival")
		} else {
			classes <- mplus.get.group.dataset(file,'/','model_group_labels')
			dims <- attr(classes,'dim')
			if (classnum <= 0 || classnum > dims[1]) {
				stop("Class number is out of range.")
			}
			datastr <- sprintf("estimated_survival%d", classnum)
		}
		esvals <- mplus.get.group.dataset(file,groupstr,datastr)
		# column 1 = time axis, column 2 = survival value
		if (missing(time)) {
			return(esvals[,2])
		} else {
			return(esvals[,1])
		}
	} else {
		# Ending survival variable given, so link the curves sequentially:
		# each curve's time axis is offset by the last time of the previous
		# curve.  NOTE(review): the original code was unfinished here (it
		# referenced undefined 'estvals'/'estvals1' and never returned);
		# this linking -- concatenated values with cumulatively offset
		# times -- is inferred from the header comment and should be
		# confirmed.  Whether the survival values should also be rescaled
		# by the previous segment's last value is left open.
		data <- vector()
		tvals <- vector()
		xlast <- 0
		for (s in c(surv_idx:surv_idx2)) {
			groupstr <- sprintf("survival_data/survival%d", s)
			if (missing(classnum)) {
				datastr <- sprintf("estimated_survival")
			} else {
				classes <- mplus.get.group.dataset(file,'/','model_group_labels')
				dims <- attr(classes,'dim')
				if (classnum <= 0 || classnum > dims[1]) {
					stop("Class number is out of range.")
				}
				datastr <- sprintf("estimated_survival%d", classnum)
			}
			esvals1 <- mplus.get.group.dataset(file,groupstr,datastr)
			# column 1 = time axis, column 2 = survival value (same
			# convention as the single-variable branch above)
			data <- c(data, esvals1[,2])
			tvals <- c(tvals, xlast + esvals1[,1])
			xlast <- tvals[length(tvals)]
		}
		if (missing(time)) {
			return(data)
		} else {
			return(tvals)
		}
	}
}
#========================================================================
# mplus.compute.survival.estimated.logcumulative.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.compute.survival.estimated.logcumulative.values('ex6.21.gh5','T')
#
mplus.compute.survival.estimated.logcumulative.values <- function(file,survvar,classnum,time) {
	# Compute the estimated log-cumulative hazard, log(-log(S(t))), from the
	# estimated baseline survival values stored in the GH5 file.
	#
	# file     - quoted name of an existing GH5 file (required)
	# survvar  - survival variable: quoted label or index (required)
	# classnum - class/group number (optional)
	# time     - accepted for interface symmetry; NOTE(review): not used.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load=TRUE)

	# survival information is only written with TYPE=PLOT2
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}

	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')

	# Resolve survvar (label or index) to its position.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (k in seq_len(props[1])) {
			grpname <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grpname, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}

	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		# NOTE(review): defaults to "estimated_survival1" whereas
		# mplus.get.survival.baseline.values defaults to
		# "estimated_survival" (no suffix) -- confirm which name the GH5
		# writer actually produces.
		datastr <- "estimated_survival1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		dims <- attr(classes, 'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("estimated_survival%d", classnum)
	}

	esvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 2 holds the survival values S(t)
	log(-log(esvals[, 2]))
}
#========================================================================
# mplus.get.survival.basehazard.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.survival.basehazard.values('ex6.21.gh5','T')
#
mplus.get.survival.basehazard.values <- function(file,survvar,classnum,time) {
	# Extract the baseline hazard values for a survival variable.
	#
	# file     - quoted name of an existing GH5 file (required)
	# survvar  - survival variable: quoted label or index (required)
	# classnum - class/group number (optional)
	# time     - if supplied (any value), return the time axis instead of
	#            the baseline hazard values
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load=TRUE)

	# survival information is only written with TYPE=PLOT2
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}

	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')

	# Resolve survvar (label or index) to its position.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (k in seq_len(props[1])) {
			grpname <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grpname, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}

	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "basehazard"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		dims <- attr(classes, 'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("basehazard%d", classnum)
	}

	bhvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 1 = time axis, column 2 = baseline hazard
	if (missing(time)) {
		bhvals[, 2]
	} else {
		bhvals[, 1]
	}
}
#========================================================================
# mplus.plot.survival.kaplanmeier
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.kaplanmeier('ex6.21.gh5','T')
#
mplus.plot.survival.kaplanmeier <- function(file,survvar=1,classnum) {
	# Plot the Kaplan-Meier survival curve(s) for one survival variable:
	# one curve per class with a legend when 'classnum' is missing,
	# otherwise a single red curve for that class.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			# Trim leading/trailing whitespace before comparing labels.
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Kaplan-Meier curve for %s", label)
	if (missing(classnum)) {
		# Curves may have different numbers of points per class; find each
		# length first, then pad the shorter rows with NA.
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		# Fourth argument requests the time axis; without it the survival
		# probabilities are returned.
		for (i in c(1:dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i)
		}
		# Empty plot first (type='n'), then overlay one colored line per class.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend with one entry per class.
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("- class number is out of range")
		}
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yy <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum)
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.baseline('ex6.21.gh5','T')
#
mplus.plot.survival.baseline <- function(file,survvar=1,classnum) {
	# Plot the model-estimated baseline survival curve(s) for one survival
	# variable: one curve per class with a legend when 'classnum' is
	# missing, otherwise a single red curve for that class.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated baseline survival curve for %s", label)
	if (missing(classnum)) {
		# Curves may have different numbers of points per class; find each
		# length first, then pad the shorter rows with NA.
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		# time=0 requests the time axis; without it the estimated survival
		# probabilities are returned.
		for (i in c(1:dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i)
		}
		# Empty plot first (type='n'), then overlay one colored line per class.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend with one entry per class.
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yy <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum)
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.basehazard
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.basehazard('ex6.21.gh5','T')
#
mplus.plot.survival.basehazard <- function(file,survvar=1,classnum) {
	# Plot the model-estimated baseline hazard curve(s) for one survival
	# variable: one curve per class with a legend when 'classnum' is
	# missing, otherwise a single red curve for that class.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required.\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated baseline hazard curve for %s", label)
	if (missing(classnum)) {
		# Curves may have different numbers of points per class; find each
		# length first, then pad the shorter rows with NA.
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.basehazard.values(file,surv_idx,i,0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		# Fourth argument requests the time axis; without it the baseline
		# hazard values are returned.
		for (i in c(1:dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.basehazard.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.get.survival.basehazard.values(file,surv_idx,i)
		}
		# Unlike the survival-curve plots, no ylim is fixed here because
		# hazards are not bounded by 1.
		plot(xall,yall,xlab="Time",ylab="",main=cstr,type='n')
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend with one entry per class.
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.basehazard.values(file,surv_idx,classnum,0)
		yy <- mplus.get.survival.basehazard.values(file,surv_idx,classnum)
		plot(xx,yy,xlab="Time",ylab="",main=cstr,type='n')
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.sample.logcumulative
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.sample.logcumulative('ex6.21.gh5','T')
#
mplus.plot.survival.sample.logcumulative <- function(file,survvar=1,classnum) {
	# Plot the sample (Kaplan-Meier-based) log cumulative hazard curve(s),
	# log(-log(S(t))), for one survival variable: one curve per class with a
	# legend when 'classnum' is missing, otherwise a single red curve.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in seq_len(props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx <- i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Sample log cumulative hazard curve for %s", label)
	if (missing(classnum)) {
		# Curves may have different numbers of points per class; find each
		# length first, then pad the shorter rows with NA.
		npoints <- array(0, c(dims[1]))
		for (i in seq_len(dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[i] <- length(xx)
		}
		maxpoints <- max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in seq_len(dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,i)
			# Blank out points where log(-log(S)) is infinite (S is 0 or 1).
			# BUG FIX: the original indexed the two-dimensional arrays with a
			# single subscript (yall[j], xall[j]), which linear-indexed the
			# matrices and tested/blanked the wrong elements for every class
			# after the first.
			for (j in seq_len(npoints[i])) {
				if (is.infinite(yall[i,j])) {
					xall[i,j] <- NA
				}
			}
		}
		# NOTE(review): the y axis label "Probability" and ylim c(0,1) look
		# carried over from the survival plots; a log cumulative hazard is not
		# bounded by [0,1] -- kept as-is to preserve existing output.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in seq_len(dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend with one entry per class.
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in seq_len(dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] <- 1
			lwd[i] <- 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yy <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,classnum)
		# Blank out points where log(-log(S)) is infinite.
		for (j in seq_len(length(xx))) {
			if (is.infinite(yy[j])) {
				xx[j] <- NA
			}
		}
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.estimated.logcumulative
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.estimated.logcumulative('ex6.21.gh5','T')
#
mplus.plot.survival.estimated.logcumulative <- function(file,survvar=1,classnum) {
	# Plot the estimated log cumulative hazard curve(s), log(-log(S(t)))
	# based on the model-estimated baseline survival, for one survival
	# variable: one curve per class with a legend when 'classnum' is
	# missing, otherwise a single red curve.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in seq_len(props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx <- i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated log cumulative hazard curve for %s", label)
	if (missing(classnum)) {
		# Curves may have different numbers of points per class; find each
		# length first, then pad the shorter rows with NA.
		npoints <- array(0, c(dims[1]))
		for (i in seq_len(dims[1])) {
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[i] <- length(xx)
		}
		maxpoints <- max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in seq_len(dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[i,1:npoints[i]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,i)
			# Blank out points where log(-log(S)) is infinite (S is 0 or 1).
			# BUG FIX: the original indexed the two-dimensional arrays with a
			# single subscript (yall[j], xall[j]), which linear-indexed the
			# matrices and tested/blanked the wrong elements for every class
			# after the first.
			for (j in seq_len(npoints[i])) {
				if (is.infinite(yall[i,j])) {
					xall[i,j] <- NA
				}
			}
		}
		# NOTE(review): the y axis label "Probability" and ylim c(0,1) look
		# carried over from the survival plots; a log cumulative hazard is not
		# bounded by [0,1] -- kept as-is to preserve existing output.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in seq_len(dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend with one entry per class.
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in seq_len(dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] <- 1
			lwd[i] <- 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yy <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,classnum)
		# Blank out points where log(-log(S)) is infinite.
		for (j in seq_len(length(xx))) {
			if (is.infinite(yy[j])) {
				xx[j] <- NA
			}
		}
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.kaplanmeier.vs.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.kaplanmeier.vs.baseline('ex6.21.gh5','T')
#
mplus.plot.survival.kaplanmeier.vs.baseline <- function(file,survvar=1,classnum) {
	# Plot the Kaplan-Meier curve together with the model-estimated baseline
	# survival curve for one survival variable: two curves per class with a
	# legend when 'classnum' is missing, otherwise two curves for that class.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Kaplan-Meier curve compared with\nestimated baseline survival curve for %s", label)
	if (missing(classnum)) {
		# Two curves per class, interleaved in the padded point arrays:
		# Kaplan-Meier curves occupy rows 2*(i-1)+1, estimated baseline
		# curves rows 2*i.
		npoints <- array(0, c(2*dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[2*(i-1)+1] = length(xx)
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[2*i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(2*dims[1],maxpoints))
		yall <- array(NA, c(2*dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i)
			xall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i)
		}
		# Empty plot first (type='n'), then overlay one colored line per curve.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2*dims[1])
		for (i in c(1:(2*dims[1]))) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend: "KM" = Kaplan-Meier, "ES" = estimated survival.
		ldesc <- array(0,c(2*dims[1]))
		lty <- array(0,c(2*dims[1]))
		lwd <- array(0,c(2*dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[2*(i-1)+1] <- sprintf("KM for Class %d", i)
			lty[2*(i-1)+1] = 1
			lwd[2*(i-1)+1] = 2.5
			ldesc[2*i] <- sprintf("ES for Class %d", i)
			lty[2*i] = 1
			lwd[2*i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		# Single class: row 1 is the Kaplan-Meier curve, row 2 the estimated
		# baseline survival curve.
		npoints <- array(0, c(2))
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		npoints[1] = length(xx)
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		npoints[2] = length(xx)
		maxpoints = max(npoints)
		xall <- array(NA, c(2,maxpoints))
		yall <- array(NA, c(2,maxpoints))
		xall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum)
		xall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum)
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2)
		for (i in c(1:2)) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		ldesc <- array(0,c(2))
		lty <- array(0,c(2))
		lwd <- array(0,c(2))
		ldesc[1] <- sprintf("KM for Class %d", classnum)
		lty[1] = 1
		lwd[1] = 2.5
		ldesc[2] <- sprintf("ES for Class %d", classnum)
		lty[2] = 1
		lwd[2] = 2.5
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	}
}
#========================================================================
# mplus.plot.survival.sample.vs.estimated.logcumulative
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.sample.vs.estimated.logcumulative('ex6.21.gh5','T')
#
mplus.plot.survival.sample.vs.estimated.logcumulative <- function(file,survvar=1,classnum) {
	# Plot the sample log cumulative hazard curve together with the
	# estimated log cumulative baseline hazard curve for one survival
	# variable: two curves per class with a legend when 'classnum' is
	# missing, otherwise two curves for that class.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name or the index of the survival variable
	#            (defaults to the first survival variable)
	# classnum - the class/group number (optional)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# Resolve survvar (label or index) to the survival variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Sample log cumulative hazard curve compared with\nestimated log cumulative baseline hazard curve for %s", label)
	if (missing(classnum)) {
		# Two curves per class, interleaved in the padded point arrays:
		# sample curves occupy rows 2*(i-1)+1, estimated curves rows 2*i.
		# NOTE(review): unlike the single-curve log cumulative plots, no
		# infinite log(-log(S)) points are blanked out here -- confirm
		# whether that is intentional.
		npoints <- array(0, c(2*dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[2*(i-1)+1] = length(xx)
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[2*i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(2*dims[1],maxpoints))
		yall <- array(NA, c(2*dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,i)
			xall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[2*i,1:npoints[2*i]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,i)
		}
		# NOTE(review): the y axis label "Probability" and ylim c(0,1) look
		# carried over from the survival plots; a log cumulative hazard is
		# not bounded by [0,1] -- confirm.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2*dims[1])
		for (i in c(1:(2*dims[1]))) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# Legend: "LC" = sample log cumulative, "ELC" = estimated log cumulative.
		ldesc <- array(0,c(2*dims[1]))
		lty <- array(0,c(2*dims[1]))
		lwd <- array(0,c(2*dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[2*(i-1)+1] <- sprintf("LC for Class %d", i)
			lty[2*(i-1)+1] = 1
			lwd[2*(i-1)+1] = 2.5
			ldesc[2*i] <- sprintf("ELC for Class %d", i)
			lty[2*i] = 1
			lwd[2*i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		# Single class: row 1 is the sample curve, row 2 the estimated curve.
		npoints <- array(0, c(2))
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		npoints[1] = length(xx)
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		npoints[2] = length(xx)
		maxpoints = max(npoints)
		xall <- array(NA, c(2,maxpoints))
		yall <- array(NA, c(2,maxpoints))
		xall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yall[1,1:npoints[1]] <-mplus.compute.survival.sample.logcumulative.values(file,surv_idx,classnum)
		xall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yall[2,1:npoints[2]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,classnum)
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2)
		for (i in c(1:2)) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		ldesc <- array(0,c(2))
		lty <- array(0,c(2))
		lwd <- array(0,c(2))
		ldesc[1] <- sprintf("LC for Class %d", classnum)
		lty[1] = 1
		lwd[1] = 2.5
		ldesc[2] <- sprintf("ELC for Class %d", classnum)
		lty[2] = 1
		lwd[2] = 2.5
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	}
}
######################################################################################################
# Functions for Discrete survival plots
######################################################################################################
#========================================================================
# mplus.list.discrete.survival.variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.discrete.survival.variables('ex6.21.gh5')
#
mplus.list.discrete.survival.variables <- function(file) {
	# Print the labels of all discrete-time survival variables stored in the
	# GH5 file, one per line.
	#
	# file - the quoted name of an existing GH5 file (required)
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Discrete survival data is only stored when TYPE=PLOT2 was requested.
	if (!("discrete_survival_data" %in% names(gh5))) {
		stop("- discrete survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
	cat(c("\nList of survival variables:\n"))
	# props[1] is the number of survival variables stored in the file.
	for (idx in seq_len(props[1])) {
		grp <- sprintf("discrete_survival_data/survival%d", idx)
		lbl <- mplus.get.group.attribute(file,grp,'label')
		# Trim leading/trailing whitespace before printing.
		lbl <- gsub("(^\\s+|\\s+$)", "", sprintf("%s", lbl), perl=TRUE)
		print(lbl)
	}
}
#========================================================================
# mplus.get.discrete.survival.kaplanmeier.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.discrete.survival.kaplanmeier.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.kaplanmeier.values <- function(file,survvar,classnum,time) {
	# Fetch the Kaplan-Meier curve values for a discrete-time survival
	# variable from a GH5 file.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name of the survival variable or its index (required)
	# classnum - the class/group number (optional)
	# time     - if given (any value), return the time axis instead of the
	#            survival probabilities
	#
	# Returns a numeric vector: survival probabilities when 'time' is
	# missing, otherwise the corresponding time points.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Discrete survival data is only stored when TYPE=PLOT2 was requested.
	if (!("discrete_survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
	if (is.character(survvar)) {
		# Look the variable up by its (whitespace-trimmed) label.
		surv_idx <- 0
		idx <- 1
		while (idx <= props[1]) {
			grp <- sprintf("discrete_survival_data/survival%d", idx)
			lbl <- mplus.get.group.attribute(file,grp,'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- idx
				break
			}
			idx <- idx + 1
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "kaplan_meier1"
	} else {
		classes <- mplus.get.group.dataset(file,'/','model_group_labels')
		dims <- attr(classes,'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("kaplan_meier%d", classnum)
	}
	kmvals <- mplus.get.group.dataset(file,groupstr,datastr)
	# Column 1 holds the time points, column 2 the survival probabilities.
	if (missing(time)) {
		kmvals[,2]
	} else {
		kmvals[,1]
	}
}
#========================================================================
# mplus.get.discrete.survival.baseline.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# survvar2 - ending survival variable for getting sequential time
# classnum - the group number (not required)
#
# eg. mplus.get.discrete.survival.baseline.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.baseline.values <- function(file,survvar,survvar2,classnum,time) {
	# Fetch the estimated baseline survival values for a discrete-time
	# survival variable from a GH5 file.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name of the survival variable or its index (required)
	# survvar2 - ending survival variable for linking several discrete-time
	#            processes sequentially (optional; see NOTE below)
	# classnum - the class/group number (optional)
	# time     - if given (any value), return the time axis instead of the
	#            survival values (only honored when survvar2 is missing)
	#
	# Returns a numeric vector when 'survvar2' is missing; the sequential
	# branch appears unfinished and currently returns nothing (see notes).
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("discrete_survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
	# Resolve survvar (label or index) to the starting variable index.
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("discrete_survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	# Resolve survvar2 (label or index) to the ending variable index.
	if (!(missing(survvar2))) {
		if (is.character(survvar2)) {
			surv_idx2 <- 0
			for (i in c(1:props[1])) {
				cstr <- sprintf("discrete_survival_data/survival%d", i)
				label <- mplus.get.group.attribute(file,cstr,'label')
				label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
				if (label == survvar2) {
					surv_idx2 = i
					break
				}
			}
			if (surv_idx2 == 0) {
				stop("- unknown survival variable: ", survvar2)
			}
		} else {
			if (survvar2 <= 0 || survvar2 > props[1]) {
				stop("- index for the survival variable is out of range")
			}
			surv_idx2 = survvar2
		}
	}
	if (missing(survvar2)) {
		# Single-variable case: return one column of the stored dataset.
		groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
		if (missing(classnum)) {
			datastr <- sprintf("estimated_survival")
		} else {
			classes <- mplus.get.group.dataset(file,'/','model_group_labels')
			dims <- attr(classes,'dim')
			if (classnum <= 0 || classnum > dims[1]) {
				stop("Class number is out of range.")
			}
			datastr <- sprintf("estimated_survival%d", classnum)
		}
		esvals <- mplus.get.group.dataset(file,groupstr,datastr)
		# Column 1 holds the time points, column 2 the survival values.
		if (missing(time)) {
			return(esvals[,2])
		} else {
			return(esvals[,1])
		}
	} else {
		# ending survival variable given so we need to link them sequentially
		# NOTE(review): this branch looks unfinished -- it only stores values
		# for the first variable, never accumulates the rest, references
		# undefined variables (see notes below), and returns nothing.
		ylast <- 1
		xlast <- 0
		data <- vector()
		time <- vector()
		count <- 0
		for (s in c(surv_idx:surv_idx2)) {
			groupstr <- sprintf("discrete_survival_data/survival%d", s)
			if (missing(classnum)) {
				datastr <- sprintf("estimated_survival")
			} else {
				classes <- mplus.get.group.dataset(file,'/','model_group_labels')
				dims <- attr(classes,'dim')
				if (classnum <= 0 || classnum > dims[1]) {
					stop("Class number is out of range.")
				}
				datastr <- sprintf("estimated_survival%d", classnum)
			}
			esvals1 <- mplus.get.group.dataset(file,groupstr,datastr)
			if (s == surv_idx) {
				count <- length(esvals1[,1])
				data[1:count] <- esvals1[,1]
				# NOTE(review): 'estvals' is undefined -- presumably 'esvals1'
				# was intended; as written this errors at run time.
				time[1:count] <- estvals[,2]
			} else {
				# NOTE(review): 'estvals1' is undefined (likely 'esvals1'), and
				# the computed 'n' is never used -- the linking is incomplete.
				n <- length(estvals1[,1])
			}
		}
	}
}
#========================================================================
# mplus.get.discrete.survival.basehazard.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.discrete.survival.basehazard.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.basehazard.values <- function(file,survvar,classnum,time) {
	# Fetch the estimated baseline hazard values for a discrete-time
	# survival variable from a GH5 file.
	#
	# file     - the quoted name of an existing GH5 file (required)
	# survvar  - the quoted name of the survival variable or its index (required)
	# classnum - the class/group number (optional)
	# time     - if given (any value), return the time axis instead of the
	#            baseline hazard values
	#
	# Returns a numeric vector: baseline hazard values when 'time' is
	# missing, otherwise the corresponding time points.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Discrete survival data is only stored when TYPE=PLOT2 was requested.
	if (!("discrete_survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
	if (is.character(survvar)) {
		# Look the variable up by its (whitespace-trimmed) label.
		surv_idx <- 0
		idx <- 1
		while (idx <= props[1]) {
			grp <- sprintf("discrete_survival_data/survival%d", idx)
			lbl <- mplus.get.group.attribute(file,grp,'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl=TRUE)
			if (lbl == survvar) {
				surv_idx <- idx
				break
			}
			idx <- idx + 1
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "basehazard"
	} else {
		classes <- mplus.get.group.dataset(file,'/','model_group_labels')
		dims <- attr(classes,'dim')
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("basehazard%d", classnum)
	}
	bhvals <- mplus.get.group.dataset(file,groupstr,datastr)
	# Column 1 holds the time points, column 2 the baseline hazard values.
	if (missing(time)) {
		bhvals[,2]
	} else {
		bhvals[,1]
	}
}
#========================================================================
# mplus.plot.discrete.survival.kaplanmeier
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.kaplanmeier('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.kaplanmeier <- function(file,survvar=1,classnum) {
  # Plot the Kaplan-Meier survival curve(s) for a discrete-time survival
  # variable stored in a Mplus GH5 file.
  #
  # file     - the quoted name of an existing GH5 file
  # survvar  - name (character) or index (numeric) of the survival
  #            variable; defaults to the first survival variable
  # classnum - optional class/group number; when omitted, one curve per
  #            class is drawn on shared axes with a legend
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if survival data exists
  if ( !("discrete_survival_data" %in% names(gh5)) ) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  # properties[1] holds the number of survival variables in the file
  props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
  if (is.character(survvar)) {
    # resolve the variable name to its index via the stored labels
    surv_idx <- 0
    for (i in c(1:props[1])) {
      cstr <- sprintf("discrete_survival_data/survival%d", i)
      label <- mplus.get.group.attribute(file,cstr,'label')
      label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
      if (label == survvar) {
        surv_idx = i
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx = survvar
  }
  groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
  label <- mplus.get.group.attribute(file,groupstr,'label')
  label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
  classes <- mplus.get.group.dataset(file,'/','model_group_labels')
  dims <- attr(classes,'dim')
  cstr <- sprintf("Kaplan-Meier curve for %s", label)
  if (missing(classnum)) {
    # All classes on one plot.  Curves may have different lengths, so
    # first find the longest one, then pad the rest with NA (NA points
    # are silently dropped by plot/lines).
    npoints <- array(0, c(dims[1]))
    for (i in c(1:dims[1])) {
      # the 4th argument (time) requests the time points (x values)
      xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
      npoints[i] = length(xx)
    }
    maxpoints = max(npoints)
    xall <- array(NA, c(dims[1],maxpoints))
    yall <- array(NA, c(dims[1],maxpoints))
    for (i in c(1:dims[1])) {
      xall[i,1:npoints[i]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
      yall[i,1:npoints[i]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i)
    }
    # type='n' sets up the axes only; the curves are added with lines()
    plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    colors <- rainbow(dims[1])
    for (i in c(1:dims[1])) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    ldesc <- array(0,c(dims[1]))
    lty <- array(0,c(dims[1]))
    lwd <- array(0,c(dims[1]))
    for (i in c(1:dims[1])) {
      ldesc[i] <- sprintf("Class %d", i)
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  } else {
    # Single requested class.
    if (classnum <= 0 || classnum > dims[1]) {
      stop("- class number is out of range")
    }
    xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
    yy <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum)
    plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    lines(xx,yy,col='red')
  }
}
#========================================================================
# mplus.plot.discrete.survival.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.baseline('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.baseline <- function(file,survvar=1,classnum) {
  # Plot the estimated baseline survival curve(s) for a discrete-time
  # survival variable stored in a Mplus GH5 file.
  #
  # file     - the quoted name of an existing GH5 file
  # survvar  - name (character) or index (numeric) of the survival
  #            variable; defaults to the first survival variable
  # classnum - optional class/group number; when omitted, one curve per
  #            class is drawn on shared axes with a legend
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if survival data exists
  if ( !("discrete_survival_data" %in% names(gh5)) ) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  # properties[1] holds the number of survival variables in the file
  props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
  if (is.character(survvar)) {
    # resolve the variable name to its index via the stored labels
    surv_idx <- 0
    for (i in c(1:props[1])) {
      cstr <- sprintf("discrete_survival_data/survival%d", i)
      label <- mplus.get.group.attribute(file,cstr,'label')
      label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
      if (label == survvar) {
        surv_idx = i
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx = survvar
  }
  groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
  label <- mplus.get.group.attribute(file,groupstr,'label')
  label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
  classes <- mplus.get.group.dataset(file,'/','model_group_labels')
  dims <- attr(classes,'dim')
  cstr <- sprintf("Estimated baseline survival curve for %s", label)
  if (missing(classnum)) {
    # All classes on one plot.  Curves may differ in length, so size the
    # arrays for the longest and leave the remainder as NA (dropped by
    # plot/lines).
    npoints <- array(0, c(dims[1]))
    for (i in c(1:dims[1])) {
      # time=0 requests the time points (x values) rather than the curve
      xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      npoints[i] = length(xx)
    }
    maxpoints = max(npoints)
    xall <- array(NA, c(dims[1],maxpoints))
    yall <- array(NA, c(dims[1],maxpoints))
    for (i in c(1:dims[1])) {
      xall[i,1:npoints[i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      yall[i,1:npoints[i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i)
    }
    # type='n' sets up the axes only; the curves are added with lines()
    plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    colors <- rainbow(dims[1])
    for (i in c(1:dims[1])) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    ldesc <- array(0,c(dims[1]))
    lty <- array(0,c(dims[1]))
    lwd <- array(0,c(dims[1]))
    for (i in c(1:dims[1])) {
      ldesc[i] <- sprintf("Class %d", i)
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  } else {
    # Single requested class.
    if (classnum <= 0 || classnum > dims[1]) {
      stop("Class number is out of range.")
    }
    xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
    yy <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum)
    plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    lines(xx,yy,col='red')
  }
}
#========================================================================
# mplus.plot.discrete.survival.kaplanmeier.vs.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.kaplanmeier.vs.baseline('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.kaplanmeier.vs.baseline <- function(file,survvar=1,classnum) {
  # Plot the Kaplan-Meier curve against the estimated baseline survival
  # curve for a discrete-time survival variable in a Mplus GH5 file.
  #
  # file     - the quoted name of an existing GH5 file
  # survvar  - name (character) or index (numeric) of the survival
  #            variable; defaults to the first survival variable
  # classnum - optional class/group number; when omitted, both curves are
  #            drawn for every class on shared axes
  #
  # Curves are stored interleaved per class: row 2*(i-1)+1 holds the
  # Kaplan-Meier (KM) curve and row 2*i the estimated survival (ES)
  # curve for class i.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if survival data exists
  if ( !("discrete_survival_data" %in% names(gh5)) ) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  # properties[1] holds the number of survival variables in the file
  props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
  if (is.character(survvar)) {
    # resolve the variable name to its index via the stored labels
    surv_idx <- 0
    for (i in c(1:props[1])) {
      cstr <- sprintf("discrete_survival_data/survival%d", i)
      label <- mplus.get.group.attribute(file,cstr,'label')
      label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
      if (label == survvar) {
        surv_idx = i
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx = survvar
  }
  groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
  label <- mplus.get.group.attribute(file,groupstr,'label')
  label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
  classes <- mplus.get.group.dataset(file,'/','model_group_labels')
  dims <- attr(classes,'dim')
  cstr <- sprintf("Kaplan-Meier curve compared with\nestimated baseline survival curve for %s", label)
  if (missing(classnum)) {
    # Two curves per class (KM and ES); curves may differ in length, so
    # size the arrays for the longest and pad with NA.
    npoints <- array(0, c(2*dims[1]))
    for (i in c(1:dims[1])) {
      xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
      npoints[2*(i-1)+1] = length(xx)
      xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      npoints[2*i] = length(xx)
    }
    maxpoints = max(npoints)
    xall <- array(NA, c(2*dims[1],maxpoints))
    yall <- array(NA, c(2*dims[1],maxpoints))
    for (i in c(1:dims[1])) {
      # odd rows: Kaplan-Meier; even rows: estimated baseline survival
      xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
      yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i)
      xall[2*i,1:npoints[2*i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      yall[2*i,1:npoints[2*i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i)
    }
    # type='n' sets up the axes only; the curves are added with lines()
    plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n')
    colors <- rainbow(2*dims[1])
    for (i in c(1:(2*dims[1]))) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    ldesc <- array(0,c(2*dims[1]))
    lty <- array(0,c(2*dims[1]))
    lwd <- array(0,c(2*dims[1]))
    for (i in c(1:dims[1])) {
      ldesc[2*(i-1)+1] <- sprintf("KM for Class %d", i)
      lty[2*(i-1)+1] = 1
      lwd[2*(i-1)+1] = 2.5
      ldesc[2*i] <- sprintf("ES for Class %d", i)
      lty[2*i] = 1
      lwd[2*i] = 2.5
    }
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  } else {
    # Single requested class: row 1 = KM curve, row 2 = ES curve.
    if (classnum <= 0 || classnum > dims[1]) {
      stop("Class number is out of range.")
    }
    npoints <- array(0, c(2))
    xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
    npoints[1] = length(xx)
    xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
    npoints[2] = length(xx)
    maxpoints = max(npoints)
    xall <- array(NA, c(2,maxpoints))
    yall <- array(NA, c(2,maxpoints))
    xall[1,1:npoints[1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
    yall[1,1:npoints[1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum)
    xall[2,1:npoints[2]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
    yall[2,1:npoints[2]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum)
    plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n')
    colors <- rainbow(2)
    for (i in c(1:2)) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    ldesc <- array(0,c(2))
    lty <- array(0,c(2))
    lwd <- array(0,c(2))
    ldesc[1] <- sprintf("KM for Class %d", classnum)
    lty[1] = 1
    lwd[1] = 2.5
    ldesc[2] <- sprintf("ES for Class %d", classnum)
    lty[2] = 1
    lwd[2] = 2.5
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  }
}
######################################################################################################
# Supporting functions
######################################################################################################
##########################################################################
#
# mplus.get.group.attribute - supporting function for getting attribute
#
# arguments:
# file - the quoted name of an existing GH5 file
# groupstr - the name of the group for the attribute
# attrstr - the name of the attribute
#
# eg. mplus.get.group.attribute('ex8.1.gh5','individual_data','var_names')
#
mplus.get.group.attribute <- function(file, groupstr, attrstr) {
  # Read a single HDF5 attribute attached to a group in a GH5 file.
  #
  # file     - the quoted name of an existing GH5 file
  # groupstr - path of the group that carries the attribute
  # attrstr  - name of the attribute to read
  #
  # Returns the attribute value; character data is returned with
  # leading/trailing whitespace stripped (gsub coerces the result to
  # character, which callers rely on only for label/properties lookups).
  if ( !(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # Open handles directly rather than h5dump()-ing the whole file: the
  # original dumped every dataset in the file and never used the result.
  fid <- H5Fopen(file)
  gid <- H5Gopen(fid, groupstr)
  atid <- H5Aopen(gid, attrstr)
  attr <- H5Aread(atid)
  # close in reverse order of opening
  H5Aclose(atid)
  H5Gclose(gid)
  H5Fclose(fid)
  attr <- gsub("(^\\s+|\\s+$)", "", attr, perl=TRUE)
  return(attr)
}
##########################################################################
#
# mplus.get.dataset.attribute - supporting function for getting an
# attribute attached to a dataset
#
# arguments:
# file - the quoted name of an existing GH5 file
# datastr - the name of the dataset for the attribute
# attrstr - the name of the attribute
#
# eg. mplus.get.dataset.attribute(file, datastr, attrstr)
#
mplus.get.dataset.attribute <- function(file, datastr, attrstr) {
  # Read a single HDF5 attribute attached to a dataset in a GH5 file.
  #
  # file    - the quoted name of an existing GH5 file
  # datastr - path of the dataset that carries the attribute
  # attrstr - name of the attribute to read
  #
  # Returns the attribute value as read (no whitespace trimming).
  if ( !(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # Open handles directly rather than h5dump()-ing the whole file: the
  # original dumped every dataset in the file and never used the result.
  fid <- H5Fopen(file)
  did <- H5Dopen(fid, datastr)
  atid <- H5Aopen(did, attrstr)
  attr <- H5Aread(atid)
  # close in reverse order of opening
  H5Aclose(atid)
  H5Dclose(did)
  H5Fclose(fid)
  return(attr)
}
##########################################################################
#
# mplus.get.group.dataset - supporting function for getting dataset
#
# arguments:
# file - the quoted name of an existing GH5 file
# groupstr - the name of the group for the attribute
# datastr - the name of the attribute
#
# eg. mplus.get.group.dataset('ex8.1.gh5','bayesian_data','statements')
#
mplus.get.group.dataset <- function(file, groupstr, datastr) {
  # Read one dataset from a group in a GH5 file.
  #
  # file     - the quoted name of an existing GH5 file
  # groupstr - path of the group containing the dataset
  # datastr  - name of the dataset to read
  #
  # Returns the dataset contents as read by H5Dread.
  if ( !(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # Open handles directly rather than h5dump()-ing the whole file: the
  # original dumped every dataset in the file and never used the result.
  fid <- H5Fopen(file)
  gid <- H5Gopen(fid, groupstr)
  dtid <- H5Dopen(gid, datastr)
  data <- H5Dread(dtid)
  # close in reverse order of opening
  H5Dclose(dtid)
  H5Gclose(gid)
  H5Fclose(fid)
  return(data)
}
estimate_mode <- function(x) {
  # Approximate the mode of a numeric sample as the location of the peak
  # of its kernel density estimate.
  kde <- stats::density(x)
  with(kde, x[which.max(y)])
}
######################################################################################################
# Math functions
######################################################################################################
lin <- function(y, link) {
  # Apply the inverse link function: link == 0 selects the logistic
  # (inverse-logit) link, any other value selects the probit link
  # (standard-normal CDF).
  if (link == 0) {
    logistic(y)
  } else {
    pnorm(y, mean = 0, sd = 1)
  }
}
logistic <- function(y) {
  # Numerically-safe inverse-logit (logistic CDF).
  #
  # Saturates to exactly 1 above 50 and exactly 0 at or below -50, where
  # exp(-y) would under/overflow anyway -- same cutoffs as the original.
  # Vectorized via ifelse: the original scalar `if` chain errors on
  # length > 1 input in R >= 4.2, while its caller lin()'s probit branch
  # (pnorm) accepts vectors.  Scalar results are unchanged.
  ifelse(y > 50, 1, ifelse(y > -50, 1 / (1 + exp(-y)), 0))
}
|
/Scripts/MPlus/mplus.R
|
no_license
|
mkelleman/psy564
|
R
| false
| false
| 212,323
|
r
|
# R scripts for extracting and plotting data stored in Mplus graphic
# information in GH5 files. Uses the rhdf5 package for loading the
# the GH5 file.
#
# Version history:
# 2013-09-13 File Version 3 for Mplus Version 7.3
# 2014-04-30 Fix for sample and estimated means.
# 2014-10-07 Fix IRT ICC and IIC functions, turning properties into integers
# 2014-10-08 Add functions for Discrete survival curves
# 2014-11-20 Fix estimated probabilities function, turning categories into integers.
# 2014-11-21 Add legend to plot of estimated probabilities.
#
#
# Written by: Thuy Nguyen
# Muthen & Muthen
#
# Reference:
#
# Bernd Fischer and Gregoire Pau (). rhdf5: HDF5 interface to R. R
# package version 2.4.0.
#
if (require(rhdf5,quietly=TRUE)) {
  print("Loaded rhdf5 package")
} else {
  print("trying to install rhdf5 from bioconductor.org")
  # The biocLite.R bootstrap script was retired by Bioconductor in 2017;
  # BiocManager is the supported installer for Bioconductor packages.
  if (!requireNamespace("BiocManager", quietly=TRUE)) {
    install.packages("BiocManager")
  }
  BiocManager::install("rhdf5", update=FALSE, ask=FALSE)
  if (require(rhdf5)) {
    print("Loaded missing rhdf5 package ")
  } else {
    stop("could not install rhdf5")
  }
}
##########################################################################
#
# mplus.view.plots - loads the file and lists all available plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.view.plots('ex.gh5')
#
mplus.view.plots <- function(file) {
  # Convenience alias: delegates to mplus.load(), which loads the GH5
  # file and lists all applicable plot/extraction functions.
  mplus.load(file)
}
##########################################################################
#
# mplus.load - loads the file and lists all available plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.load('ex.gh5')
#
mplus.load <- function(file) {
  # Load a GH5 file and print the plot and data-extraction functions that
  # apply to the data it contains.  Returns the file name invisibly.
  #
  # Fixes to the printed help text relative to the original:
  #   - "survar" -> "survvar", "clasnum" -> "classnum" typos
  #   - mplus.get.survival.basehazard.values no longer shows "file" twice
  #   - the pvalue_type branch now prints ...pvalue_type, not ...pvalue
  #   - the "unknown series type" messages use sep="" like the rest
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    cat(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  cat(c("\nPlot functions:\n"))
  if ("individual_data" %in% names(gh5)) {
    if (exists("mplus.plot.histogram",mode="function")) {
      cat(c(" - mplus.plot.histogram('"),file,"',variable,bins)\n",sep="")
    }
    if (exists("mplus.plot.scatterplot",mode="function")) {
      cat(c(" - mplus.plot.scatterplot('"),file,"',xvar,yvar)\n",sep="")
    }
  }
  # Series plots: one entry per process, keyed by the series type stored
  # in the process properties (1 = observed, 2 = estimated-only,
  # 3 = categorical probabilities).
  if ("process_data" %in% names(gh5) && "means_and_variances_data" %in% names(gh5)) {
    np <- length(attr(gh5$process_data,"names"))
    for (i in c(1:np)) {
      cstr <- paste(c("process"), as.character(i), sep="")
      proc <- gh5$process_data[[cstr]]
      # Replace the line below with series of low-level function calls
      cstr2 <- paste(c("process_data"),"/",cstr,"", sep="")
      prop <- mplus.get.group.attribute(file, cstr2, 'properties')
      values <- attr(gh5$means_and_variances_data,"names")
      if (prop[1] == 1) {
        sm_ind <- pmatch("y_observed_means",values,nomatch=0)
        if (sm_ind > 0 && exists("mplus.plot.sample_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.sample_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        if (sm_ind>0 && em_ind>0 && exists("mplus.plot.sample_and_estimated_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.sample_and_estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_modes",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_medians",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 2) {
        em_ind <- pmatch("e_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_modes",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_medians",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 3) {
        em_ind <- pmatch("observed_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.sample_proportions",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.sample_proportions('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("estimated_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.plot.estimated_probabilities",mode="function")) {
          cstr2 <- paste(c(" - mplus.plot.estimated_probabilities('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
      } else {
        cstr2 <- paste(c("'"),cstr,"' has unknown series type.\n",sep="")
        cat(cstr2)
      }
    }
  }
  if ("loop_data" %in% names(gh5)) {
    if (exists("mplus.list.loop.labels",mode="function")) {
      cat(c(" - mplus.list.loop.labels('"),file,"')\n",sep="")
    }
    if (exists("mplus.plot.loop",mode="function")) {
      cat(c(" - mplus.plot.loop('"),file,"')\n",sep="")
    }
  }
  if ("irt_data" %in% names(gh5)) {
    if (exists("mplus.list.irt.variables",mode="function")) {
      cat(c(" - mplus.list.irt.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.list.irt.xvariables",mode="function")) {
      cat(c(" - mplus.list.irt.xvariables('"),file,"')\n",sep="")
    }
    if (exists("mplus.plot.irt.icc",mode="function")) {
      cat(c(" - mplus.plot.irt.icc('"),file,"',group,xvar,uvar,cat,cat2,covariates,xrange,xstep,lloc)\n",sep="")
    }
    if (exists("mplus.plot.irt.iic",mode="function")) {
      cat(c(" - mplus.plot.irt.iic('"),file,"',group,xvar,uvar,covariates,xrange,xstep,lloc)\n",sep="")
    }
    if (exists("mplus.plot.irt.tic",mode="function")) {
      cat(c(" - mplus.plot.irt.tic('"),file,"',group,xvar,uvar,covariates,xrange,xstep)\n",sep="")
    }
  }
  if ("survival_data" %in% names(gh5)) {
    if (exists("mplus.plot.survival.kaplanmeier",mode="function")) {
      cat(c(" - mplus.plot.survival.kaplanmeier('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.baseline",mode="function")) {
      cat(c(" - mplus.plot.survival.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.basehazard",mode="function")) {
      cat(c(" - mplus.plot.survival.basehazard('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.sample.logcumulative",mode="function")) {
      cat(c(" - mplus.plot.survival.sample.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.estimated.logcumulative",mode="function")) {
      cat(c(" - mplus.plot.survival.estimated.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.kaplanmeier.vs.baseline",mode="function")) {
      cat(c(" - mplus.plot.survival.kaplanmeier.vs.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.survival.sample.vs.estimated.logcumulative",mode="function")) {
      cat(c(" - mplus.plot.survival.sample.vs.estimated.logcumulative('"),file,"',survvar,classnum)\n",sep="")
    }
  }
  if ("discrete_survival_data" %in% names(gh5)) {
    if (exists("mplus.plot.discrete.survival.kaplanmeier",mode="function")) {
      cat(c(" - mplus.plot.discrete.survival.kaplanmeier('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.discrete.survival.baseline",mode="function")) {
      cat(c(" - mplus.plot.discrete.survival.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
    if (exists("mplus.plot.discrete.survival.kaplanmeier.vs.baseline",mode="function")) {
      cat(c(" - mplus.plot.discrete.survival.kaplanmeier.vs.baseline('"),file,"',survvar,classnum)\n",sep="")
    }
  }
  if ("bayesian_data" %in% names(gh5)) {
    if ("parameters_autocorr" %in% names(gh5$bayesian_data)) {
      if ("parameters" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.list.bayesian.parameters",mode="function")) {
          cat(c(" - mplus.list.bayesian.parameters('"),file,"',parameter)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.traceplot",mode="function")) {
          cat(c(" - mplus.plot.bayesian.traceplot('"),file,"',parameter)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.distribution",mode="function")) {
          cat(c(" - mplus.plot.bayesian.distribution('"),file,"',parameter,bins)\n",sep="")
        }
      }
      if ("priors" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.plot.bayesian.prior.distribution",mode="function")) {
          cat(c(" - mplus.plot.bayesian.prior.distribution('"),file,"',parameter,bins)\n",sep="")
        }
      }
      if ("autocorrelation" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.plot.bayesian.autocorrelation",mode="function")) {
          cat(c(" - mplus.plot.bayesian.autocorrelation('"),file,"',parameter,chain)\n",sep="")
        }
      }
    }
    if ("predictive" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.list.bayesian.predictive.labels",mode="function")) {
        cat(c(" - mplus.list.bayesian.predictive.labels('"),file,"')\n",sep="")
      }
      if ("observed" %in% names(gh5$bayesian_data$predictive) && "replicated" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.plot.bayesian.predictive.scatterplot",mode="function")) {
          cat(c(" - mplus.plot.bayesian.predictive.scatterplot('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.plot.bayesian.predictive.distribution",mode="function")) {
          cat(c(" - mplus.plot.bayesian.predictive.distribution('"),file,"',plabel,bins)\n",sep="")
        }
      }
    }
    if ("plausible" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.list.bayesian.plausible.labels",mode="function")) {
        cat(c(" - mplus.list.bayesian.plausible.labels('"),file,"')\n",sep="")
      }
      if (exists("mplus.plot.bayesian.plausible.distribution",mode="function")) {
        cat(c(" - mplus.plot.bayesian.plausible.distribution('"),file,"',plauslabel,obs,bins)\n",sep="")
      }
    }
  }
  cat(c("\nPlot data extraction functions:\n"))
  if ("individual_data" %in% names(gh5)) {
    if (exists("mplus.list.variables",mode="function")) {
      cat(c(" - mplus.list.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.data",mode="function")) {
      cat(c(" - mplus.get.data('"),file,"',variable)\n",sep="")
    }
  }
  if ("process_data" %in% names(gh5)) {
    if (exists("mplus.list.processes",mode="function")) {
      cat(c(" - mplus.list.processes('"),file,"')\n",sep="")
    }
  }
  if ("loop_data" %in% names(gh5)) {
    if (exists("mplus.get.loop.estimates",mode="function")) {
      cat(c(" - mplus.get.loop.estimates('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.lowerci",mode="function")) {
      cat(c(" - mplus.get.loop.lowerci('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.upperci",mode="function")) {
      cat(c(" - mplus.get.loop.upperci('"),file,"',looplabel)\n",sep="")
    }
    if (exists("mplus.get.loop.xvalues",mode="function")) {
      cat(c(" - mplus.get.loop.xvalues('"),file,"')\n",sep="")
    }
  }
  if ("irt_data" %in% names(gh5)) {
    if (exists("mplus.compute.irt.icc",mode="function")) {
      cat(c(" - mplus.compute.irt.icc('"),file,"',group,xvar,uvar,cat,xvector,covariates)\n",sep="")
    }
    if (exists("mplus.compute.irt.iic",mode="function")) {
      cat(c(" - mplus.compute.irt.iic('"),file,"',group,xvar,uvar,xvector,covariates)\n",sep="")
    }
  }
  # Same per-process dispatch as above, now for the get_* variants.
  if ("process_data" %in% names(gh5) && "means_and_variances_data" %in% names(gh5)) {
    np <- length(attr(gh5$process_data,"names"))
    for (i in c(1:np)) {
      cstr <- paste(c("process"), as.character(i), sep="")
      proc <- gh5$process_data[[cstr]]
      # Replace the line below with series of low-level function calls
      cstr2 <- paste(c("process_data"),"/",cstr,"", sep="")
      prop <- mplus.get.group.attribute(file, cstr2, 'properties')
      values <- attr(gh5$means_and_variances_data,"names")
      if (prop[1] == 1) {
        sm_ind <- pmatch("y_observed_means",values,nomatch=0)
        if (sm_ind > 0 && exists("mplus.get.sample_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.sample_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_modes",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("y_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_medians",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 2) {
        em_ind <- pmatch("e_estimated_means",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_means",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_means('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_modes",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_modes",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_modes('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("e_estimated_medians",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_medians",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_medians('"),file,"','",cstr,"')\n",sep="")
          cat(cstr2)
        }
      } else if (prop[1] == 3) {
        em_ind <- pmatch("observed_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.sample_proportions",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.sample_proportions('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
        em_ind <- pmatch("estimated_probs",values,nomatch=0)
        if (em_ind > 0 && exists("mplus.get.estimated_probabilities",mode="function")) {
          cstr2 <- paste(c(" - mplus.get.estimated_probabilities('"),file,"','",cstr,"',cat1,cat2)\n",sep="")
          cat(cstr2)
        }
      } else {
        cstr2 <- paste(c("'"),cstr,"' has unknown series type.\n",sep="")
        cat(cstr2)
      }
    }
  }
  if ("survival_data" %in% names(gh5)) {
    if (exists("mplus.list.survival.variables",mode="function")) {
      cat(c(" - mplus.list.survival.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.survival.kaplanmeier.values",mode="function")) {
      cat(c(" - mplus.get.survival.kaplanmeier.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.compute.survival.sample.logcumulative.values",mode="function")) {
      cat(c(" - mplus.compute.survival.sample.logcumulative.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.survival.baseline.values",mode="function")) {
      cat(c(" - mplus.get.survival.baseline.values('"),file,"',survvar,survvar2,classnum,time)\n",sep="")
    }
    if (exists("mplus.compute.survival.estimated.logcumulative.values",mode="function")) {
      cat(c(" - mplus.compute.survival.estimated.logcumulative.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.survival.basehazard.values",mode="function")) {
      cat(c(" - mplus.get.survival.basehazard.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
  }
  if ("discrete_survival_data" %in% names(gh5)) {
    if (exists("mplus.list.discrete.survival.variables",mode="function")) {
      cat(c(" - mplus.list.discrete.survival.variables('"),file,"')\n",sep="")
    }
    if (exists("mplus.get.discrete.survival.kaplanmeier.values",mode="function")) {
      cat(c(" - mplus.get.discrete.survival.kaplanmeier.values('"),file,"',survvar,classnum,time)\n",sep="")
    }
    if (exists("mplus.get.discrete.survival.baseline.values",mode="function")) {
      cat(c(" - mplus.get.discrete.survival.baseline.values('"),file,"',survvar,survvar2,classnum,time)\n",sep="")
    }
  }
  if ("bayesian_data" %in% names(gh5)) {
    if ("parameters_autocorr" %in% names(gh5$bayesian_data)) {
      if ("parameters" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.parameter.data",mode="function")) {
          cat(c(" - mplus.get.bayesian.parameter.data('"),file,"',parameter,chain)\n",sep="")
        }
      }
      if ("priors" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.prior.parameter.data",mode="function")) {
          cat(c(" - mplus.get.bayesian.prior.parameter.data('"),file,"',parameter)\n",sep="")
        }
      }
      if ("autocorrelation" %in% names(gh5$bayesian_data$parameters_autocorr)) {
        if (exists("mplus.get.bayesian.autocorrelation",mode="function")) {
          cat(c(" - mplus.get.bayesian.autocorrelation('"),file,"',parameter,chain)\n",sep="")
        }
      }
    }
    if ("predictive" %in% names(gh5$bayesian_data)) {
      if ("observed" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.observed",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.observed('"),file,"',plabel)\n",sep="")
        }
      }
      if ("replicated" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.replicated",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.replicated('"),file,"',plabel)\n",sep="")
        }
      }
      if ("pvalues" %in% names(gh5$bayesian_data$predictive)) {
        if (exists("mplus.get.bayesian.predictive.lowerci",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.lowerci('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.upperci",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.upperci('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.pvalue",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.pvalue('"),file,"',plabel)\n",sep="")
        }
        if (exists("mplus.get.bayesian.predictive.pvalue_type",mode="function")) {
          cat(c(" - mplus.get.bayesian.predictive.pvalue_type('"),file,"',plabel)\n",sep="")
        }
      }
    }
    if ("plausible" %in% names(gh5$bayesian_data)) {
      if (exists("mplus.get.bayesian.plausible.data",mode="function")) {
        cat(c(" - mplus.get.bayesian.plausible.data('"),file,"',plauslabel,obs)\n",sep="")
      }
    }
  }
  invisible(file)
}
##########################################################################
#
# mplus.clear - clears all mplus-related data from a previous mplus_load
#
# arguments: none
#
# eg. mplus.clear()
#
#mplus.clear <- function() {
# cat(c("\nRemoved the following:\n"))
#
# if (exists("matrix_data",)) {
# rm(matrix_data, inherits=TRUE)
# cat(c(" - matrix_data\n"))
# }
# if (exists("process_data",)) {
# rm(process_data, inherits=TRUE)
# cat(c(" - process_data\n"))
# }
# if (exists("class_data")) {
# rm(class_data, inherits=TRUE)
# cat(c(" - class_data\n"))
# }
# if (exists("categorical_data")) {
# rm(categorical_data, inherits=TRUE)
# cat(c(" - categorical_data\n"))
# }
# if (exists("individual_data")) {
# rm(individual_data, inherits=TRUE)
# cat(c(" - individual_data\n"))
# }
# if (exists("means_and_variances_data")) {
# rm(means_and_variances_data, inherits=TRUE)
# cat(c(" - means_and_variances_data\n"))
# }
#}
##########################################################################
#
# mplus.list.processes - list all available processes
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.processes('ex8.1.gh5')
#
mplus.list.processes <- function(file) {
  # List the names of all processes (series) stored in the GH5 file, along
  # with the functions that accept a process name as an argument.
  #
  # file - the quoted name of an existing GH5 file
  #
  # Returns a character vector of process names.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the whole GH5 file and check that series (process) data is present
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    # fixed typo: "proceses" -> "processes"
    stop("mplus.list.processes requires series information.\n\nUse the SERIES option in Mplus to specify series information.\n")
  }
  cat(c("\nList of process names to use in the following functions:\n"))
  cat(c(" - mplus.plot.sample_means\n"))
  cat(c(" - mplus.plot.estimated_means\n"))
  cat(c(" - mplus.plot.sample_and_estimated_means\n"))
  cat(c(" - mplus.plot.sample_proportions\n"))
  cat(c(" - mplus.plot.estimated_probabilities\n"))
  cat(c(" - mplus.get.sample_means\n"))
  cat(c(" - mplus.get.estimated_means\n"))
  cat(c(" - mplus.get.sample_proportions\n"))
  cat(c(" - mplus.get.estimated_probabilities\n"))
  cat(c("\nProcesses:\n"))
  # process names are stored as attributes of the process_data group
  allpnames <- attr(gh5$process_data,"names")
  allpnames
}
##########################################################################
#
# mplus.plot.estimated_means - plot estimated means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_means('ex8.1.gh5','process1')
#
mplus.plot.estimated_means <- function(file,procstr='process1',ptype='o') {
  # Plot estimated means for the quoted process, one line per class, with a
  # legend in the bottom-right corner.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated means:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_means(file,procstr)
  # plot the means
  cstr <- paste("Estimated means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend, one entry per class
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] = 1
    lwd[i] = 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_modes - plot estimated modes for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_modes('ex8.1.gh5','process1')
#
mplus.plot.estimated_modes <- function(file,procstr='process1',ptype='o') {
  # Plot estimated modes for the quoted process, one line per class, with a
  # legend in the bottom-right corner.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated modes.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated modes
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated modes:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_modes(file,procstr)
  # plot the modes
  cstr <- paste("Estimated modes for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend, one entry per class
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] = 1
    lwd[i] = 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_medians - plot estimated medians for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.estimated_medians('ex8.1.gh5','process1')
#
mplus.plot.estimated_medians <- function(file,procstr='process1',ptype='o') {
  # Plot estimated medians for the quoted process, one line per class, with a
  # legend in the bottom-right corner.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated medians.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated medians
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated medians:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_medians(file,procstr)
  # plot the medians
  cstr <- paste("Estimated medians for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend, one entry per class
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] = 1
    lwd[i] = 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_means - plot sample means for the quoted process
#
# arguments:
# procstr - the quoted name of a series
#
# eg. mplus.plot.sample_means('ex6.1.gh5','process1')
#
mplus.plot.sample_means <- function(file,procstr='process1',ptype='o') {
  # Plot sample means for the quoted process, one line per class, with a
  # legend in the bottom-right corner.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # only type 1 (observed variables) has sample means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file, cstr2, 'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1) ) {
    cstr <- paste("- process does not have sample means:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.sample_means(file,procstr)
  # plot the means
  cstr <- paste("Sample means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend, one entry per class
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] = 1
    lwd[i] = 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_and_estimated_means - plot sample and estimated means for the
# quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
#
# eg. mplus.plot.sample_and_estimated_means('ex6.1.gh5','process1')
#
mplus.plot.sample_and_estimated_means <- function(file,procstr='process1',ptype='o') {
  # Plot sample and estimated means for the quoted process on the same axes:
  # two lines per class (sample and estimated), with a legend entry for each.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample and estimated means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # only type 1 (observed variables) has sample means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1) ) {
    cstr <- paste("- process does not have sample means:",procstr,"\n\n")
    stop(cstr)
  }
  # build arrays twice as wide as the number of classes: odd columns hold
  # sample means, even columns hold estimated means
  dims <- attr(proc$time_scores,"dim")
  xx <- array(0, c(dims[1],2*dims[2]))
  yy <- array(0, c(dims[1],2*dims[2]))
  samp <- mplus.get.sample_means(file,procstr)
  emean <- mplus.get.estimated_means(file,procstr)
  for (i in c(1:dims[1])) {
    for (j in c(1:dims[2])) {
      # set the time scores and pick up sample means
      xx[i,2*j-1] <- proc$time_scores[i,j]
      yy[i,2*j-1] <- samp[i,j]
      # set the time scores and pick up estimated means
      xx[i,2*j] <- proc$time_scores[i,j]
      yy[i,2*j] <- emean[i,j]
    }
  }
  # plot the means
  cstr <- paste("Sample and estimated means for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,2*i-1],yy[,2*i-1],type=ptype,pch=symb[i],col=colors[i])
    lines(xx[,2*i],yy[,2*i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend: two entries per class (sample, estimated)
  ldesc <- array(0,c(2*dims[2]))
  lty <- array(0,c(2*dims[2]))
  lwd <- array(0,c(2*dims[2]))
  lcol <- array(0,c(2*dims[2]))
  lsymb <- array(0,c(2*dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[2*i-1] <- sprintf("Sample means, Class %d", i)
    lty[2*i-1] = 1
    lwd[2*i-1] = 2.5
    lsymb[2*i-1] <- symb[i]
    # fixed: the original assigned lcol[2*i] here, leaving lcol[2*i-1]
    # (the sample-means legend color) unset
    lcol[2*i-1] <- colors[i]
    ldesc[2*i] <- sprintf("Estimated means, Class %d", i)
    lty[2*i] = 1
    lwd[2*i] = 2.5
    lcol[2*i] <- colors[i]
    lsymb[2*i] <- symb[i]
  }
  legend('bottomright',col=lcol,pch=lsymb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.estimated_probabilities - plot estimated probabilities for the
# quoted process, summing up probabilities of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.plot.estimated_probabilities('ex8.4.gh5','process1',1,1)
#
mplus.plot.estimated_probabilities <- function(file,procstr='process1',cat1=1,cat2=1,ptype='o') {
  # Plot estimated probabilities for the quoted process, summing the
  # probabilities of categories cat1 through cat2, one line per class.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # cat1    - the first category to include
  # cat2    - the last category to include (defaults to cat1 when omitted)
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated probabilities.\n")
  }
  # if cat2 is missing and cat1 is given, then we should assign cat2 to cat1.
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # only type 3 (categorical) has estimated probabilities
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( !(series_type == 3) ) {
    cstr <- paste("- process does not have estimated probabilities:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.estimated_probabilities(file,procstr,cat1,cat2)
  # plot the probabilities on a [0,1] y-axis
  cstr <- paste("Estimated probabilities for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n',ylim=c(0:1))
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  colors <- rainbow(dims[2])
  for (i in c(1:dims[2])) {
    lines(xx[,i],yy[,i],type=ptype,pch=symb[i],col=colors[i])
  }
  # build the legend, one entry per class
  ldesc <- array(0,c(dims[2]))
  lty <- array(0,c(dims[2]))
  lwd <- array(0,c(dims[2]))
  lcol <- array(0,c(dims[2]))
  for (i in c(1:dims[2])) {
    ldesc[i] <- sprintf("Class %d", i)
    lty[i] = 1
    lwd[i] = 2.5
    lcol[i] <- colors[i]
  }
  legend('bottomright',col=lcol,pch=symb,ldesc,lty=lty,lwd=lwd)
}
##########################################################################
#
# mplus.plot.sample_proportions - plot sample proportions for the
# quoted process, summing up proportions of the first to the last category
# chosen
#
# arguments:
# file - the quoted name of an existing GH5 file
# procstr - the quoted name of a series
# cat1 - the first category to include
# cat2 - the last category to include
#
# eg. mplus.plot.sample_proportions('ex8.4.gh5','process1',1,1)
#
mplus.plot.sample_proportions <- function(file,procstr='process1',cat1=1,cat2=1,ptype='o') {
  # Plot sample proportions for the quoted process, summing the proportions
  # of categories cat1 through cat2, one line per class.
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  # cat1    - the first category to include
  # cat2    - the last category to include (defaults to cat1 when omitted)
  # ptype   - plot type passed to lines(), defaults to 'o' (points + lines)
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample proportions.\n")
  }
  # if cat2 is missing and cat1 is given, then we should assign cat2 to cat1.
  if (missing(cat2)) {
    if (!(missing(cat1))) {
      cat2 <- cat1
    }
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    # signal an error like every other mplus.plot.* function instead of
    # printing the message and returning invisibly
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # only type 3 (categorical) has sample proportions
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( !(series_type == 3) ) {
    cstr <- paste("- process does not have sample proportions:",procstr,"\n\n")
    stop(cstr)
  }
  # time scores: rows are time points (dims[1]), columns are classes (dims[2])
  xx <- proc$time_scores
  dims <- attr(proc$time_scores,"dim")
  yy <- mplus.get.sample_proportions(file,procstr,cat1,cat2)
  # plot the proportions on a [0,1] y-axis
  cstr <- paste("Sample proportions for",procstr)
  plot(xx,yy,xlab="",ylab="",main=cstr,type='n',ylim=c(0:1))
  # one plotting symbol per class; previously sized by dims[1] (time points),
  # which produced NA symbols when there were more classes than time points
  symb <- array(c(21,22,23,24,25),c(dims[2]))
  for (k in c(1:dims[2])) {
    lines(xx[,k],yy[,k],type=ptype,pch=symb[k])
  }
}
##########################################################################
#
# mplus.get.estimated_means - plot estimated means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_means('ex8.1.gh5','process1',3)
#
mplus.get.estimated_means <- function(file,procstr='process1',classidx=0) {
  # Return the estimated means for the quoted process as a matrix with
  # rows = time points and columns = classes (a single column when a
  # specific class is requested).
  #
  # file     - the quoted name of an existing GH5 file (required)
  # procstr  - the quoted name of a series, defaults to 'process1'
  # classidx - the class index; 0 (default) returns all classes
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated means:",procstr,"\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores,"dim")
  # if all classes, dimension it by number of classes. Otherwise, just dimension by 1.
  if (classidx == 0) {
    yy <- array(0, c(dims[1],dims[2]))
  } else {
    # check that the classidx is within range.
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ",dims[2],": ",classidx,"\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1],1))
  }
  # map the variables in the series onto the stored means
  var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/y_estimated_means','variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/e_estimated_means','variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated means for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$y_estimated_means$values[var_indices[j],i]
        }
      }
    } else {
      # single-class result goes in column 1 (the original indexed yy with
      # the undefined variable 'i' here)
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$y_estimated_means$values[var_indices[j],classidx]
      }
    }
  }
  # type 2 is estimated means for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$e_estimated_means$values[var_indices[j],i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$e_estimated_means$values[var_indices[j],classidx]
      }
    }
  }
  # return the means
  return(yy)
}
##########################################################################
#
# mplus.get.sample_means - return sample means for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.sample_means('ex8.1.gh5','process1',3)
#
mplus.get.sample_means <- function(file,procstr='process1',classidx=0) {
  # Return the sample (observed) means for the quoted process as a matrix
  # with rows = time points and columns = classes (a single column when a
  # specific class is requested).
  #
  # file     - the quoted name of an existing GH5 file (required)
  # procstr  - the quoted name of a series, defaults to 'process1'
  # classidx - the class index; 0 (default) returns all classes
  if (missing(file)) {
    stop("- the name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information.\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # only type 1 (observed variables) has sample means
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1) ) {
    cstr <- paste("- process does not have sample means:",procstr,"\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores,"dim")
  # if all classes, dimension it by number of classes. Otherwise, just dimension by 1.
  if (classidx == 0) {
    yy <- array(0, c(dims[1],dims[2]))
  } else {
    # check that the classidx is within range.
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ",dims[2],": ",classidx,"\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1],1))
  }
  # map the variables in the series onto the stored observed means
  var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
  mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/y_observed_means','variables')
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # only type 1 has sample means
  if (classidx == 0) {
    for (i in c(1:dims[2])) {
      for (j in c(1:dims[1])) {
        yy[j,i] <- gh5$means_and_variances_data$y_observed_means$values[var_indices[j],i]
      }
    }
  } else {
    # single-class result goes in column 1 (the original indexed yy with the
    # undefined variable 'i' here)
    for (j in c(1:dims[1])) {
      yy[j,1] <- gh5$means_and_variances_data$y_observed_means$values[var_indices[j],classidx]
    }
  }
  # return the means
  return(yy)
}
##########################################################################
#
# mplus.get.estimated_modes - plot estimated modes for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_modes('ex8.1.gh5','process1',3)
#
mplus.get.estimated_modes <- function(file,procstr='process1',classidx=0) {
  # Return the estimated modes for the quoted process as a matrix with
  # rows = time points and columns = classes (a single column when a
  # specific class is requested).
  #
  # file     - the quoted name of an existing GH5 file (required)
  # procstr  - the quoted name of a series, defaults to 'process1'
  # classidx - the class index; 0 (default) returns all classes
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated modes.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated modes
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated modes:",procstr,"\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores,"dim")
  # if all classes, dimension it by number of classes. Otherwise, just dimension by 1.
  if (classidx == 0) {
    yy <- array(0, c(dims[1],dims[2]))
  } else {
    # check that the classidx is within range.
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ",dims[2],": ",classidx,"\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1],1))
  }
  # map the variables in the series onto the stored modes
  var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/y_estimated_modes','variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/e_estimated_modes','variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated modes for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$y_estimated_modes$values[var_indices[j],i]
        }
      }
    } else {
      # single-class result goes in column 1 (the original indexed yy with
      # the undefined variable 'i' here)
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$y_estimated_modes$values[var_indices[j],classidx]
      }
    }
  }
  # type 2 is estimated modes for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$e_estimated_modes$values[var_indices[j],i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$e_estimated_modes$values[var_indices[j],classidx]
      }
    }
  }
  # return the modes
  return(yy)
}
##########################################################################
#
# mplus.get.estimated_medians - plot estimated medians for the quoted process
#
# arguments:
# file - the quoted name of an existing GH5 file, required
# procstr - the quoted name of a series, not required. Defaults to 'process1' (the first process)
# classidx - the class index, not required - 0 for all classes. Default to 0.
#
# eg. mplus.get.estimated_medians('ex8.1.gh5','process1',3)
#
mplus.get.estimated_medians <- function(file,procstr='process1',classidx=0) {
  # Return the estimated medians for the quoted process as a matrix with
  # rows = time points and columns = classes (a single column when a
  # specific class is requested).
  #
  # file     - the quoted name of an existing GH5 file (required)
  # procstr  - the quoted name of a series, defaults to 'process1'
  # classidx - the class index; 0 (default) returns all classes
  if (missing(file)) {
    stop(" - name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  # load the GH5 file and check that series information exists
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated medians.\n")
  }
  allpnames <- attr(gh5$process_data,"names")
  pind <- pmatch(procstr, allpnames, nomatch=0)
  if (pind == 0) {
    cstr <- paste("- process does not exist:",procstr,"\n\n")
    stop(cstr)
  }
  # get the process
  proc <- gh5$process_data[[procstr]]
  # the series type is stored in the 'properties' attribute of the group;
  # types 1 (observed) and 2 (latent) have estimated medians
  cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,cstr2,'properties')
  series_type <- prop[1]
  if ( ! (series_type == 1 || series_type == 2) ) {
    cstr <- paste("- process does not have estimated medians:",procstr,"\n\n")
    stop(cstr)
  }
  # dims[1] = number of time points, dims[2] = number of classes
  dims <- attr(proc$time_scores,"dim")
  # if all classes, dimension it by number of classes. Otherwise, just dimension by 1.
  if (classidx == 0) {
    yy <- array(0, c(dims[1],dims[2]))
  } else {
    # check that the classidx is within range.
    if (classidx < 0 || classidx > dims[2]) {
      cstr <- paste("- classidx is out of range, 1 to ",dims[2],": ",classidx,"\n\n")
      stop(cstr)
    }
    yy <- array(0, c(dims[1],1))
  }
  # map the variables in the series onto the stored medians
  var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
  if (series_type == 1) {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/y_estimated_medians','variables')
  } else {
    mean_vars <- mplus.get.group.attribute(file,'means_and_variances_data/e_estimated_medians','variables')
  }
  var_indices <- pmatch(var_names, mean_vars, nomatch=0)
  # type 1 is estimated medians for observed variables
  if (series_type == 1) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$y_estimated_medians$values[var_indices[j],i]
        }
      }
    } else {
      # single-class result goes in column 1 (the original indexed yy with
      # the undefined variable 'i' here)
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$y_estimated_medians$values[var_indices[j],classidx]
      }
    }
  }
  # type 2 is estimated medians for latent variables
  if (series_type == 2) {
    if (classidx == 0) {
      for (i in c(1:dims[2])) {
        for (j in c(1:dims[1])) {
          yy[j,i] <- gh5$means_and_variances_data$e_estimated_medians$values[var_indices[j],i]
        }
      }
    } else {
      for (j in c(1:dims[1])) {
        yy[j,1] <- gh5$means_and_variances_data$e_estimated_medians$values[var_indices[j],classidx]
      }
    }
  }
  # return the medians
  return(yy)
}
##########################################################################
#
# mplus.get.time_scores - return time scores for the quoted process
#
# arguments:
# procstr - the quoted name of a series
#
# eg. mplus.get.time_scores('ex6.1.gh5', 'process1')
#
mplus.get.time_scores <- function(file,procstr='process1') {
  # Return the time-score matrix for the named process
  # (rows = time points, columns = classes).
  #
  # file    - the quoted name of an existing GH5 file
  # procstr - the quoted name of a series, defaults to 'process1'
  if (missing(file)) {
    stop("- the name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:",file,"\n"))
  }
  # load the GH5 file and make sure series (process) data is present
  gh5 <- h5dump(file, load=TRUE)
  if (!("process_data" %in% names(gh5))) {
    stop("- requires series information.\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample means.\n")
  }
  proc_names <- attr(gh5$process_data,"names")
  if (pmatch(procstr, proc_names, nomatch=0) == 0) {
    stop(paste("- process does not exist:",procstr,"\n\n"))
  }
  proc <- gh5$process_data[[procstr]]
  # read the 'properties' attribute exactly as the original did (this also
  # validates that the group attribute is accessible)
  group_path <- paste(c("process_data"),"/",procstr,"", sep="")
  prop <- mplus.get.group.attribute(file,group_path,'properties')
  series_type <- prop[1]
  # hand back the time scores
  proc$time_scores
}
##########################################################################
#
# mplus.get.estimated_probabilities - return estimated probabilities for the
# quoted process, summing up probabilities of the first to the last category
# chosen
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	procstr - the quoted name of a series
#	cat1 - the first category to include
#	cat2 - the last category to include
#
# eg. mplus.get.estimated_probabilities('ex8.4.gh5','process1',1,1)
#
mplus.get.estimated_probabilities <- function(file,procstr='process1',cat1=1,cat2=1) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check that the series exists
	if (!("process_data" %in% names(gh5))) {
		stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith estimated probabilities.\n")
	}
	# if cat2 is missing and cat1 is given, sum over the single category cat1
	if (missing(cat2)) {
		if (!(missing(cat1))) {
			cat2 <- cat1
		}
	}
	allpnames <- attr(gh5$process_data,"names")
	pind <- pmatch(procstr, allpnames, nomatch=0)
	if (pind == 0) {
		cstr <- paste("- process does not exist:",procstr,"\n\n")
		stop(cstr)
	}
	# get the process
	proc <- gh5$process_data[[procstr]]
	# series type 3 marks a categorical process with estimated probabilities
	cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
	prop <- mplus.get.group.attribute(file,cstr2,'properties')
	series_type <- prop[1]
	if ( !(series_type == 3) ) {
		cstr <- paste("- process does not have estimated probabilities:",procstr,"\n\n")
		stop(cstr)
	}
	# time_scores is [time points x classes]; the result has the same shape
	dims <- attr(proc$time_scores,"dim")
	yy <- array(0, c(dims[1],dims[2]))
	# get indices and names of the variables in the series
	var_indices <- mplus.get.group.attribute(file,cstr2,'var_indices')
	var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
	# the number of categories for variable i in the process is
	# categories[cat_indices[i]]
	categories <- mplus.get.group.attribute(file,'categorical_data','categories')
	catvars <- mplus.get.group.attribute(file,'categorical_data','var_names')
	cat_indices <- pmatch(var_names, catvars, nomatch=0)
	probs <- gh5$means_and_variances_data$estimated_probs$values
	# sum the requested category range for every time point and class
	for (i in seq_len(dims[1])) {
		# Offset of this variable's first category in the stacked
		# probability table: total category count of all preceding
		# variables. Loop-invariant in j, so computed once per i
		# (the original recomputed it inside the inner loop and relied
		# on the precedence of 1:k-1, i.e. (1:k)-1, to drop element k).
		k <- cat_indices[i]
		start_index <- if (k > 1) sum(as.integer(categories[seq_len(k - 1)])) else 0
		startk <- cat1 + start_index
		endk <- cat2 + start_index
		for (j in seq_len(dims[2])) {
			yy[i,j] <- sum(probs[startk:endk,j])
		}
	}
	# return the probabilities
	return(yy)
}
##########################################################################
#
# mplus.get.sample_proportions - return sample proportions for the
# quoted process, summing up proportions of the first to the last category
# chosen
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	procstr - the quoted name of a series
#	cat1 - the first category to include
#	cat2 - the last category to include
#
# eg. mplus.get.sample_proportions('ex8.4.gh5','process1',1,1)
#
mplus.get.sample_proportions <-function(file,procstr='process1',cat1=1,cat2=1) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check that the series exists
	if (!("process_data" %in% names(gh5))) {
		stop("- requires series information\n\nUse the SERIES option in Mplus to specify series information for processes\nwith sample proportions.\n")
	}
	if (missing(procstr)) {
		stop("- requires the name of a series\n\nUse mplus.list.processes() to get the list of series processes.")
	}
	# if cat2 is missing and cat1 is given, sum over the single category cat1
	if (missing(cat2)) {
		if (!(missing(cat1))) {
			cat2 <- cat1
		}
	}
	allpnames <- attr(gh5$process_data,"names")
	pind <- pmatch(procstr, allpnames, nomatch=0)
	if (pind == 0) {
		cstr <- paste("- process does not exist:",procstr,"\n\n")
		stop(cstr)
	}
	# get the process
	proc <- gh5$process_data[[procstr]]
	# series type 3 marks a categorical process with sample proportions
	cstr2 <- paste(c("process_data"),"/",procstr,"", sep="")
	prop <- mplus.get.group.attribute(file,cstr2,'properties')
	series_type <- prop[1]
	if ( ! (series_type == 3) ) {
		cstr <- paste("- process does not have sample proportions:",procstr,"\n\n")
		stop(cstr)
	}
	# dims[1] is the number of time points, dims[2] is the number of classes
	dims <- attr(proc$time_scores,"dim")
	yy <- array(0, c(dims[1],dims[2]))
	# get indices and names of the variables in the series
	var_indices <- mplus.get.group.attribute(file,cstr2,'var_indices')
	var_names <- mplus.get.group.attribute(file,cstr2,'var_names')
	# the number of categories for variable i in the process is
	# categories[cat_indices[i]]
	categories <- mplus.get.group.attribute(file,'categorical_data','categories')
	catvars <- mplus.get.group.attribute(file,'categorical_data','var_names')
	cat_indices <- pmatch(var_names, catvars, nomatch=0)
	probs <- gh5$means_and_variances_data$observed_probs$values
	# sum the requested category range for every time point and class
	for (i in seq_len(dims[1])) {
		# Offset of this variable's first category in the stacked
		# proportion table. Fix: coerce categories with as.integer()
		# exactly as mplus.get.estimated_probabilities does - the
		# attribute may be stored as character, and sum() on character
		# errors. Also hoisted out of the inner loop (invariant in j).
		k <- cat_indices[i]
		start_index <- if (k > 1) sum(as.integer(categories[seq_len(k - 1)])) else 0
		startk <- cat1 + start_index
		endk <- cat2 + start_index
		for (j in seq_len(dims[2])) {
			yy[i,j] <- sum(probs[startk:endk,j])
		}
	}
	# return the proportions
	return(yy)
}
##########################################################################
#
# mplus.list.variables - list the variables in individual data
#
# arguments:
#	file - the quoted name of an existing GH5 file
#
# eg. mplus.list.variables('ex8.1.gh5')
#
mplus.list.variables <- function(file) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Individual-level data is stored only with TYPE=PLOT1 or TYPE=PLOT3.
	if (!("individual_data" %in% names(gh5))) {
		stop("mplus.list.variables requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data.")
	}
	# Print a short usage banner, then return the variable names.
	cat("\nList of variable names to use in the following functions:\n")
	cat(" - mplus.plot.histogram\n")
	cat(" - mplus.plot.scatterplot\n")
	cat(" - mplus.get.data\n")
	cat("\nVariables:\n")
	# Names are stored as fixed-width padded strings; trim the padding.
	var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
	var_names <- gsub("(^\\s+|\\s+$)", "", var_names, perl=TRUE)
	var_names
}
##########################################################################
#
# mplus.get.data - return the individual data for the quoted variable
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	v - name of variable to plot
#
# eg. mplus.get.data('ex8.1.gh5','y1')
#
mplus.get.data <- function(file,v) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Individual-level data is stored only with TYPE=PLOT1 or TYPE=PLOT3.
	if (!("individual_data" %in% names(gh5))) {
		stop("mplus.get.data requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data.")
	}
	if (missing(v)) {
		stop("mplus.get.data requires the name of a variable.\n\nUse mplus.list.variables() to get the list of variable names.")
	}
	# GH5 stores variable names in uppercase; match on the uppercased name.
	target <- toupper(v)
	stored_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
	pos <- pmatch(target, stored_names, nomatch=0)
	if (pos == 0) {
		stop(paste(c("Unknown variable:"),target,"\n"))
	}
	# raw_data rows are variables, columns are observations.
	gh5$individual_data$raw_data[pos,]
}
##########################################################################
#
# mplus.plot.scatterplot - plot the scatterplot for the 2 quoted variables
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	xv - name of variable on the x-axis
#	yv - name of variable on the y-axis
#
# eg. mplus.plot.scatterplot('ex8.1.gh5','y1','y2')
#
mplus.plot.scatterplot <- function(file, xv, yv) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Individual-level data is stored only with TYPE=PLOT1 or TYPE=PLOT3.
	if (!("individual_data" %in% names(gh5))) {
		stop("mplus.plot.scatterplot requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data\nfor scatterplots.")
	}
	if (missing(xv) || missing(yv)) {
		stop("mplus.plot.scatterplot requires the names of two variables.\n\nUse mplus.list.variables() to get the list of variable names.")
	}
	# GH5 stores variable names in uppercase; match on uppercased names.
	xvar <- toupper(xv)
	yvar <- toupper(yv)
	stored_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
	# Validate both axes up front so the error names the offending axis.
	if (pmatch(xvar, stored_names, nomatch=0) == 0) {
		stop(paste(c("Unknown x-variable:"),xvar,"\n"))
	}
	if (pmatch(yvar, stored_names, nomatch=0) == 0) {
		stop(paste(c("Unknown y-variable:"),yvar,"\n"))
	}
	# Fetch both columns and draw the scatterplot.
	xx <- mplus.get.data(file,xvar)
	yy <- mplus.get.data(file,yvar)
	plot(xx,yy,xlab=xvar,ylab=yvar)
}
##########################################################################
#
# mplus.plot.histogram - plot the histogram for the quoted variable, using the
# specified number of bins (the default is 10 bins)
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	v - name of variable to plot
#	bins - the number of bins to use
#
# eg. mplus.plot.histogram('ex8.1.gh5','y1',5)
#
mplus.plot.histogram <- function(file,v,bins=10) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if individual data exists
	if ( !("individual_data" %in% names(gh5)) ) {
		stop("mplus.plot.histogram requires individual data.\n\nUse TYPE=PLOT1 or TYPE=PLOT3 setting in Mplus to store individual data\nfor histograms.")
	}
	if (missing(v)) {
		stop("mplus.plot.histogram requires the name of a variable.\n\nUse mplus.list.variables() to get the list of variable names.")
	}
	# the number of bins should be greater than 0
	if (bins <= 0) {
		stop("The number of bins should be greater than 0.")
	}
	# variables are stored in uppercase
	var <- toupper(v)
	var_names <- mplus.get.group.attribute(file, 'individual_data', 'var_names')
	index <- pmatch(var, var_names, nomatch=0)
	if (index == 0) {
		cstr <- paste(c("Unknown variable:"),var,"\n")
		stop(cstr)
	}
	xx <- mplus.get.data(file,v)
	# Guard the break computation: seq(min, max, ...) fails with a cryptic
	# error when the range is degenerate or contains missing values.
	rng <- range(xx)
	if (anyNA(rng)) {
		stop("- the variable contains missing values; histogram cannot be drawn")
	}
	if (rng[1] == rng[2]) {
		stop("- all values of the variable are identical; histogram cannot be drawn")
	}
	cstr <- paste(c("Histogram of"),var)
	hist(xx,breaks=seq(rng[1],rng[2],length=bins+1),col="red",main=cstr,xlab=var,right=TRUE)
}
######################################################################################################
# Functions for BAYESIAN plots
######################################################################################################
#=========================================================================
#
# mplus.list.bayesian.parameters - list the parameters in bayesian data
#
# arguments:
#	file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.parameters('ex8.1.gh5')
#
mplus.list.bayesian.parameters <- function(file) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Bayesian results are stored only with TYPE=PLOT2.
	if (!("bayesian_data" %in% names(gh5))) {
		stop("mplus.list.bayesian.parameters requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	cat("\nList of parameters to use in the following functions:\n")
	cat(" - mplus.plot.bayesian.trace_plots\n")
	cat("\nParameters:\n")
	# Parameter statements are stored as padded fixed-width strings; trim
	# the padding before displaying.
	statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	# Print each statement with its 1-based index.
	for (idx in seq_along(statements)) {
		cat(sprintf("[%d] %s", idx, statements[idx]), sep="\n")
	}
	invisible(statements)
}
#=========================================================================
#
# mplus.get.bayesian.parameter.data - get the bayesian data for the given parameter/chain
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted name of a parameter or the parameter index
#	chainnum - the chain number
#
# eg. mplus.get.bayesian.parameter.data('ex8.1.gh5','parameter 1',1)
#
mplus.get.bayesian.parameter.data <- function(file,paramstr,chainnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# parameters array: [parameter, iteration, chain]
	dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
	if (is.character(paramstr)) {
		statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
		statements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, statements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("Unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		if (paramidx < 1 || paramidx > dims[1]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	# Fix: validate the chain number like the sibling functions intend to,
	# instead of silently indexing out of bounds for a bad chain.
	if (chainnum < 1 || chainnum > dims[3]) {
		cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
		stop(cstr)
	}
	xx <- gh5$bayesian_data$parameters_autocorr$parameters[paramidx,,chainnum]
	xx
}
#=========================================================================
#
# mplus.get.bayesian.prior.parameter.data - get the prior data for the given parameter
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#
# eg. mplus.get.bayesian.prior.parameter.data('ex8.1.gh5',1)
#
mplus.get.bayesian.prior.parameter.data <- function(file,paramstr) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# priors array: indexed [,paramidx] below, so the second dimension is
	# the parameter dimension (the original computed dims twice and the
	# comment claimed the first dimension was the parameters)
	dims <- attr(gh5$bayesian_data$parameters_autocorr$priors,"dim")
	if (is.character(paramstr)) {
		statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
		statements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, statements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		# Fix: bound the index by the parameter dimension (dims[2], the
		# one actually indexed below), not dims[1].
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	xx <- gh5$bayesian_data$parameters_autocorr$priors[,paramidx]
	xx
}
#=========================================================================
#
# mplus.get.bayesian.autocorrelation - get the autocorrelation data for the given parameter
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#	chainnum - the chain number
#
# eg. mplus.get.bayesian.autocorrelation('ex8.1.gh5','parameter 1',1)
#
mplus.get.bayesian.autocorrelation <- function(file,paramstr,chainnum=1) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# autocorrelation array: [lag, parameter, chain]
	dims <- attr(gh5$bayesian_data$parameters_autocorr$autocorrelation,"dim")
	if (is.character(paramstr)) {
		statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
		statements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, statements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("Unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		# Fix: bound by the parameter dimension (dims[2], the one indexed
		# below as [,paramidx,chainnum]); the original checked dims[1],
		# the lag dimension.
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	# Fix: the original used && (chainnum < 1 && chainnum > dims[3]),
	# which can never be TRUE, so invalid chains were never rejected.
	if (chainnum < 1 || chainnum > dims[3]) {
		cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
		stop(cstr)
	}
	xx <- gh5$bayesian_data$parameters_autocorr$autocorrelation[,paramidx,chainnum]
	xx
}
#=========================================================================
#
# mplus.plot.bayesian.traceplot - plot the trace plot for a parameter
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#
# eg. mplus.plot.bayesian.traceplot('ex8.1.gh5','parameter 1')
#
mplus.plot.bayesian.traceplot <- function(file,paramstr) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# Fix: guard the parameter argument like the sibling bayesian
	# functions do instead of failing later with an obscure error.
	if (missing(paramstr)) {
		stop("- requires the parameter label or index.\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
	}
	# parameters array: [parameter, iteration, chain]
	dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
	statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
	if (is.character(paramstr)) {
		lcstatements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		if (paramidx < 1 || paramidx > dims[1]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	label <- statements[paramidx]
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	# one column per chain: yy holds the draws, xx the iteration numbers
	xx <- array(0, c(dims[2],dims[3]))
	yy <- array(0, c(dims[2],dims[3]))
	for (i in seq_len(dims[3])) {
		yy[,i] <- mplus.get.bayesian.parameter.data(file, paramidx, i)
	}
	# every column is simply 1..niterations (column-major fill)
	xx[] <- rep(seq_len(dims[2]), times=dims[3])
	colors <- rainbow(dims[3])
	# number of iterations before the posterior distribution begins
	ndist <- mplus.get.dataset.attribute(file, 'bayesian_data/parameters_autocorr/parameters', 'ndistribution')
	# plot the traceplot, one colored line per chain
	cstr <- paste("Trace plot of:",label)
	plot(xx,yy,xlab="",ylab="",main=cstr,type='n')
	for (i in seq_len(dims[3])) {
		lines(xx[,i],yy[,i],col=colors[i])
	}
	# mark where the posterior distribution starts
	abline(v=ndist,untf=FALSE,col='red')
}
#=========================================================================
#
# mplus.plot.bayesian.distribution - plot the histogram for the parameter, using the
# specified number of bins (the default is 100 bins)
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#	bins - the number of bins to use
#
# eg. mplus.plot.bayesian.distribution('bayes.gh5','parameter 1',50)
#
mplus.plot.bayesian.distribution <- function(file,paramstr,bins=100) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	if (missing(paramstr)) {
		stop("- requires the parameter label or index.\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
	}
	# the number of bins should be greater than 0
	if (bins <= 0) {
		stop("The number of bins should be greater than 0.")
	}
	# parameters array: [parameter, iteration, chain]
	dims <- attr(gh5$bayesian_data$parameters_autocorr$parameters,"dim")
	statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	if (is.character(paramstr)) {
		lcstatements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		if (paramidx < 1 || paramidx > dims[1]) {
			cstr <- paste("Parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	label <- statements[paramidx]
	# number of burn-in iterations preceding the posterior distribution
	ndist <- mplus.get.dataset.attribute(file, 'bayesian_data/parameters_autocorr/parameters', 'ndistribution')
	# collect the draws for every chain, one column per chain
	xxc <- array(0, c(dims[2],dims[3]))
	for (i in seq_len(dims[3])) {
		xxc[,i] <- mplus.get.bayesian.parameter.data(file, paramidx, i)
	}
	# Drop the burn-in iterations and flatten chain by chain. Column-major
	# as.vector() reproduces the original fill order (chain 1 first);
	# replaces a manual double loop with leftover debug print statements.
	xx <- as.vector(xxc[(ndist + 1):dims[2], , drop = FALSE])
	cstr <- paste(c("Distribution of:"),label)
	h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
	# summary statistics of the pooled posterior draws
	xxmode <- h$mids[h$counts == max(h$counts)]
	xxmean <- mean(xx)
	xxsd <- sd(xx)
	xxmedian <- median(xx)
	left <- quantile(xx, 0.025)
	right <- quantile(xx, 0.975)
	# mark mode, mean, median and the 95% credibility interval
	abline(v=xxmode,untf=FALSE,col='green')
	abline(v=xxmean,untf=FALSE,col='brown')
	abline(v=xxmedian,untf=FALSE,col='purple')
	abline(v=left,untf=FALSE,col='blue')
	abline(v=right,untf=FALSE,col='blue')
	modestr <- sprintf("Mode = %0.5f", xxmode)
	meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
	medianstr <- sprintf("Median = %0.5f", xxmedian)
	lowci <- sprintf("95%% Lower CI = %0.5f", left)
	uppci <- sprintf("95%% Upper CI = %0.5f", right)
	ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
	lcol <- c('brown','purple','green','blue','blue')
	legend("topright",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
}
#=========================================================================
#
# mplus.plot.bayesian.prior.distribution - plot the histogram for the parameter, using the
# specified number of bins (the default is 100 bins)
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#	bins - the number of bins to use
#
# eg. mplus.plot.bayesian.prior.distribution('bayes.gh5','parameter 1',50)
#
mplus.plot.bayesian.prior.distribution <- function(file,paramstr,bins=100) {
	# Fix: guard the file argument like the sibling functions do.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	if (missing(paramstr)) {
		stop("- requires the parameter index\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
	}
	# the number of bins should be greater than 0
	if (bins <= 0) {
		stop("- the number of bins should be greater than 0")
	}
	statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	# priors array: indexed [,paramidx] by the data getter, so the second
	# dimension is the parameter dimension
	dims <- attr(gh5$bayesian_data$parameters_autocorr$priors,"dim")
	if (is.character(paramstr)) {
		lcstatements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		# Fix: bound by the parameter dimension (dims[2]), not dims[1].
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	plabel <- statements[paramidx]
	xx <- mplus.get.bayesian.prior.parameter.data(file, paramidx)
	# 999/998 are sentinel fills for improper/unavailable priors
	if (min(xx) == 999 && max(xx) == 999) {
		stop("- prior distributions for this parameter cannot be displayed because the prior is improper")
	} else if (min(xx) == 998 && max(xx) == 998) {
		stop("- prior distributions for this parameter are not available")
	}
	cstr <- paste(c("Prior distribution of:"),plabel)
	h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
	# summary statistics of the prior draws
	xxmode <- h$mids[h$counts == max(h$counts)]
	xxmean <- mean(xx)
	xxsd <- sd(xx)
	xxmedian <- median(xx)
	left <- quantile(xx, 0.025)
	right <- quantile(xx, 0.975)
	# mark mode, mean, median and the 95% credibility interval
	abline(v=xxmode,untf=FALSE,col='green')
	abline(v=xxmean,untf=FALSE,col='brown')
	abline(v=xxmedian,untf=FALSE,col='purple')
	abline(v=left,untf=FALSE,col='blue')
	abline(v=right,untf=FALSE,col='blue')
	modestr <- sprintf("Mode = %0.5f", xxmode)
	meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
	medianstr <- sprintf("Median = %0.5f", xxmedian)
	lowci <- sprintf("95%% Lower CI = %0.5f", left)
	uppci <- sprintf("95%% Upper CI = %0.5f", right)
	ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
	lcol <- c('brown','purple','green','blue','blue')
	legend("topright",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
	# return the prior draws invisibly for further inspection
	invisible(xx)
}
#=========================================================================
#
# mplus.plot.bayesian.autocorrelation - plot the autocorrelation histogram for the parameter
# for the given chain
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	paramstr - the quoted parameter label or the parameter index
#	chainnum - the chain number
#
# eg. mplus.plot.bayesian.autocorrelation('bayes.gh5','parameter 1',1)
#
mplus.plot.bayesian.autocorrelation <- function(file,paramstr,chainnum=1) {
	# Fix: guard the file argument like the sibling functions do.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists (fix: message typo "bayesian dat.")
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data.\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	if (missing(paramstr)) {
		stop("- requires the parameter label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
	}
	statements <- mplus.get.group.dataset(file, 'bayesian_data/parameters_autocorr', 'statements')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	# autocorrelation array: [lag, parameter, chain]
	dims <- attr(gh5$bayesian_data$parameters_autocorr$autocorrelation,"dim")
	if (is.character(paramstr)) {
		lcstatements <- tolower(statements)
		paramstr <- tolower(paramstr)
		paramidx <- pmatch(paramstr, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown parameter:"),paramstr,"\n")
			stop(cstr)
		}
	} else {
		paramidx <- paramstr
		# Fix: bound by the parameter dimension (dims[2]); the original
		# checked dims[1], the lag dimension.
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- parameter index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	plabel <- statements[paramidx]
	# Fix: the original used && (chainnum < 1 && chainnum > dims[3]),
	# which can never be TRUE, so invalid chains were never rejected.
	if (chainnum < 1 || chainnum > dims[3]) {
		cstr <- paste("- invalid chain number: ", chainnum,"\n\nThe chain number must be between 1 and ", dims[3], ".")
		stop(cstr)
	}
	yy <- mplus.get.bayesian.autocorrelation(file,paramidx,chainnum)
	# x-axis labels are the lag numbers
	xx <- as.character(1:dims[1])
	cstr <- paste(c("Autocorrelation (chain "),format(chainnum),c("): "),plabel)
	barplot(yy,ylim=c(-1,1),names.arg=xx,col='red',main=cstr)
	invisible(xx)
}
#=========================================================================
#
# mplus.list.bayesian.predictive.labels - list the predictive labels in bayesian data
#
# arguments:
#	file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.predictive.labels('ex8.1.gh5')
#
mplus.list.bayesian.predictive.labels <- function(file) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!file.exists(file)) {
		stop(paste("- file does not exist:",file,"\n"))
	}
	gh5 <- h5dump(file, load=TRUE)
	# Bayesian results are stored only with TYPE=PLOT2.
	if (!("bayesian_data" %in% names(gh5))) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# Print a short usage banner, then return the trimmed labels.
	cat("\nList of parameters to use in the following functions:\n")
	cat(" - mplus.plot.bayesian.predictive.scatterplots\n")
	cat(" - mplus.plot.bayesian.predictive.distribution\n")
	cat("\nPredictive labels:\n")
	# Labels are stored as padded fixed-width strings; trim the padding.
	plabels <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
	plabels <- gsub("(^\\s+|\\s+$)", "", plabels, perl=TRUE)
	plabels
}
#=========================================================================
#
# mplus.get.bayesian.predictive.observed - get the predictive observed data
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	plabel - the quoted predictive label or the label index
#
# eg. mplus.get.bayesian.predictive.observed('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.observed <- function(file,plabel) {
	# Fix: guard the file argument like the sibling functions do.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# Fix: point the user at the predictive-labels lister, not at
	# mplus.list.bayesian.parameters (which lists model parameters).
	if (missing(plabel)) {
		stop("- requires the predictive label\n\nUse mplus.list.bayesian.predictive.labels() to get the list of predictive labels.")
	}
	if (is.character(plabel)) {
		labels <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
		labels <- tolower(labels)
		plabel <- tolower(plabel)
		paramidx <- pmatch(plabel, labels, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
			stop(cstr)
		}
	} else {
		# observed array: the second dimension indexes the predictive
		# labels (indexed [,paramidx] below); first dimension holds the
		# observed values per label - TODO confirm against writer
		dims <- attr(gh5$bayesian_data$predictive$observed,"dim")
		paramidx <- plabel
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.predictive.labels to see the list of predictive labels.\n")
			stop(cstr)
		}
	}
	xx <- gh5$bayesian_data$predictive$observed[,paramidx]
	xx
}
#=========================================================================
#
# mplus.get.bayesian.predictive.replicated - get the predictive replicated data
#
# arguments:
#	file - the quoted name of an existing GH5 file
#	plabel - the quoted predictive label or the label index
#
# eg. mplus.get.bayesian.predictive.replicated('bayes.gh5','parameter 1')
#
mplus.get.bayesian.predictive.replicated <- function(file,plabel) {
	# Fix: guard the file argument like the sibling functions do.
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	# Fix: point the user at the predictive-labels lister, not at
	# mplus.list.bayesian.parameters (which lists model parameters).
	if (missing(plabel)) {
		stop("- requires the predictive label\n\nUse mplus.list.bayesian.predictive.labels() to get the list of predictive labels.")
	}
	if (is.character(plabel)) {
		labels <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
		labels <- tolower(labels)
		plabel <- tolower(plabel)
		paramidx <- pmatch(plabel, labels, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
			stop(cstr)
		}
	} else {
		# replicated array: the second dimension indexes the predictive
		# labels (indexed [,paramidx] below); first dimension holds the
		# replicated values per label - TODO confirm against writer
		dims <- attr(gh5$bayesian_data$predictive$replicated,"dim")
		paramidx <- plabel
		if (paramidx < 1 || paramidx > dims[2]) {
			cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.predictive.labels to see the list of predictive labels.\n")
			stop(cstr)
		}
	}
	xx <- gh5$bayesian_data$predictive$replicated[,paramidx]
	xx
}
#=========================================================================
#
# mplus.get.bayesian.predictive.lowerci - get the predictive lower CI
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.lowerci('bayes.gh5','parameter 1')
#
# Return the 95% lower confidence limit of the posterior predictive checking
# distribution for one predictive label.
#
# file   - path to an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
mplus.get.bayesian.predictive.lowerci <- function(file, plabel) {
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # Bayesian results are stored only when TYPE=PLOT2 was requested in Mplus.
    if (!("bayesian_data" %in% names(gh5))) {
        stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
    }
    if (missing(plabel)) {
        stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
    }
    if (is.character(plabel)) {
        # Case-insensitive partial match of the label against the stored labels.
        statements <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
        plabel <- tolower(plabel)
        paramidx <- pmatch(plabel, statements, nomatch = 0)
        if (paramidx == 0) {
            stop(paste(c("- unknown predictive label:"), plabel, "\n"))
        }
    } else {
        # Numeric index: the second dimension of the pvalues array counts labels.
        paramidx <- plabel
        dims <- attr(gh5$bayesian_data$predictive$pvalues, "dim")
        if (paramidx < 1 || paramidx > dims[2]) {
            stop(paste("- predictive label index is out of range: ", paramidx, "\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
        }
    }
    # Row 1 of the pvalues dataset holds the lower confidence limit.
    gh5$bayesian_data$predictive$pvalues[1, paramidx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.upperci - get the predictive upper CI
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.upperci('bayes.gh5','parameter 1')
#
# Return the 95% upper confidence limit of the posterior predictive checking
# distribution for one predictive label.
#
# file   - path to an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
mplus.get.bayesian.predictive.upperci <- function(file, plabel) {
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # Bayesian results are stored only when TYPE=PLOT2 was requested in Mplus.
    if (!("bayesian_data" %in% names(gh5))) {
        stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
    }
    if (missing(plabel)) {
        stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
    }
    if (is.character(plabel)) {
        # Case-insensitive partial match of the label against the stored labels.
        statements <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
        plabel <- tolower(plabel)
        paramidx <- pmatch(plabel, statements, nomatch = 0)
        if (paramidx == 0) {
            stop(paste(c("- unknown predictive label:"), plabel, "\n"))
        }
    } else {
        # Numeric index: the second dimension of the pvalues array counts labels.
        paramidx <- plabel
        dims <- attr(gh5$bayesian_data$predictive$pvalues, "dim")
        if (paramidx < 1 || paramidx > dims[2]) {
            stop(paste("- predictive label index is out of range: ", paramidx, "\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
        }
    }
    # Row 2 of the pvalues dataset holds the upper confidence limit.
    gh5$bayesian_data$predictive$pvalues[2, paramidx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.pvalue - get the predictive pvalue
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.pvalue('bayes.gh5','parameter 1')
#
# Return the posterior predictive p-value for one predictive label.
#
# file   - path to an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
mplus.get.bayesian.predictive.pvalue <- function(file, plabel) {
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # Bayesian results are stored only when TYPE=PLOT2 was requested in Mplus.
    if (!("bayesian_data" %in% names(gh5))) {
        stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
    }
    if (missing(plabel)) {
        stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
    }
    if (is.character(plabel)) {
        # Case-insensitive partial match of the label against the stored labels.
        statements <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
        plabel <- tolower(plabel)
        paramidx <- pmatch(plabel, statements, nomatch = 0)
        if (paramidx == 0) {
            stop(paste(c("- unknown predictive label:"), plabel, "\n"))
        }
    } else {
        # Numeric index: the second dimension of the pvalues array counts labels.
        paramidx <- plabel
        dims <- attr(gh5$bayesian_data$predictive$pvalues, "dim")
        if (paramidx < 1 || paramidx > dims[2]) {
            stop(paste("- predictive label index is out of range: ", paramidx, "\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
        }
    }
    # Row 3 of the pvalues dataset holds the posterior predictive p-value.
    gh5$bayesian_data$predictive$pvalues[3, paramidx]
}
#=========================================================================
#
# mplus.get.bayesian.predictive.pvalue_type - get the predictive pvalue type
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the quoted name of the parameter
#
# eg. mplus.get.bayesian.predictive.pvalue_type('bayes.gh5','parameter 1')
#
# Return the p-value type code for one predictive label (used by the
# scatterplot function to word its legend).
#
# file   - path to an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
mplus.get.bayesian.predictive.pvalue_type <- function(file, plabel) {
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # Bayesian results are stored only when TYPE=PLOT2 was requested in Mplus.
    if (!("bayesian_data" %in% names(gh5))) {
        stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
    }
    if (missing(plabel)) {
        stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
    }
    # One type code is stored per predictive label.
    ptypes <- mplus.get.group.attribute(file, '/bayesian_data/predictive', 'types')
    if (is.character(plabel)) {
        # Case-insensitive partial match of the label against the stored labels.
        statements <- tolower(mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels'))
        plabel <- tolower(plabel)
        paramidx <- pmatch(plabel, statements, nomatch = 0)
        if (paramidx == 0) {
            stop(paste(c("- unknown predictive label:"), plabel, "\n"))
        }
    } else {
        # Numeric index: validate against the length of the types attribute.
        paramidx <- plabel
        dims <- attr(ptypes, "dim")
        if (paramidx < 1 || paramidx > dims[1]) {
            stop(paste("- predictive label index is out of range: ", paramidx, "\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n"))
        }
    }
    ptypes[paramidx]
}
#=========================================================================
#
# mplus.plot.bayesian.predictive.scatterplot - plot the predictive checking scatterplot
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the predictive label
#
# eg. mplus.plot.bayesian.predictive.scatterplot('bayes.gh5','label 1')
#
# Draw the Bayesian posterior predictive checking scatterplot for one
# predictive label: replicated values (y-axis) against observed values
# (x-axis) on a common range, with a 45-degree reference line and a legend
# reporting the 95% CI for the difference and the posterior predictive
# p-value.
#
# file   - the quoted name of an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
mplus.plot.bayesian.predictive.scatterplot <- function(file,plabel) {
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists (written only with TYPE=PLOT2)
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	if (missing(plabel)) {
		stop("- requires the predictive label\n\nUse mplus.list.bayesian.parameters() to get the list of parameters.")
	}
	# Trim padding from the stored labels before matching/printing them.
	statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	dims <- attr(statements,"dim")
	if (is.character(plabel)) {
		# Case-insensitive (partial) match of the requested label.
		lcstatements <- tolower(statements)
		plabel <- tolower(plabel)
		paramidx <- pmatch(plabel, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
			stop(cstr)
		}
	} else {
		# Numeric index: validate against the number of stored labels.
		paramidx <- plabel
		if (paramidx < 1 || paramidx > dims[1]) {
			cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	rep <- mplus.get.bayesian.predictive.replicated(file,paramidx)
	obs <- mplus.get.bayesian.predictive.observed(file,paramidx)
	# Compute a common range covering both series so that both axes share
	# the same limits and the 45-degree line is meaningful.
	omin <- min(obs)
	omax <- max(obs)
	rmin <- min(rep)
	rmax <- max(rep)
	if (omin < rmin) {
		rmin <- omin
	}
	if (omax > rmax) {
		rmax <- omax
	}
	plot(obs,rep,xlab='Observed',ylab='Replicated',xlim=c(rmin,rmax),ylim=c(rmin,rmax))
	# print(rmin)
	# print(rmax)
	# 45-degree reference line: points on it have observed == replicated.
	xx=c(rmin,rmax)
	yy=c(rmin,rmax)
	lines(xx,yy,col='green')
	#text(50,50,"test")
	lowci <- mplus.get.bayesian.predictive.lowerci(file,paramidx)
	uppci <- mplus.get.bayesian.predictive.upperci(file,paramidx)
	pval <- mplus.get.bayesian.predictive.pvalue(file,paramidx)
	ptype <- mplus.get.bayesian.predictive.pvalue_type(file,paramidx)
	# The type code selects the wording that explains how the p-value was
	# computed: -1 lower-right half, 1 upper-left half, otherwise two-sided.
	if (ptype == -1) {
		text2 <- "(Proportion of Points in the Lower Right Half)";
	}
	else if (ptype == 1) {
		text2 <- "(Proportion of Points in the Upper Left Half)";
	} else {
		text2 <- "(Smallest Proportion of Points in the Upper versus Lower Halves)";
	}
	#ldesc <- sprintf("95%% Confidence Interval for the Difference\n%0.3f %0.3f\nPosterior Predictive P-Value %0.3f\n%s",
	#		 lowci, uppci, pval, text2)
	#mtext(ldesc, side=3)
	# Build the legend one line at a time (empty lines act as spacers).
	line1 <- sprintf("95%% Confidence Interval for the Difference")
	line2 <- sprintf("        %0.3f        %0.3f        ", lowci, uppci)
	line3 <- sprintf("")
	line4 <- sprintf("     Posterior Predictive P-Value %0.3f     ", pval)
	line5 <- sprintf("")
	line6 <- text2
	ldesc <- c(line1,line2,line3,line4,line5,line6)
	legend('topleft',ldesc,xjust=1)
	title(statements[paramidx])
}
#=========================================================================
#
# mplus.plot.bayesian.predictive.distribution - plot the predictive checking distribution
#
# arguments:
# file - the quoted name of an existing GH5 file
# plabel - the predictive label
#	bins - the number of bins, default is 100
#
# eg. mplus.plot.bayesian.predictive.distribution('bayes.gh5','label 1')
#
# Plot the posterior predictive checking distribution for one predictive
# label: a histogram of the pointwise differences (observed - replicated),
# with the median marked and a legend reporting the 95% CI for the
# difference and the posterior predictive p-value.
#
# file   - the quoted name of an existing GH5 file
# plabel - predictive label (character) or 1-based label index (numeric)
# bins   - number of histogram bins (default 100)
mplus.plot.bayesian.predictive.distribution <- function(file,plabel,bins=100) {
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists (written only with TYPE=PLOT2)
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data\n\nUse TYPE=PLOT2 setting in Mplus with a Bayesian analysis.")
	}
	if (missing(plabel)) {
		stop("- requires the index of the predictive label\n\nUse mplus.list.bayesian.predictive.labels() to get the list of parameters.")
	}
	# Trim padding from the stored labels before matching/printing them.
	statements <- mplus.get.group.attribute(file, 'bayesian_data/predictive', 'labels')
	statements <- gsub("(^\\s+|\\s+$)", "", statements, perl=TRUE)
	dims <- attr(statements,"dim")
	if (is.character(plabel)) {
		# Case-insensitive (partial) match of the requested label.
		lcstatements <- tolower(statements)
		plabel <- tolower(plabel)
		paramidx <- pmatch(plabel, lcstatements, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown predictive label:"),plabel,"\n")
			stop(cstr)
		}
	} else {
		# Numeric index: validate against the number of stored labels.
		paramidx <- plabel
		if (paramidx < 1 || paramidx > dims[1]) {
			cstr <- paste("- predictive label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.parameters to see the list of parameters.\n")
			stop(cstr)
		}
	}
	rep <- mplus.get.bayesian.predictive.replicated(file,paramidx)
	obs <- mplus.get.bayesian.predictive.observed(file,paramidx)
	# Pointwise differences, vectorized (the original element-by-element
	# copy loop was unnecessary).
	vals <- obs - rep
	hist(vals,breaks=seq(min(vals),max(vals),length=bins+1),col="red",main=statements[paramidx],xlab='Observed - Replicated',ylab='Count')
	# Mark the median difference on the histogram.
	xxmedian <- median(vals)
	abline(v=xxmedian,untf=FALSE,col='purple')
	# Bug fix: the original also drew a green diagonal line over the data
	# range of obs/rep values -- a leftover copied from the scatterplot
	# version that has no meaning on a histogram of differences; removed.
	lowci <- mplus.get.bayesian.predictive.lowerci(file,paramidx)
	uppci <- mplus.get.bayesian.predictive.upperci(file,paramidx)
	pval <- mplus.get.bayesian.predictive.pvalue(file,paramidx)
	# Build the legend one line at a time (the empty line acts as a spacer).
	line1 <- sprintf("95%% Confidence Interval for the Difference")
	line2 <- sprintf("        %0.3f        %0.3f        ", lowci, uppci)
	line3 <- sprintf("")
	line4 <- sprintf("     Posterior Predictive P-Value %0.3f     ", pval)
	ldesc <- c(line1,line2,line3,line4)
	legend('topleft',ldesc,xjust=1)
}
#=========================================================================
#
# mplus.list.bayesian.plausible.labels - list the plausible labels in bayesian data
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.bayesian.plausible.labels('ex8.1.gh5')
#
# Print the numbered list of plausible-value (factor score) labels stored in
# a Bayesian GH5 file, and invisibly return the trimmed label vector.
#
# file - path to an existing GH5 file
mplus.list.bayesian.plausible.labels <- function(file) {
    if (missing(file)) {
        stop("- name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # Factor-score draws require both TYPE=PLOT3 and the FACTORS option.
    if (!("bayesian_data" %in% names(gh5))) {
        stop("mplus.list.bayesian.plausible.labels requires bayesian data and factor scores.\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
    }
    if (!("plausible" %in% names(gh5$bayesian_data))) {
        stop("mplus.list.bayesian.plausible.labels requires bayesian data factor scores.\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
    }
    # Announce which functions accept these labels.
    cat(c("\nList of labels to use in the following functions:\n"))
    cat(c(" - mplus.plot.bayesian.plausible.distribution\n"))
    cat(c("\nPlausible labels:\n"))
    # Trim padding from the stored labels, then print them numbered.
    statements <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'bayesian_data/plausible', 'plauslabels'), perl = TRUE)
    for (idx in seq_along(statements)) {
        cstr <- sprintf("[%d] %s", idx, statements[idx])
        cat(cstr, sep = "\n")
    }
    invisible(statements)
}
#=========================================================================
#
# mplus.get.bayesian.plausible.data - get plausible data for the given plausible label
#
# arguments:
# file - the quoted name of an existing GH5 file
# plauslabel - the plausible label or the index of the plausible label
# obs - the observation index or 0 for overall
#
# eg. mplus.get.bayesian.plausible.data('ex8.1.gh5',1,obs)
#
# Get the plausible-value (factor score) draws for one plausible label.
#
# file       - the quoted name of an existing GH5 file
# plauslabel - plausible label (character) or 1-based label index (numeric)
# obs        - observation index, or 0 (default) for all observations pooled
#
# Returns a numeric vector: all imputations for one observation when
# obs > 0, or every observation's imputations concatenated observation-major
# (all imputations for observation 1, then observation 2, ...) when obs == 0.
mplus.get.bayesian.plausible.data <- function(file,plauslabel,obs=0) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists (requires TYPE=PLOT3 plus FACTORS)
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
	}
	# check if plausible values exist
	if ( !("plausible" %in% names(gh5$bayesian_data)) ) {
		stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
	}
	if (missing(plauslabel)) {
		stop("- requires the plausible label\n\nUse mplus.list.bayesian.plausible.labels() to get the list of plausible labels.")
	}
	# Dimensions of the plausible array:
	#   [1] observations, [2] imputations, [3] plausible labels.
	# Bug fix: this must be computed unconditionally -- it was previously set
	# only in the numeric-index branch, so calling with a character label and
	# obs == 0 failed with "object 'dims' not found".
	dims <- attr(gh5$bayesian_data$plausible$plausible,"dim")
	if (is.character(plauslabel)) {
		# Case-insensitive (partial) match of the trimmed label.
		labels <- mplus.get.group.attribute(file,'bayesian_data/plausible','plauslabels')
		labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
		labels <- tolower(labels)
		plauslabel <- tolower(plauslabel)
		paramidx <- pmatch(plauslabel, labels, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown plausible label:"),plauslabel,"\n")
			stop(cstr)
		}
	} else {
		# Numeric index: validate against the number of stored labels.
		paramidx <- plauslabel
		if (paramidx < 1 || paramidx > dims[3]) {
			cstr <- paste("- plausible label index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.plausible.labels to see the list of plausible labels.\n")
			stop(cstr)
		}
	}
	if (obs == 0) {
		# Pool all observations, observation-major: transpose the
		# (observations x imputations) slice so column-major flattening
		# yields the same order as the original element-by-element loop.
		xx <- as.vector(t(gh5$bayesian_data$plausible$plausible[,,paramidx]))
	} else {
		xx <- gh5$bayesian_data$plausible$plausible[obs,,paramidx]
	}
	xx
}
#=========================================================================
#
# mplus.plot.bayesian.plausible.distribution - plot the histogram for the plausible label, using the
# specified number of bins (the default is 100 bins for overall and 10 for a specific observation)
#
# arguments:
# file - the quoted name of an existing GH5 file
# paramstr - name or index of variable to plot
# obs - the observation number or 0
# bins - the number of bins to use
#
# eg. mplus.plot.bayesian.plausible.distribution('bayes.gh5',1,0)
#
# Plot the distribution of plausible-value (factor score) draws for one
# plausible label, either pooled over all observations (obs == 0) or for a
# single observation, as a histogram annotated with the mean, median, mode
# and the 2.5%/97.5% quantiles.  Invisibly returns the plotted values.
#
# file       - the quoted name of an existing GH5 file
# plauslabel - plausible label (character) or 1-based label index (numeric)
# obs        - observation index, or 0 for the overall distribution
# bins       - number of histogram bins (defaults: 100 overall, 10 per
#              observation)
mplus.plot.bayesian.plausible.distribution <- function(file,plauslabel,obs=0,bins=100) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if bayesian data exists (requires TYPE=PLOT3 plus FACTORS)
	if ( !("bayesian_data" %in% names(gh5)) ) {
		stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
	}
	# check if plausible values exist
	if ( !("plausible" %in% names(gh5$bayesian_data)) ) {
		stop("- requires bayesian data and factor scores\n\nUse TYPE=PLOT3 and the FACTORS option in Mplus with a Bayesian analysis.")
	}
	if (missing(plauslabel)) {
		stop("- requires the index of the plausible label\n\nUse mplus.list.bayesian.plausible.labels() to get the list of plausible labels.")
	}
	# When bins was not given explicitly, use a finer default for the pooled
	# distribution (many draws) than for a single observation (few draws).
	if (missing(bins)) {
		if (obs == 0) {
			bins = 100
		} else {
			bins = 10
		}
	}
	# the number of bins should be greater than 0
	if (bins <= 0) {
		stop("- the number of bins should be greater than 0")
	}
	# Trim padding from the stored labels before matching/printing them.
	labels <- mplus.get.group.attribute(file,'bayesian_data/plausible','plauslabels')
	labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
	adim <- attr(labels,'dim')
	if (is.character(plauslabel)) {
		# Case-insensitive (partial) match of the requested label.
		lclabels <- tolower(labels)
		plauslabel <- tolower(plauslabel)
		paramidx <- pmatch(plauslabel, lclabels, nomatch=0)
		if (paramidx == 0) {
			cstr <- paste(c("- unknown plausible label:"),plauslabel,"\n")
			stop(cstr)
		}
	} else {
		# Numeric index: validate against the number of stored labels.
		paramidx <- plauslabel
		if (paramidx < 1 || paramidx > adim[1]) {
			cstr <- paste("- plausible index is out of range: ",paramidx,"\n\nUse mplus.list.bayesian.plausible.labels to see the list of plausible labels.\n")
			stop(cstr)
		}
	}
	xx <- mplus.get.bayesian.plausible.data(file,paramidx,obs)
	xxmax <- max(xx)
	xxmin <- min(xx)
	# print(xxmax)
	# print(xxmin)
	if (obs == 0) {
		cstr <- paste(c("Overall distribution of"),labels[paramidx])
	} else {
		cstr <- sprintf("Distribution of %s for Individual %d", labels[paramidx], obs)
	}
	h <- hist(xx,breaks=seq(min(xx),max(xx),length=bins+1),col="red",main=cstr,xlab='Estimate',ylab='Count')
	# Mode is taken as the midpoint of the tallest bin.
	# NOTE(review): when several bins tie for the maximum count this yields a
	# vector, and the sprintf/abline below then use multiple values -- confirm
	# whether a single mode should be selected instead.
	xxmode <- h$mids[h$counts == max(h$counts)]
	xxmean <- mean(xx)
	xxsd <- sd(xx)
	xxmedian <- median(xx)
	# Empirical 95% interval (type=3 quantiles: nearest-observation).
	left <- quantile(xx, 0.025,type=3)
	right <- quantile(xx, 0.975,type=3)
	# Vertical markers for the summary statistics.
	abline(v=xxmode,untf=FALSE,col='green')
	abline(v=xxmean,untf=FALSE,col='brown')
	abline(v=xxmedian,untf=FALSE,col='purple')
	abline(v=left,untf=FALSE,col='blue')
	abline(v=right,untf=FALSE,col='blue')
	# Legend text, color-matched to the marker lines above.
	modestr <- sprintf("Mode = %0.5f", xxmode)
	meanstr <- sprintf("Mean = %0.5f, Std Dev = %0.5f", xxmean, xxsd)
	medianstr <- sprintf("Median = %0.5f", xxmedian)
	lowci <- sprintf("95%% Lower CI = %0.5f", left)
	uppci <- sprintf("95%% Upper CI = %0.5f", right)
	ldesc <- c(meanstr, medianstr, modestr, lowci, uppci)
	lcol <- c('brown','purple','green','blue','blue')
	legend("topleft",ldesc,col=lcol,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
	invisible(xx)
}
######################################################################################################
# Functions for LOOP PLOT
######################################################################################################
#========================================================================
#
# mplus.list.loop.labels - list the loop variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.loop.labels('ex8.1.gh5')
#
# Print the numbered list of LOOP plot labels stored in a GH5 file, and
# invisibly return the trimmed label vector.
#
# file - path to an existing GH5 file
mplus.list.loop.labels <- function(file) {
    if (missing(file)) {
        stop("- name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # LOOP results require TYPE=PLOT2 plus PLOT/LOOP (or MOD in MODEL INDIRECT).
    if (!("loop_data" %in% names(gh5))) {
        stop("mplus.list.loop.labels requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT or use the MOD keyword in MODEL INDIRECT.")
    }
    # Announce which functions accept these labels.
    cat(c("\nList of loop labels to use in the following functions:\n"))
    cat(c(" - mplus.plot.loop\n"))
    cat(c(" - mplus.get.loop.estimates\n"))
    cat(c(" - mplus.get.loop.lowerci\n"))
    cat(c(" - mplus.get.loop.upperci\n"))
    cat(c(" - mplus.get.loop.xvalues\n"))
    cat(c("\nLoop labels:\n"))
    # Trim padding from the stored labels, then print them numbered.
    statements <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'loop_data', 'labels'), perl = TRUE)
    for (idx in seq_along(statements)) {
        cstr <- sprintf("[%d] %s", idx, statements[idx])
        cat(cstr, sep = "\n")
    }
    invisible(statements)
}
#========================================================================
#
# mplus.get.loop.estimates - get the estimates for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.estimates('ex8.1.gh5','indirect')
#
# Get the estimated curve of a LOOP plot for the given loop label.
#
# file    - the quoted name of an existing GH5 file
# loopstr - loop label (character) or 1-based index (numeric, default 1)
#
# Returns the vector of estimates, one per x point.
mplus.get.loop.estimates <- function(file,loopstr=1) {
	if (missing(file)) {
		stop("- - name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste(" - file does not exist:",file)
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if loop data exists (requires TYPE=PLOT2 plus PLOT/LOOP)
	if ( !("loop_data" %in% names(gh5)) ) {
		stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
	}
	if (missing(loopstr)) {
		loopstr=1
	}
	if (is.character(loopstr)) {
		labels <- mplus.get.group.attribute(file,'loop_data','labels')
		labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
		labels <- tolower(labels)
		# Bug fix: the requested label was previously upper-cased (toupper)
		# while the stored labels are lower-cased, so pmatch could never
		# match.  Lower-case both sides, exactly as the companion functions
		# mplus.get.loop.lowerci and mplus.get.loop.upperci do.
		loopstr <- tolower(loopstr)
		loopidx <- pmatch(loopstr, labels, nomatch=0)
		if (loopidx == 0) {
			cstr <- paste(c("- unknown loop label:"),loopstr,"\n")
			stop(cstr)
		}
	} else {
		# get the dimensions of the estimates dataset
		# first dimension is the number of loop labels
		# second dimension is the number of x points
		dims <- attr(gh5$loop_data$estimates,'dim')
		loopidx <- loopstr
		if (loopidx <= 0 || loopidx > dims[1]) {
			cstr <- paste(" - loop index is out of range: ",loopidx,"\n\nUse mplus.list.loop.labels to see the list of loop labels.\n")
			stop(cstr)
		}
	}
	gh5$loop_data$estimates[loopidx,]
}
#========================================================================
#
# mplus.get.loop.lowerci - get the lower CI values for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.lowerci('ex8.1.gh5','indirect')
#
# Get the lower confidence band of a LOOP plot for the given loop label.
#
# file    - path to an existing GH5 file
# loopstr - loop label (character) or 1-based index (numeric, default 1)
#
# Returns the vector of lower CI values, one per x point.
mplus.get.loop.lowerci <- function(file, loopstr = 1) {
    if (missing(file)) {
        stop("- - name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste(" - file does not exist:", file))
    }
    gh5 <- h5dump(file, load = TRUE)
    # LOOP results require TYPE=PLOT2 plus PLOT/LOOP in MODEL CONSTRAINT.
    if (!("loop_data" %in% names(gh5))) {
        stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
    }
    if (missing(loopstr)) {
        loopstr <- 1
    }
    if (is.character(loopstr)) {
        # Case-insensitive partial match against the trimmed label list.
        labels <- tolower(gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'loop_data', 'labels'), perl = TRUE))
        loopstr <- tolower(loopstr)
        loopidx <- pmatch(loopstr, labels, nomatch = 0)
        if (loopidx == 0) {
            stop(paste(c("- unknown loop label:"), loopstr, "\n"))
        }
    } else {
        # dims[1] = number of loop labels, dims[2] = number of x points.
        dims <- attr(gh5$loop_data$estimates, 'dim')
        loopidx <- loopstr
        if (loopidx <= 0 || loopidx > dims[1]) {
            stop(paste(" - loop index is out of range: ", loopidx, "\n\nUse mplus.list.loop.labels to see the list of loop labels.\n"))
        }
    }
    gh5$loop_data$lowerci[loopidx, ]
}
#========================================================================
#
# mplus.get.loop.upperci - get the upper CI values for the given loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
# loopstr - the quoted loop label
#
# eg. mplus.get.loop.upperci('ex8.1.gh5','indirect')
#
# Get the upper confidence band of a LOOP plot for the given loop label.
#
# file    - path to an existing GH5 file
# loopstr - loop label (character) or 1-based index (numeric, default 1)
#
# Returns the vector of upper CI values, one per x point.
mplus.get.loop.upperci <- function(file, loopstr = 1) {
    if (missing(file)) {
        stop("- - name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste(" - file does not exist:", file))
    }
    gh5 <- h5dump(file, load = TRUE)
    # LOOP results require TYPE=PLOT2 plus PLOT/LOOP in MODEL CONSTRAINT.
    if (!("loop_data" %in% names(gh5))) {
        stop(" - requires loop data\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
    }
    if (missing(loopstr)) {
        loopstr <- 1
    }
    if (is.character(loopstr)) {
        # Case-insensitive partial match against the trimmed label list.
        labels <- tolower(gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'loop_data', 'labels'), perl = TRUE))
        loopstr <- tolower(loopstr)
        loopidx <- pmatch(loopstr, labels, nomatch = 0)
        if (loopidx == 0) {
            stop(paste(c("- unknown loop label:"), loopstr, "\n"))
        }
    } else {
        # dims[1] = number of loop labels, dims[2] = number of x points.
        dims <- attr(gh5$loop_data$estimates, 'dim')
        loopidx <- loopstr
        if (loopidx <= 0 || loopidx > dims[1]) {
            stop(paste(" - loop index is out of range: ", loopidx, "\n\nUse mplus.list.loop.labels to see the list of loop labels.\n"))
        }
    }
    gh5$loop_data$upperci[loopidx, ]
}
#========================================================================
#
# mplus.get.loop.xvalues - get the x points for the loop plots
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.get.loop.xvalues('ex8.1.gh5')
#
# Get the x-axis values shared by all curves of a LOOP plot.
#
# file - the quoted name of an existing GH5 file
#
# Returns the vector of x values.
mplus.get.loop.xvalues <- function(file) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if loop data exists (requires TYPE=PLOT2 plus PLOT/LOOP)
	if ( !("loop_data" %in% names(gh5)) ) {
		# Bug fix: the message previously named mplus.get.loop.upperci
		# (copy-paste error) instead of this function.
		stop("mplus.get.loop.xvalues requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
	}
	gh5$loop_data$xvalues
}
#========================================================================
#
# mplus.plot.loop - plot the loop label
#
# arguments:
# file - the quoted name of an existing GH5 file
#	loopstr - the loop label or the 1-based index of the loop label
#
# eg. mplus.plot.loop('ex8.1.gh5',1)
#
# Draw a LOOP plot for the given loop label: the estimate (red) with its
# lower and upper confidence bands (blue) against the loop variable.
#
# file    - the quoted name of an existing GH5 file
# loopstr - loop label (character) or 1-based index (numeric, default 1)
mplus.plot.loop <- function(file,loopstr=1) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if loop data exists (requires TYPE=PLOT2 plus PLOT/LOOP)
	if ( !("loop_data" %in% names(gh5)) ) {
		stop("requires loop data.\n\nUse TYPE=PLOT2 and the PLOT/LOOP keywords in MODEL CONSTRAINT.")
	}
	if (missing(loopstr)) {
		loopstr=1
	}
	# props[1] = number of loop labels, props[2] = number of x points
	props <- mplus.get.group.attribute(file,'loop_data','properties')
	if (is.character(loopstr)) {
		# Case-insensitive (partial) match of the trimmed label.
		labels <- mplus.get.group.attribute(file, 'loop_data', 'labels')
		labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
		labels <- tolower(labels)
		loopstr <- tolower(loopstr)
		loopidx <- pmatch(loopstr, labels, nomatch=0)
		if (loopidx == 0) {
			cstr <- paste(c("Unknown loop label:"),loopstr,"\n")
			stop(cstr)
		}
	} else {
		if (loopstr <= 0 || loopstr > props[1]) {
			# Bug fix: the message previously interpolated 'loopvar', which
			# is not defined until later in this function, so the
			# out-of-range path itself failed with an unrelated error.
			cstr <- paste("Loop index is out of range: ",loopstr,"\n\nUse mplus.list.loop.labels to see the list of loop labels.\n")
			stop(cstr)
		}
		loopidx <- loopstr
	}
	# Re-read the labels untouched by the lower-casing above (for display).
	labels <- mplus.get.group.attribute(file,'loop_data','labels')
	labels <- gsub("(^\\s+|\\s+$)", "", labels, perl=TRUE)
	loopvar <- mplus.get.group.attribute(file,'loop_data','loop_variable')
	loopvar <- gsub("(^\\s+|\\s+$)", "", loopvar, perl=TRUE)
	# All three curves share the same x values; fetch them once.
	xvals <- mplus.get.loop.xvalues(file)
	xx <- array(0,c(3,props[2]))
	xx[1,] <- xvals
	xx[2,] <- xvals
	xx[3,] <- xvals
	yy <- array(0,c(3,props[2]))
	yy[1,] <- mplus.get.loop.estimates(file,loopidx)
	yy[2,] <- mplus.get.loop.lowerci(file,loopidx)
	yy[3,] <- mplus.get.loop.upperci(file,loopidx)
	# Empty frame sized to cover all three curves, then draw the estimate
	# (red) and the confidence bands (blue).
	cstr <- paste("Loop plot for",labels[loopidx])
	plot(xx,yy,xlab=loopvar,ylab=labels[loopidx],main=cstr,type='n')
	lines(xx[1,],yy[1,],col='red')
	lines(xx[2,],yy[2,],col='blue')
	lines(xx[3,],yy[3,],col='blue')
	grid(NULL, NULL, lty=6, col='cornsilk2')
}
######################################################################################################
# Functions for IRT plots
######################################################################################################
#========================================================================
# mplus.list.irt.variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.irt.variables('ex7.27.gh5')
#
# Print the numbered list of IRT indicator variables ('uvar' argument of the
# ICC functions), and invisibly return the trimmed label vector.
#
# file - path to an existing GH5 file
mplus.list.irt.variables <- function(file) {
    if (missing(file)) {
        stop("- name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # IRT plot data is stored only when TYPE=PLOT2 was requested.
    if (!("irt_data" %in% names(gh5))) {
        stop("IRT data is required.\n\nUse TYPE=PLOT2.")
    }
    # Announce which functions accept these variable names.
    cat(c("\nList of variables to use in the following functions:\n"))
    cat(c(" - mplus.compute.irt.icc\n"))
    cat(c(" - mplus.plot.irt.icc\n"))
    cat(c("\nVariables for 'uvar' argument:\n"))
    # Trim padding from the stored indicator names, then print them numbered.
    ulabels <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'irt_data', 'ulabels'), perl = TRUE)
    for (idx in seq_along(ulabels)) {
        cstr <- sprintf("[%d] %s", idx, ulabels[idx])
        cat(cstr, sep = "\n")
    }
    invisible(ulabels)
}
#========================================================================
# mplus.list.irt.xvariables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.irt.xvariables('ex7.27.gh5')
#
# Print the numbered list of IRT factor/covariate variables ('xvar' argument
# of the ICC functions), and invisibly return the trimmed label vector.
#
# file - path to an existing GH5 file
mplus.list.irt.xvariables <- function(file) {
    if (missing(file)) {
        stop("- name of the GH5 file is required")
    }
    if (!file.exists(file)) {
        stop(paste("- file does not exist:", file, "\n"))
    }
    gh5 <- h5dump(file, load = TRUE)
    # IRT plot data is stored only when TYPE=PLOT2 was requested.
    if (!("irt_data" %in% names(gh5))) {
        stop("IRT data is required.\n\nUse TYPE=PLOT2.")
    }
    # Announce which functions accept these variable names.
    cat(c("\nList of variables to use in the following functions:\n"))
    cat(c(" - mplus.compute.irt.icc\n"))
    cat(c(" - mplus.plot.irt.icc\n"))
    cat(c("\nVariables for the 'xvar' argument:\n"))
    # Trim padding from the stored factor names, then print them numbered.
    flabels <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, 'irt_data', 'flabels'), perl = TRUE)
    for (idx in seq_along(flabels)) {
        cstr <- sprintf("[%d] %s", idx, flabels[idx])
        cat(cstr, sep = "\n")
    }
    invisible(flabels)
}
#========================================================================
# mplus.compute.irt.icc
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (required)
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (required)
# uvar - the indicator variable, can be the variable index or the quoted variable name (required)
# cat - the category number (required)
# xvector -> the vector containing x values to use (required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
#
# eg. mplus.compute.irt.icc('ex7.27.gh5',1,'F','U1',1,seq(-3,3,0.2))
#
mplus.compute.irt.icc <- function(file,group,xvar,uvar,cat,xvector,covariates) {
# Compute item characteristic curve (ICC) probabilities for one indicator
# and one category, evaluated at each value in 'xvector'.
#
#   file       - name of an existing GH5 file (required)
#   group      - class/group number (required)
#   xvar       - x-axis latent variable: index or quoted name (required)
#   uvar       - indicator variable: index or quoted name (required)
#   cat        - category number of the indicator (required)
#   xvector    - x values at which to evaluate the curve (required)
#   covariates - values for the other covariates; class means if omitted
#
# Returns a numeric vector of probabilities, one per element of xvector.
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if irt data exists (only written when TYPE=PLOT2 was used in Mplus)
if ( !("irt_data" %in% names(gh5)) ) {
stop("IRT data is required.\n\nUse TYPE=PLOT2.")
}
# properties[1] - number of factors
# properties[2] - number of factors/covariates
# properties[3] - number of indicators
# properties[4] - number of classes
# properties[5] - maximum number of categories
props <- mplus.get.group.attribute(file,'irt_data','properties')
num_fx <- as.integer(props[2])
num_r <- as.integer(props[3])
max_num_cat <- as.integer(props[5])
# Factor and indicator labels, trimmed of padding whitespace, used for
# resolving character-valued xvar/uvar arguments.
flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
# Resolve xvar (name or index) to a factor index fidx.
if (missing(xvar)) {
stop("The x-axis variable (xvar) is required.")
} else {
if (is.character(xvar)) {
xvar <- toupper(xvar)
index <- pmatch(xvar, flabels, nomatch=0)
if (index == 0) {
cstr <- sprintf("Unknown x-variable: %s\n", xvar)
stop(cstr)
}
fidx = index
} else {
if (xvar <= 0 || xvar > num_fx) {
stop("The index for the x-variable (xvar) is out of range.")
}
fidx = xvar
}
}
# Resolve uvar (name or index) to an indicator index ridx.
if (missing(uvar)) {
stop("The indicator variable (uvar) is required.")
} else {
if (is.character(uvar)) {
uvar <- toupper(uvar)
index <- pmatch(uvar, ulabels, nomatch=0)
if (index == 0) {
cstr <- sprintf("Unknown indicator: %s\n", uvar)
stop(cstr)
}
ridx = index
} else {
if (uvar <= 0 || uvar > num_r) {
stop("The index for the indicator (uvar) is out of range.")
}
ridx = uvar
}
}
if (missing(group)) {
stop("The group index (group) is required.")
} else {
if (group <= 0 || group > props[4]) {
stop("The group index (group) is out of range.")
}
}
if (missing(xvector)) {
stop("The vector (xvector) containing values for the x-axis is required.")
}
# Default covariate values: the sample means for the requested class
# ('mean' is stored as an nvar-by-nclass matrix).
if (missing(covariates)) {
means <- mplus.get.group.dataset(file,'irt_data','mean')
covariates <- means[,group]
} else {
if (length(covariates) != num_fx) {
cstr <- sprintf("The length of the covariates vector should be %d.\nFound: %d", num_fx, length(covariates))
stop(cstr)
}
}
links <- mplus.get.group.attribute(file,'categorical_data','link')
# 'shift' accumulates the contribution of every covariate other than the
# x-axis variable, at the supplied (or mean) covariate values.
# NOTE(review): loading is indexed [ridx,i,group] here but
# [fidx,ridx,group] in the probability loop below — confirm the dimension
# order of irt_data$loading against the GH5 layout; one of the two index
# orders may be transposed.
shift <- 0.0
for (i in c(1:num_fx)) {
if (i != fidx) {
shift <- shift + covariates[i]*gh5$irt_data$loading[ridx,i,group]
}
}
# Category probability from adjacent thresholds (tau): first category uses
# the first threshold, last category the complement of the last threshold,
# middle categories the difference of two cumulative link values.
# lin() (defined elsewhere in this file) evaluates the link function given
# by links[ridx] — presumably logit/probit; TODO confirm.
prob <- array(0,c(length(xvector)))
for (i in c(1:length(xvector))) {
x <- xvector[i]
if (cat == 1) {
p <- gh5$irt_data$tau[cat,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p <- p * gh5$irt_data$scale[ridx,group]
prob[i] <- lin(p,links[ridx])
} else if (cat == max_num_cat) {
p = gh5$irt_data$tau[cat-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p = p * gh5$irt_data$scale[ridx,group]
prob[i] = 1.0 - lin(p,links[ridx])
} else {
p = gh5$irt_data$tau[cat,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p = p * gh5$irt_data$scale[ridx,group]
p2 = gh5$irt_data$tau[cat-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
p2 = p2 * gh5$irt_data$scale[ridx,group]
prob[i] = lin(p,links[ridx]) - lin(p2,links[ridx])
}
}
prob
}
#========================================================================
# mplus.compute.irt.iic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (required)
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (required)
# uvar - the indicator variable, can be the variable index or the quoted variable name (required)
# xvector -> the vector containing x values to use (required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
#
# eg. mplus.compute.irt.iic('ex7.27.gh5',1,'F','U1',seq(-3,3,0.2))
#
mplus.compute.irt.iic <- function(file,group,xvar,uvar,xvector,covariates) {
  # Compute the item information curve (IIC) for indicator 'uvar' as a
  # function of the latent variable 'xvar', evaluated at each value in
  # 'xvector'.  Returns a numeric vector of information values, one per
  # element of xvector.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if irt data exists (only written when TYPE=PLOT2 was used in Mplus)
  if ( !("irt_data" %in% names(gh5)) ) {
    stop("IRT data is required.\n\nUse TYPE=PLOT2.")
  }
  # properties[1] - number of factors
  # properties[2] - number of factors/covariates
  # properties[3] - number of indicators
  # properties[4] - number of classes
  # properties[5] - maximum number of categories
  props <- mplus.get.group.attribute(file,'irt_data','properties')
  num_fx <- as.integer(props[2])
  num_r <- as.integer(props[3])
  max_num_cat <- as.integer(props[5])
  # Bug fix: flabels/ulabels were referenced below without ever being
  # loaded, so character xvar/uvar arguments failed with
  # "object 'flabels' not found".  Load and trim them the same way
  # mplus.compute.irt.icc does.
  flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
  flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
  ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
  ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
  # Resolve xvar (name or index) to a factor index fidx.
  if (missing(xvar)) {
    stop("The x-axis variable (xvar) is required.")
  } else {
    if (is.character(xvar)) {
      # case-insensitive name lookup, consistent with mplus.compute.irt.icc
      xvar <- toupper(xvar)
      index <- pmatch(xvar, flabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown x-variable: %s\n", xvar)
        stop(cstr)
      }
      fidx = index
    } else {
      if (xvar <= 0 || xvar > num_fx) {
        stop("The index for the x-variable (xvar) is out of range.")
      }
      fidx = xvar
    }
  }
  # Resolve uvar (name or index) to an indicator index ridx.
  if (missing(uvar)) {
    stop("The indicator variable (uvar) is required.")
  } else {
    if (is.character(uvar)) {
      uvar <- toupper(uvar)
      index <- pmatch(uvar, ulabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown indicator: %s\n", uvar)
        stop(cstr)
      }
      ridx = index
    } else {
      if (uvar <= 0 || uvar > num_r) {
        stop("The index for the indicator (uvar) is out of range.")
      }
      ridx = uvar
    }
  }
  if (missing(group)) {
    stop("The group index (group) is required.")
  } else {
    if (group <= 0 || group > props[4]) {
      stop("The group index (group) is out of range.")
    }
  }
  if (missing(xvector)) {
    stop("The vector (xvector) containing values for the x-axis is required.")
  }
  if (missing(covariates)) {
    # Bug fix: 'mean' is stored as an nvar-by-nclass matrix; use the
    # column for the requested class (consistent with
    # mplus.compute.irt.icc) instead of the whole matrix.
    means <- mplus.get.group.dataset(file,'irt_data','mean')
    covariates <- means[,group]
  } else {
    if (length(covariates) != num_fx) {
      cstr <- sprintf("The length of the covariates vector should be %d.\nFound: %d", num_fx, length(covariates))
      stop(cstr)
    }
  }
  categories <- mplus.get.group.attribute(file,'irt_data','categories')
  links <- mplus.get.group.attribute(file,'categorical_data','link')
  # 'shift' accumulates the contribution of every covariate other than the
  # x-axis variable, at the supplied (or mean) covariate values.
  # NOTE(review): loading is indexed [ridx,i,group] here but
  # [fidx,ridx,group] in the threshold loop below (same pattern as
  # mplus.compute.irt.icc) -- confirm the dimension order of
  # irt_data$loading against the GH5 layout.
  shift <- 0.0
  for (i in c(1:num_fx)) {
    if (i != fidx) {
      shift <- shift + covariates[i]*gh5$irt_data$loading[ridx,i,group]
    }
  }
  categories <- as.numeric(categories)
  # probvec[i,j] holds the cumulative probability below threshold j at
  # x = xvector[i]; column 1 is 0 and the last column is 1 by definition.
  probvec <- array(0, c(length(xvector),categories[ridx]+1))
  for (i in c(1:length(xvector))) {
    x <- xvector[i]
    # Bug fix: was 'probvec[1] <- 0', which addressed a single element of
    # the matrix; harmless only because the array is zero-initialized.
    probvec[i,1] <- 0
    for (j in c(2:c(categories[ridx]))) {
      fp = gh5$irt_data$tau[j-1,ridx,group] - shift - x * gh5$irt_data$loading[fidx,ridx,group]
      fp = fp * gh5$irt_data$scale[ridx,group]
      # lin() (defined elsewhere in this file) applies the link function
      dp = lin(fp,links[ridx])
      probvec[i,j] <- dp
    }
    probvec[i,categories[ridx]+1]=1.0
  }
  # Accumulate each category's information contribution at every x value.
  prob <- array(0,c(length(xvector)))
  for (i in c(1:length(xvector))) {
    x <- xvector[i]
    for (j in c(2:c(categories[ridx]+1))) {
      # floor the category probability to avoid division by zero for
      # (near-)empty categories
      r <- 10**(-10)
      ep = probvec[i,j] - probvec[i,j-1]
      if (ep < r) { ep <- r }
      dp = gh5$irt_data$scale[ridx,group] * gh5$irt_data$loading[fidx,ridx,group] * gh5$irt_data$scale[ridx,group] * gh5$irt_data$loading[fidx,ridx,group];
      p = (probvec[i,j] * (1-probvec[i,j])) - (probvec[i,j-1] * (1-probvec[i,j-1]))
      prob[i] <- prob[i] + p * p * dp / ep
    }
  }
  prob
}
#========================================================================
# mplus.plot.irt.icc
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required) -- 1 if not specified
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables but cat must be given (not required)
# cat - the category number
# - if not given, assume all categories for the given indicator variables
# - required if uvar not given
# cat2 - the second category number if range of categories is desired (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.icc('ex7.27.gh5',1,'F','U1')
#
mplus.plot.irt.icc <- function(file,group=1,xvar=1,uvar,cat,cat2,covariates,xrange=3,xstep=7,lloc="top") {
  # Plot item characteristic curves (ICCs) from IRT results in a GH5 file.
  # Depending on the arguments: one curve per indicator (uvar missing),
  # curves for selected indicators (uvar a vector), all categories of one
  # indicator (cat missing), a single category, or the sum over the
  # category range cat..cat2.  Returns (visibly) the x-axis grid used.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if irt data exists (only written when TYPE=PLOT2 was used)
  if ( !("irt_data" %in% names(gh5)) ) {
    stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
  }
  # properties[1] - number of factors
  # properties[2] - number of factors/covariates
  # properties[3] - number of indicators
  # properties[4] - number of classes
  # properties[5] - maximum number of categories
  props <- mplus.get.group.attribute(file,'irt_data','properties')
  flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
  flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
  ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
  ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
  num_fx <- as.integer(props[2])
  num_r <- as.integer(props[3])
  max_num_cat <- as.integer(props[5])
  # Resolve the x-axis variable (name or index) to a factor index fidx.
  if (is.character(xvar)) {
    xvar <- toupper(xvar)
    index <- pmatch(xvar, flabels, nomatch=0)
    if (index == 0) {
      cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
      stop(cstr)
    }
    fidx = index
  } else {
    if (xvar <= 0 || xvar > num_fx) {
      stop("The index for the x-variable (xvar) is out of range.")
    }
    fidx = xvar
  }
  # Resolve uvar (if given) to one or more indicator indices in ridx.
  if (missing(uvar)) {
  } else if (length(uvar) > 1) {
    ridx <- vector()
    for (r in c(1:length(uvar))) {
      var <- uvar[r]
      if (is.character(var)) {
        var <- toupper(var)
        index <- pmatch(var, ulabels, nomatch=0)
        if (index == 0) {
          cstr <- sprintf("Unknown indicator: %s\n", var)
          stop(cstr)
        }
        ridx[r] = index
      } else {
        if (var <= 0 || var > num_r) {
          stop("The index for the indicator in uvar is out of range.")
        }
        ridx[r] = var
      }
    }
  } else {
    if (is.character(uvar)) {
      uvar <- toupper(uvar)
      index <- pmatch(uvar, ulabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown indicator: %s\n", uvar)
        stop(cstr)
      }
      ridx <- index
    } else {
      if (uvar <= 0 || uvar > num_r) {
        stop("The index for the indicator (uvar) is out of range.")
      }
      ridx <- uvar
    }
  }
  if (group <= 0 || group > props[4]) {
    stop("The group index (group) is out of range.")
  }
  # Default covariate values: the sample means for the requested class.
  if (missing(covariates)) {
    xmean <- mplus.get.group.dataset(file,'irt_data','mean')
    covariates <- xmean[,group]
  } else {
    if (length(covariates) != num_fx) {
      cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
      stop(cstr)
    }
  }
  categories <- mplus.get.group.attribute(file,'irt_data','categories')
  # Validate cat/cat2 against the category counts of the indicator(s).
  if (missing(uvar)) {
    # case 1: uvar not specified, we plot ICC for all variables. The category number must be given.
    if (missing(cat)) {
      stop("The category number (cat) is required when plotting ICCs for all variables.")
    }
    for (i in c(1:num_r)) {
      if (cat <= 0 || cat > categories[i]) {
        cstr <- sprintf("The category number (cat) is out of range for variable %s.", ulabels[i])
        stop(cstr)
      }
    }
    if (!(missing(cat2))) {
      if (cat > cat2) {
        # bug fix: the message mislabeled the first category as 'cat2'
        cstr <- sprintf("The first category number (cat=%d) must be smaller than the second category number (cat2=%d).", cat, cat2)
        stop(cstr)
      }
      for (i in c(1:num_r)) {
        if (cat2 <= 0 || cat2 > categories[i]) {
          cstr <- sprintf("The second category number (cat2) is out of range for variable %s.", ulabels[i])
          stop(cstr)
        }
      }
    }
  } else if (length(uvar) > 1) {
    for (r in c(1:length(ridx))) {
      if (!(missing(cat))) {
        if (cat <= 0 || cat > categories[ridx[r]]) {
          cstr <- sprintf("The category (cat) is out of range for variable %s.", ulabels[ridx[r]])
          stop(cstr)
        }
      } else {
        # cat is missing but cat2 isn't!
        if (!(missing(cat2))) {
          stop("The first category (cat) is required if the second category (cat2) is given.")
        }
      }
      if (!(missing(cat2))) {
        if (cat2 <= 0 || cat2 > categories[ridx[r]]) {
          cstr <- sprintf("The category (cat2) is out of range for variable %s.", ulabels[ridx[r]])
          stop(cstr)
        }
        if (cat > cat2) {
          # bug fix: the message mislabeled the first category as 'cat2'
          cstr <- sprintf("The first category (cat=%d) must be smaller than the second category (cat2=%d).", cat, cat2)
          stop(cstr)
        }
      }
    }
  } else {
    if (!(missing(cat))) {
      if (cat <= 0 || cat > categories[ridx]) {
        cstr <- sprintf("The category (cat) is out of range for variable %s.", ulabels[ridx])
        stop(cstr)
      }
    } else {
      # cat is missing but cat2 isn't!
      if (!(missing(cat2))) {
        stop("The first category (cat) is required if the second category (cat2) is given.")
      }
    }
    if (!(missing(cat2))) {
      if (cat2 <= 0 || cat2 > categories[ridx]) {
        cstr <- sprintf("The category (cat2) is out of range for variable %s.", ulabels[ridx])
        stop(cstr)
      }
      if (cat > cat2) {
        # bug fix: the message mislabeled the first category as 'cat2'
        cstr <- sprintf("The first category (cat=%d) must be smaller than the second category (cat2=%d).", cat, cat2)
        stop(cstr)
      }
    }
  }
  if (!(missing(xrange))) {
    if (xrange <= 0 || xrange > 6) {
      stop("The xrange type should be between 1 and 6.")
    }
  }
  if (!(missing(xstep))) {
    if (xstep <= 0 || xstep > 11) {
      stop("The xstep type should be between 1 and 11.")
    }
  }
  # Build the x-axis grid: mean +/- xmult standard deviations of the
  # x-axis variable, with the step size selected by 'xstep'.
  variances <- mplus.get.group.dataset(file,'irt_data','variance')
  means <- mplus.get.group.dataset(file,'irt_data','mean')
  fsd = sqrt(variances[fidx])
  xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
  vmin = means[fidx] + (-1) * xmult * fsd
  vmax = means[fidx] + xmult * fsd
  vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
  steps <- seq(vmin,vmax,by=vstep)
  # (leftover debug 'print(steps)' removed; the grid is still returned below)
  if (missing(uvar)) {
    # One curve per indicator: category 'cat', or the sum over cat..cat2.
    prob <- array(0,c(num_r,length(steps)))
    xx <- array(0,c(num_r,length(steps)))
    if (missing(cat2)) {
      for (r in c(1:num_r)) {
        prob[r,] <- mplus.compute.irt.icc(file,group,fidx,r,cat,xvector=steps,covariates=covariates)
        xx[r,] <- steps
      }
    } else {
      for (r in c(1:num_r)) {
        for (c in c(cat:cat2)) {
          prob[r,] <- prob[r,] + mplus.compute.irt.icc(file,group,fidx,r,c,xvector=steps,covariates=covariates)
        }
        xx[r,] <- steps
      }
    }
    # plot the icc
    cstr <- sprintf("Item characteristic curves as a function of %s, Class %d", flabels[fidx], group)
    colors <- rainbow(num_r)
    plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n')
    for (i in c(1:num_r)) {
      lines(xx[i,],prob[i,],col=colors[i])
    }
    ldesc <- array(0,c(num_r))
    lty <- array(0,c(num_r))
    lwd <- array(0,c(num_r))
    for (i in c(1:num_r)) {
      if (missing(cat2)) {
        ldesc[i] <- sprintf("%s, Category %d", ulabels[i], cat)
      } else {
        ldesc[i] <- sprintf("%s, Cat %d to %d", ulabels[i], cat, cat2)
      }
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
  } else if (length(ridx) > 1) {
    # Curves for the selected indicators.
    prob <- array(0,c(length(ridx),length(steps)))
    xx <- array(0,c(length(ridx),length(steps)))
    if (missing(cat)) {
      # NOTE(review): this branch indexes 'categories' with the vector
      # 'ridx' and fills a (length(ridx) x steps) array by category index
      # 'j' -- it looks copy-pasted from the single-indicator
      # all-categories case and is likely broken for multiple indicators.
      # Left unchanged pending confirmation of the intended behavior.
      for (j in c(1:categories[ridx])) {
        prob[j,] <- mplus.compute.irt.icc(file,group,fidx,ridx,j,xvector=steps,covariates=covariates)
        xx[j,] <- steps
      }
    } else if (missing(cat2)) {
      for (r in c(1:length(ridx))) {
        prob[r,] <- mplus.compute.irt.icc(file,group,fidx,ridx[r],cat,xvector=steps,covariates=covariates)
        xx[r,] <- steps
      }
    } else {
      for (r in c(1:length(ridx))) {
        for (c in c(cat:cat2)) {
          prob[r,] <- prob[r,] + mplus.compute.irt.icc(file,group,fidx,ridx[r],c,xvector=steps,covariates=covariates)
        }
        xx[r,] <- steps
      }
    }
    # plot the icc
    cstr <- sprintf("Item characteristic curves as a function of %s, Class %d", flabels[fidx], group)
    colors <- rainbow(length(ridx))
    plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n')
    for (i in c(1:length(ridx))) {
      lines(xx[i,],prob[i,],col=colors[i])
    }
    ldesc <- array(0,c(length(ridx)))
    lty <- array(0,c(length(ridx)))
    lwd <- array(0,c(length(ridx)))
    for (i in c(1:length(ridx))) {
      if (missing(cat2)) {
        ldesc[i] <- sprintf("%s, Category %d", ulabels[ridx[i]], cat)
      } else {
        ldesc[i] <- sprintf("%s, Cat %d to %d", ulabels[ridx[i]], cat, cat2)
      }
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
  } else if (missing(cat)) {
    # Single indicator, all categories.
    prob <- array(0,c(categories[ridx],length(steps)))
    xx <- array(0,c(categories[ridx],length(steps)))
    for (j in c(1:categories[ridx])) {
      prob[j,] <- mplus.compute.irt.icc(file,group,fidx,ridx,j,steps,covariates)
      xx[j,] <- steps
    }
    # plot the icc
    cstr <- sprintf("Item characteristic curve for %s (all categories)\n as a function of %s, Class %d", ulabels[ridx], flabels[fidx], group)
    colors <- rainbow(categories[ridx])
    plot(xx,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    for (i in c(1:categories[ridx])) {
      lines(xx[i,],prob[i,],col=colors[i])
    }
    ldesc <- vector()
    for (i in c(1:categories[ridx])) {
      ldesc[i] <- sprintf("%s, Category %d", ulabels[ridx], i)
    }
    legend(lloc,ldesc,col=colors,lty=c(1,1,1,1,1),lwd=c(2.5,2.5,2.5,2.5,2.5))
  } else if (missing(cat2)) {
    # if cat2 is missing, then we plot only the given category
    prob <- mplus.compute.irt.icc(file,group,fidx,ridx,cat,steps,covariates)
    # plot the icc
    cstr <- sprintf("Item characteristic curve for %s (category %d)\n as a function of %s, Class %d", ulabels[ridx], cat, flabels[fidx], group)
    plot(steps,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    lines(steps,prob,col='red')
  } else {
    # if cat and cat2 are given, then we plot the sum from cat to cat2
    prob <- array(0,c(length(steps)))
    for (c in c(cat:cat2)) {
      prob <- prob + mplus.compute.irt.icc(file,group,fidx,ridx,c,steps,covariates)
    }
    # plot the icc
    cstr <- sprintf("Item characteristic curve for %s\n(sum from category %d to category %d)\nas a function of %s, Class %d", ulabels[ridx], cat, cat2, flabels[fidx], group)
    plot(steps,prob,xlab=flabels[fidx],ylab="Probability",main=cstr,type='n',ylim=c(0,1))
    lines(steps,prob,col='red')
  }
  steps
}
#========================================================================
# mplus.plot.irt.iic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required)
# - if not given, group=1 will be used
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.iic('ex7.27.gh5',1,'F','U1')
#
mplus.plot.irt.iic <- function(file,group=1,xvar=1,uvar,covariates,xrange=3,xstep=7,lloc="top") {
  # Plot item information curves (IICs) from IRT results in a GH5 file:
  # one curve per indicator (uvar missing), curves for selected indicators
  # (uvar a vector), or a single indicator's curve.
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if irt data exists (only written when TYPE=PLOT2 was used)
  if ( !("irt_data" %in% names(gh5)) ) {
    stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
  }
  # properties[1] - number of factors
  # properties[2] - number of factors/covariates
  # properties[3] - number of indicators
  # properties[4] - number of classes
  # properties[5] - maximum number of categories
  props <- mplus.get.group.attribute(file,'irt_data','properties')
  # Labels are lowercased here, so every name lookup below must lowercase
  # its input before matching.
  flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
  flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
  flabels <- tolower(flabels)
  ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
  ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
  ulabels <- tolower(ulabels)
  num_fx <- as.integer(props[2])
  num_r <- as.integer(props[3])
  max_num_cat <- as.integer(props[5])
  # Resolve the x-axis variable (name or index) to a factor index fidx.
  if (is.character(xvar)) {
    xvar <- tolower(xvar)
    index <- pmatch(xvar, flabels, nomatch=0)
    if (index == 0) {
      cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
      stop(cstr)
    }
    fidx = index
  } else {
    if (xvar <= 0 || xvar > num_fx) {
      stop("The index for the x-variable (xvar) is out of range.")
    }
    fidx = xvar
  }
  # Resolve uvar (if given) to one or more indicator indices in ridx.
  if (missing(uvar)) {
  } else if (length(uvar) > 1) {
    ridx <- vector()
    for (r in c(1:length(uvar))) {
      var <- uvar[r]
      if (is.character(var)) {
        # Bug fix: the name was matched against the lowercased ulabels
        # without being lowercased itself, so uppercase indicator names
        # failed here while they worked in the single-uvar branch below.
        var <- tolower(var)
        index <- pmatch(var, ulabels, nomatch=0)
        if (index == 0) {
          cstr <- sprintf("Unknown indicator: %s\n", var)
          stop(cstr)
        }
        ridx[r] = index
      } else {
        if (var <= 0 || var > num_r) {
          stop("The index for the indicator in uvar is out of range.")
        }
        ridx[r] = var
      }
    }
  } else {
    if (is.character(uvar)) {
      uvar <- tolower(uvar)
      index <- pmatch(uvar, ulabels, nomatch=0)
      if (index == 0) {
        cstr <- sprintf("Unknown indicator: %s\n", uvar)
        stop(cstr)
      }
      ridx = index
    } else {
      if (uvar <= 0 || uvar > num_r) {
        stop("The index for the indicator (uvar) is out of range.")
      }
      ridx = uvar
    }
  }
  if (group <= 0 || group > props[4]) {
    stop("The group index (group) is out of range.")
  }
  # Default covariate values: the sample means for the requested class.
  if (missing(covariates)) {
    xmean <- mplus.get.group.dataset(file,'irt_data','mean')
    covariates <- xmean[,group]
  } else {
    if (length(covariates) != num_fx) {
      cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
      stop(cstr)
    }
  }
  categories <- mplus.get.group.attribute(file,'irt_data','categories')
  if (!(missing(xrange))) {
    if (xrange <= 0 || xrange > 6) {
      stop("The xrange type should be between 1 and 6.")
    }
  }
  if (!(missing(xstep))) {
    if (xstep <= 0 || xstep > 11) {
      stop("The xstep type should be between 1 and 11.")
    }
  }
  # Build the x-axis grid: mean +/- xmult standard deviations of the
  # x-axis variable, with the step size selected by 'xstep'.
  variances <- mplus.get.group.dataset(file,'irt_data','variance')
  means <- mplus.get.group.dataset(file,'irt_data','mean')
  fsd = sqrt(variances[fidx])
  xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
  vmin = means[fidx] + (-1) * xmult * fsd
  vmax = means[fidx] + xmult * fsd
  vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
  steps <- seq(vmin,vmax,by=vstep)
  if (missing(uvar)) {
    # One information curve per indicator.
    prob <- array(0,c(num_r,length(steps)))
    xx <- array(0,c(num_r,length(steps)))
    for (r in c(1:num_r)) {
      prob[r,] <- mplus.compute.irt.iic(file,group,fidx,r,xvector=steps,covariates=covariates)
      xx[r,] <- steps
    }
    # plot the iic
    cstr <- sprintf("Item information curves as a function of %s, Class %d", flabels[fidx], group)
    colors <- rainbow(num_r)
    plot(xx,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
    for (i in c(1:num_r)) {
      lines(xx[i,],prob[i,],col=colors[i])
    }
    ldesc <- array(0,c(num_r))
    lty <- array(0,c(num_r))
    lwd <- array(0,c(num_r))
    for (i in c(1:num_r)) {
      ldesc[i] <- sprintf("%s", ulabels[i])
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
  } else if (length(ridx) > 1) {
    # Information curves for the selected indicators.
    prob <- array(0,c(length(ridx),length(steps)))
    xx <- array(0,c(length(ridx),length(steps)))
    for (r in c(1:length(ridx))) {
      prob[r,] <- mplus.compute.irt.iic(file,group,fidx,ridx[r],xvector=steps,covariates=covariates)
      xx[r,] <- steps
    }
    # plot the iic
    cstr <- sprintf("Item information curves as a function of %s, Class %d", flabels[fidx], group)
    colors <- rainbow(length(ridx))
    plot(xx,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
    for (i in c(1:length(ridx))) {
      lines(xx[i,],prob[i,],col=colors[i])
    }
    ldesc <- array(0,c(length(ridx)))
    lty <- array(0,c(length(ridx)))
    lwd <- array(0,c(length(ridx)))
    for (i in c(1:length(ridx))) {
      ldesc[i] <- sprintf("%s", ulabels[ridx[i]])
      lty[i] = 1
      lwd[i] = 2.5
    }
    legend(lloc,ldesc,col=colors,lty=lty,lwd=lwd)
  } else {
    # Single indicator.
    prob <- mplus.compute.irt.iic(file,group,fidx,ridx,steps,covariates)
    # plot the iic
    cstr <- sprintf("Item information curve for %s as a function of %s, Class %d", ulabels[ridx], flabels[fidx], group)
    plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
    lines(steps,prob,col='red')
  }
}
#========================================================================
# mplus.plot.irt.tic
#
# arguments:
# file - the quoted name of an existing GH5 file (required)
# group - the group number (not required)
# - if not given, group=1 will be shown
# xvar - the variable for the x-axis, can be the variable index or quoted variable name (not required, uses the first x)
# uvar - the indicator variable or vector containing more than one indicator variable
# - can be the variable index or the quoted variable name
# - if not given, assume all indicator variables (not required)
# covariates -> the vector containing values for all the other covariates (not required, sample mean used if not given)
# xrange - the type of range for the x-axis (not required)
# - xrange=1: -1 s.d to +1 s.d of xvar
# - xrange=2: -2 s.d to +2 s.d of xvar
# - xrange=3: -3 s.d to +3 s.d of xvar (default)
# - xrange=4: -4 s.d to +4 s.d of xvar
# - xrange=5: -5 s.d to +5 s.d of xvar
# - xrange=6: -6 s.d to +6 s.d of xvar
# xstep - the step increment for the x-axis range (not required)
# - xstep=1: 1.0
# - xstep=2: 0.5
# - xstep=3: 0.1
# - xstep=4: 0.05
# - xstep=5: 1/2 s.d of xvar
# - xstep=6: 1/4 s.d of xvar
# - xstep=7: 1/5 s.d of xvar (default)
# - xstep=8: 1/10 s.d of xvar
# - xstep=9: 1/20 s.d of xvar
# - xstep=10: 1/50 s.d of xvar
# - xstep=11: 1/100 s.d of xvar
#
# eg. mplus.plot.irt.tic('ex7.27.gh5',1,'F','U1')
#
# Plot the total information curve (TIC) of an IRT model as a function of one
# factor, optionally restricted to a subset of indicators (partial TIC).
#   file       - GH5 file name; requires IRT data (TYPE=PLOT2 in Mplus)
#   group      - class/group number (default 1)
#   xvar       - factor for the x-axis: label string or index (default 1)
#   uvar       - indicator(s) as label(s) or index(es); when omitted, all
#                indicators contribute and a prior-information term is added
#   covariates - values for all factors/covariates; defaults to group means
#   xrange     - x-axis range code, 1..6 => mean +/- 1..6 s.d. of xvar
#   xstep      - x-axis increment code, 1..11 (see table in header above)
mplus.plot.irt.tic <- function(file,group=1,xvar=1,uvar,covariates,xrange=3,xstep=7) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if irt data exists
	if ( !("irt_data" %in% names(gh5)) ) {
		stop("This function requires IRT data.\n\nUse TYPE=PLOT2.")
	}
	# properties[1] - number of factors
	# properties[2] - number of factors/covariates
	# properties[3] - number of indicators
	# properties[4] - number of classes
	# properties[5] - maximum number of categories
	props <- mplus.get.group.attribute(file,'irt_data','properties')
	# factor and indicator labels are trimmed and lower-cased before matching
	flabels <- mplus.get.group.attribute(file,'irt_data','flabels')
	flabels <- gsub("(^\\s+|\\s+$)", "", flabels, perl=TRUE)
	flabels <- tolower(flabels)
	ulabels <- mplus.get.group.attribute(file,'irt_data','ulabels')
	ulabels <- gsub("(^\\s+|\\s+$)", "", ulabels, perl=TRUE)
	ulabels <- tolower(ulabels)
	num_fx <- as.integer(props[2])
	num_r <- as.integer(props[3])
	max_num_cat <- as.integer(props[5])
	if (is.character(xvar)) {
		# NOTE(review): labels were lower-cased above but xvar is matched as
		# given, so an upper-case xvar will not match — confirm intended.
		index <- pmatch(xvar, flabels, nomatch=0)
		if (index == 0) {
			cstr <- sprintf("Unknown variable for the x-axis: %s\n", xvar)
			stop(cstr)
		}
		fidx = index
	} else {
		if (xvar <= 0 || xvar > num_fx) {
			stop("The index for the x-variable (xvar) is out of range.")
		}
		fidx = xvar
	}
	# resolve uvar (if given) into indicator index/indices ridx
	if (missing(uvar)) {
	} else if (length(uvar) > 1) {
		ridx <- vector()
		for (r in c(1:length(uvar))) {
			var <- uvar[r]
			if (is.character(var)) {
				index <- pmatch(var, ulabels, nomatch=0)
				if (index == 0) {
					cstr <- sprintf("Unknown indicator: %s\n", var)
					stop(cstr)
				}
				ridx[r] = index
			} else {
				if (var <= 0 || var > num_r) {
					stop("The index for the indicator in uvar is out of range.")
				}
				ridx[r] = var
			}
		}
	} else {
		if (is.character(uvar)) {
			index <- pmatch(uvar, ulabels, nomatch=0)
			if (index == 0) {
				cstr <- sprintf("Unknown indicator: %s\n", uvar)
				stop(cstr)
			}
			ridx = index
		} else {
			if (uvar <= 0 || uvar > num_r) {
				stop("The index for the indicator (uvar) is out of range.")
			}
			ridx = uvar
		}
	}
	if (group <= 0 || group > props[4]) {
		stop("The group index (group) is out of range.")
	}
	# default covariate values are the group-specific means
	if (missing(covariates)) {
		xmean <- mplus.get.group.dataset(file,'irt_data','mean')
		covariates <- xmean[,group]
	} else {
		if (length(covariates) != num_fx) {
			cstr <- sprintf("The length of the covariates vector should be %d.", num_fx)
			stop(cstr)
		}
	}
	categories <- mplus.get.group.attribute(file,'irt_data','categories')
	if (!(missing(xrange))) {
		if (xrange <= 0 || xrange > 6) {
			stop("The xrange type should be between 1 and 6.")
		}
	}
	if (!(missing(xstep))) {
		if (xstep <= 0 || xstep > 11) {
			stop("The xstep type should be between 1 and 11.")
		}
	}
	# build the x-axis grid: mean +/- xmult s.d., stepped by vstep
	variances <- mplus.get.group.dataset(file,'irt_data','variance')
	means <- mplus.get.group.dataset(file,'irt_data','mean')
	fsd = sqrt(variances[fidx])
	xmult <- switch(xrange, 1, 2, 3, 4, 5, 6)
	vmin = means[fidx] + (-1) * xmult * fsd
	vmax = means[fidx] + xmult * fsd
	vstep = switch(xstep, 1.0, 0.5, 0.1, 0.05, 0.5*fsd, 0.25*fsd, 0.2*fsd, 0.1*fsd, 0.05*fsd, 0.02*fsd, 0.01*fsd)
	steps <- seq(vmin,vmax,by=vstep)
	# if cat is missing, then we plot all categories
	if (missing(uvar)) {
		# full TIC: sum the item information curves over all indicators
		prob <- array(0,c(length(steps)))
		for (r in c(1:num_r)) {
			prob <- prob + mplus.compute.irt.iic(file,group,fidx,r,xvector=steps,covariates=covariates)
		}
		# add the prior information 1/variance of the factor for this group
		prob <- prob + 1 / gh5$irt_data$variance[fidx,group]
		# plot the tic
		cstr <- sprintf("Total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	} else if (length(ridx) > 1) {
		# partial TIC over the requested subset of indicators
		prob <- array(0,c(length(steps)))
		for (r in c(1:length(ridx))) {
			prob <- prob + mplus.compute.irt.iic(file,group,fidx,ridx[r],xvector=steps,covariates=covariates)
		}
		# plot the iic
		cstr <- sprintf("Partial total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	} else {
		# single indicator: its item information curve
		prob <- mplus.compute.irt.iic(file,group,fidx,ridx,steps,covariates)
		# plot the tic
		cstr <- sprintf("Partial total information curve as a function of %s, Class %d", flabels[fidx], group)
		plot(steps,prob,xlab=flabels[fidx],ylab="Information",main=cstr,type='n')
		lines(steps,prob,col='red')
	}
	# for (i in c(1:length(steps))) {
	#	cstr <- sprintf("x = %0.3f, probx = %0.5f", steps[i], prob[i])
	#	print(cstr)
	# }
}
######################################################################################################
# Functions for Survival plots
######################################################################################################
#========================================================================
# mplus.list.survival.variables
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.survival.variables('ex6.21.gh5')
#
# Print the labels of all survival variables stored in the GH5 file.
#   file - name of an existing GH5 file (survival data requires TYPE=PLOT2)
mplus.list.survival.variables <- function(file) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load = TRUE)
	# survival data is only present when TYPE=PLOT2 was requested in Mplus
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')
	cat(c("\nList of survival variables:\n"))
	# one group "survival_data/survival<k>" exists per survival variable
	for (idx in seq_len(props[1])) {
		grp <- sprintf("survival_data/survival%d", idx)
		lbl <- mplus.get.group.attribute(file, grp, 'label')
		lbl <- sprintf("%s", lbl)
		lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl = TRUE)
		print(lbl)
	}
}
#========================================================================
# mplus.get.survival.kaplanmeier.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.survival.kaplanmeier.values('ex6.21.gh5','T')
#
# Return Kaplan-Meier curve values for one survival variable.
#   file     - name of an existing GH5 file
#   survvar  - survival variable as a label string or an index (required)
#   classnum - class/group number; defaults to the first class
#   time     - when supplied (any value), the time points (column 1) are
#              returned instead of the survival probabilities (column 2)
mplus.get.survival.kaplanmeier.values <- function(file,survvar,classnum,time) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load = TRUE)
	# survival data requires TYPE=PLOT2 output
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')
	nvars <- props[1]
	if (is.character(survvar)) {
		# resolve the trimmed label to its 1-based index
		surv_idx <- 0
		for (k in seq_len(nvars)) {
			grp <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grp, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl = TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > nvars) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "kaplan_meier1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		nclass <- attr(classes, 'dim')[1]
		if (classnum <= 0 || classnum > nclass) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("kaplan_meier%d", classnum)
	}
	kmvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 2 = survival probability, column 1 = time point
	if (missing(time)) {
		return(kmvals[, 2])
	}
	kmvals[, 1]
}
#========================================================================
# mplus.compute.survival.sample.logcumulative.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.compute.survival.sample.logcumulative.values('ex6.21.gh5','T')
#
# Compute sample log cumulative hazard values, log(-log(S(t))), from the
# Kaplan-Meier survival probabilities of one survival variable.
#   file     - name of an existing GH5 file
#   survvar  - survival variable as a label string or an index (required)
#   classnum - class/group number; defaults to the first class
#   time     - accepted for interface symmetry; not used here
mplus.compute.survival.sample.logcumulative.values <- function(file,survvar,classnum,time) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load = TRUE)
	# survival data requires TYPE=PLOT2 output
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')
	nvars <- props[1]
	if (is.character(survvar)) {
		# resolve the trimmed label to its 1-based index
		surv_idx <- 0
		for (k in seq_len(nvars)) {
			grp <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grp, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl = TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > nvars) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "kaplan_meier1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		nclass <- attr(classes, 'dim')[1]
		if (classnum <= 0 || classnum > nclass) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("kaplan_meier%d", classnum)
	}
	kmvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# survival probabilities sit in column 2; transform to log cumulative hazard
	log(-log(kmvals[, 2]))
}
#========================================================================
# mplus.get.survival.baseline.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# survvar2 - ending survival variable for getting sequential time
# classnum - the group number (not required)
#
# eg. mplus.get.survival.baseline.values('ex6.21.gh5','T')
#
# Return estimated baseline survival curve values for one survival variable,
# or for a sequence of survival variables linked end-to-end.
#   file     - name of an existing GH5 file
#   survvar  - (first) survival variable: label string or index (required)
#   survvar2 - ending survival variable for sequential time (optional)
#   classnum - class/group number (optional)
#   time     - when supplied (any value), time points are returned instead
#              of the survival probabilities
# Fixes applied: the sequential branch referenced undefined objects
# ('estvals'/'estvals1'), swapped the time/value columns relative to the
# single-variable branch, never finished linking the segments, and
# returned nothing.
mplus.get.survival.baseline.values <- function(file,survvar,survvar2,classnum,time) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists (requires TYPE=PLOT2)
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	# resolve survvar2 (label or index) to surv_idx2, when given
	if (!(missing(survvar2))) {
		if (is.character(survvar2)) {
			surv_idx2 <- 0
			for (i in c(1:props[1])) {
				cstr <- sprintf("survival_data/survival%d", i)
				label <- mplus.get.group.attribute(file,cstr,'label')
				label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
				if (label == survvar2) {
					surv_idx2 = i
					break
				}
			}
			if (surv_idx2 == 0) {
				stop("- unknown survival variable: ", survvar2)
			}
		} else {
			if (survvar2 <= 0 || survvar2 > props[1]) {
				stop("- index for the survival variable is out of range")
			}
			surv_idx2 = survvar2
		}
	}
	if (missing(survvar2)) {
		groupstr <- sprintf("survival_data/survival%d", surv_idx)
		if (missing(classnum)) {
			datastr <- sprintf("estimated_survival")
		} else {
			classes <- mplus.get.group.dataset(file,'/','model_group_labels')
			dims <- attr(classes,'dim')
			if (classnum <= 0 || classnum > dims[1]) {
				stop("Class number is out of range.")
			}
			datastr <- sprintf("estimated_survival%d", classnum)
		}
		esvals <- mplus.get.group.dataset(file,groupstr,datastr)
		# column 2 = survival probability, column 1 = time point
		if (missing(time)) {
			return(esvals[,2])
		} else {
			return(esvals[,1])
		}
	} else {
		# ending survival variable given, so link the curves sequentially:
		# each segment's times are shifted by the end time of the previous
		# segment and its survival values scaled by the previous segment's
		# final survival probability.
		# NOTE(review): the original code here was unfinished; confirm this
		# linking rule against Mplus sequential survival output.
		ylast <- 1
		xlast <- 0
		data <- vector()
		tvals <- vector()
		for (s in c(surv_idx:surv_idx2)) {
			groupstr <- sprintf("survival_data/survival%d", s)
			if (missing(classnum)) {
				datastr <- sprintf("estimated_survival")
			} else {
				classes <- mplus.get.group.dataset(file,'/','model_group_labels')
				dims <- attr(classes,'dim')
				if (classnum <= 0 || classnum > dims[1]) {
					stop("Class number is out of range.")
				}
				datastr <- sprintf("estimated_survival%d", classnum)
			}
			esvals1 <- mplus.get.group.dataset(file,groupstr,datastr)
			tvals <- c(tvals, xlast + esvals1[,1])
			data <- c(data, ylast * esvals1[,2])
			n <- length(esvals1[,1])
			xlast <- xlast + esvals1[n,1]
			ylast <- ylast * esvals1[n,2]
		}
		if (missing(time)) {
			return(data)
		} else {
			return(tvals)
		}
	}
}
#========================================================================
# mplus.compute.survival.estimated.logcumulative.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.compute.survival.estimated.logcumulative.values('ex6.21.gh5','T')
#
# Compute estimated log cumulative hazard values, log(-log(S(t))), from the
# estimated baseline survival probabilities of one survival variable.
#   file     - name of an existing GH5 file
#   survvar  - survival variable as a label string or an index (required)
#   classnum - class/group number; defaults to "estimated_survival1"
#              (NOTE(review): the baseline getter defaults to
#              "estimated_survival" without a number — confirm which
#              dataset name the GH5 file actually uses)
#   time     - accepted for interface symmetry; not used here
mplus.compute.survival.estimated.logcumulative.values <- function(file,survvar,classnum,time) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load = TRUE)
	# survival data requires TYPE=PLOT2 output
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')
	nvars <- props[1]
	if (is.character(survvar)) {
		# resolve the trimmed label to its 1-based index
		surv_idx <- 0
		for (k in seq_len(nvars)) {
			grp <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grp, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl = TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > nvars) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "estimated_survival1"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		nclass <- attr(classes, 'dim')[1]
		if (classnum <= 0 || classnum > nclass) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("estimated_survival%d", classnum)
	}
	esvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# survival probabilities sit in column 2; transform to log cumulative hazard
	log(-log(esvals[, 2]))
}
#========================================================================
# mplus.get.survival.basehazard.values
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
#
# eg. mplus.get.survival.basehazard.values('ex6.21.gh5','T')
#
# Return estimated baseline hazard values for one survival variable.
#   file     - name of an existing GH5 file
#   survvar  - survival variable as a label string or an index (required)
#   classnum - class/group number; defaults to the "basehazard" dataset
#   time     - when supplied (any value), the time points (column 1) are
#              returned instead of the hazard values (column 2)
mplus.get.survival.basehazard.values <- function(file,survvar,classnum,time) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		stop(paste("- file does not exist:", file, "\n"))
	}
	gh5 <- h5dump(file, load = TRUE)
	# survival data requires TYPE=PLOT2 output
	if (!("survival_data" %in% names(gh5))) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	if (missing(survvar)) {
		stop("The survival variable must be given.")
	}
	props <- mplus.get.group.attribute(file, 'survival_data', 'properties')
	nvars <- props[1]
	if (is.character(survvar)) {
		# resolve the trimmed label to its 1-based index
		surv_idx <- 0
		for (k in seq_len(nvars)) {
			grp <- sprintf("survival_data/survival%d", k)
			lbl <- mplus.get.group.attribute(file, grp, 'label')
			lbl <- gsub("(^\\s+|\\s+$)", "", lbl, perl = TRUE)
			if (lbl == survvar) {
				surv_idx <- k
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > nvars) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx <- survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	if (missing(classnum)) {
		datastr <- "basehazard"
	} else {
		classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
		nclass <- attr(classes, 'dim')[1]
		if (classnum <= 0 || classnum > nclass) {
			stop("Class number is out of range.")
		}
		datastr <- sprintf("basehazard%d", classnum)
	}
	bhvals <- mplus.get.group.dataset(file, groupstr, datastr)
	# column 2 = hazard value, column 1 = time point
	if (missing(time)) {
		return(bhvals[, 2])
	}
	bhvals[, 1]
}
#========================================================================
# mplus.plot.survival.kaplanmeier
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.kaplanmeier('ex6.21.gh5','T')
#
# Plot the Kaplan-Meier curve(s) for a survival variable: all classes on one
# plot when classnum is missing, otherwise a single class in red.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
mplus.plot.survival.kaplanmeier <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Kaplan-Meier curve for %s", label)
	if (missing(classnum)) {
		# curves may have different lengths; pad with NA to the longest
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			# fourth argument 0 requests time points; omitted => probabilities
			xall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i)
		}
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: one entry per class, solid lines matching the curve colors
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("- class number is out of range")
		}
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yy <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum)
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.baseline('ex6.21.gh5','T')
#
# Plot the estimated baseline survival curve(s) for a survival variable:
# all classes on one plot when classnum is missing, otherwise one class.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
mplus.plot.survival.baseline <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated baseline survival curve for %s", label)
	if (missing(classnum)) {
		# curves may have different lengths; pad with NA to the longest
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			# time=0 requests time points; omitted => survival probabilities
			xall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i)
		}
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: one entry per class, solid lines matching the curve colors
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yy <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum)
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.basehazard
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.basehazard('ex6.21.gh5','T')
#
# Plot the estimated baseline hazard curve(s) for a survival variable:
# all classes on one plot when classnum is missing, otherwise one class.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
mplus.plot.survival.basehazard <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required.\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated baseline hazard curve for %s", label)
	if (missing(classnum)) {
		# curves may have different lengths; pad with NA to the longest
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.basehazard.values(file,surv_idx,i,0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			# fourth argument 0 requests time points; omitted => hazard values
			xall[i,1:npoints[i]] <- mplus.get.survival.basehazard.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.get.survival.basehazard.values(file,surv_idx,i)
		}
		plot(xall,yall,xlab="Time",ylab="",main=cstr,type='n')
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: one entry per class, solid lines matching the curve colors
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.basehazard.values(file,surv_idx,classnum,0)
		yy <- mplus.get.survival.basehazard.values(file,surv_idx,classnum)
		plot(xx,yy,xlab="Time",ylab="",main=cstr,type='n')
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.sample.logcumulative
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.sample.logcumulative('ex6.21.gh5','T')
#
# Plot the sample log cumulative hazard curve(s), log(-log(S(t))), based on
# Kaplan-Meier estimates: all classes when classnum is missing, else one.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
# Fix applied: the infinite-value masking loop indexed the xall/yall
# matrices with a single subscript (yall[j]/xall[j]), masking the wrong
# cells; it now uses row/column indexing.
mplus.plot.survival.sample.logcumulative <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Sample log cumulative hazard curve for %s", label)
	if (missing(classnum)) {
		# curves may have different lengths; pad with NA to the longest
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[i,1:npoints[i]] <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,i)
			# mask points where log(-log(S)) is infinite (S == 0 or S == 1)
			for (j in c(1:npoints[i])) {
				if (is.infinite(yall[i,j])) {
					xall[i,j] = NA
				}
			}
		}
		# NOTE(review): ylim=c(0,1) and ylab="Probability" look copied from
		# the survival-probability plots; log cumulative hazard values are
		# not confined to [0,1] — confirm intended axis limits.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: one entry per class, solid lines matching the curve colors
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yy <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,classnum)
		# mask points where log(-log(S)) is infinite
		for (j in c(1:length(xx))) {
			if (is.infinite(yy[j])) {
				xx[j] = NA
			}
		}
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.estimated.logcumulative
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.estimated.logcumulative('ex6.21.gh5','T')
#
# Plot the estimated log cumulative hazard curve(s), log(-log(S(t))), based
# on the estimated baseline survival: all classes when classnum is missing,
# otherwise a single class in red.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
# Fix applied: the infinite-value masking loop indexed the xall/yall
# matrices with a single subscript (yall[j]/xall[j]), masking the wrong
# cells; it now uses row/column indexing.
mplus.plot.survival.estimated.logcumulative <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Estimated log cumulative hazard curve for %s", label)
	if (missing(classnum)) {
		# curves may have different lengths; pad with NA to the longest
		npoints <- array(0, c(dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(dims[1],maxpoints))
		yall <- array(NA, c(dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			xall[i,1:npoints[i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[i,1:npoints[i]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,i)
			# mask points where log(-log(S)) is infinite (S == 0 or S == 1)
			for (j in c(1:npoints[i])) {
				if (is.infinite(yall[i,j])) {
					xall[i,j] = NA
				}
			}
		}
		# NOTE(review): ylim=c(0,1) and ylab="Probability" look copied from
		# the survival-probability plots; log cumulative hazard values are
		# not confined to [0,1] — confirm intended axis limits.
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(dims[1])
		for (i in c(1:dims[1])) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: one entry per class, solid lines matching the curve colors
		ldesc <- array(0,c(dims[1]))
		lty <- array(0,c(dims[1]))
		lwd <- array(0,c(dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[i] <- sprintf("Class %d", i)
			lty[i] = 1
			lwd[i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yy <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,classnum)
		# mask points where log(-log(S)) is infinite
		for (j in c(1:length(xx))) {
			if (is.infinite(yy[j])) {
				xx[j] = NA
			}
		}
		plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		lines(xx,yy,col='red')
	}
}
#========================================================================
# mplus.plot.survival.kaplanmeier.vs.baseline
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.kaplanmeier.vs.baseline('ex6.21.gh5','T')
#
# Plot Kaplan-Meier curves against estimated baseline survival curves for a
# survival variable. With classnum missing, both curves for every class are
# overlaid (rows 2k-1 = KM, rows 2k = estimated for class k); otherwise the
# two curves for one class are shown.
#   file     - name of an existing GH5 file (requires TYPE=PLOT2)
#   survvar  - survival variable as a label string or index (default 1)
#   classnum - class/group number (optional)
mplus.plot.survival.kaplanmeier.vs.baseline <- function(file,survvar=1,classnum) {
	if (missing(file)) {
		stop("- name of the GH5 file is required")
	}
	if (!(file.exists(file))) {
		cstr <- paste("- file does not exist:",file,"\n")
		stop(cstr)
	}
	gh5 <- h5dump(file, load=TRUE)
	# check if survival data exists
	if ( !("survival_data" %in% names(gh5)) ) {
		stop("- survival data is required\n\nUse TYPE=PLOT2.")
	}
	props <- mplus.get.group.attribute(file,'survival_data','properties')
	# resolve survvar (trimmed label or index) to surv_idx
	if (is.character(survvar)) {
		surv_idx <- 0
		for (i in c(1:props[1])) {
			cstr <- sprintf("survival_data/survival%d", i)
			label <- mplus.get.group.attribute(file,cstr,'label')
			label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
			if (label == survvar) {
				surv_idx = i
				break
			}
		}
		if (surv_idx == 0) {
			stop("- unknown survival variable: ", survvar)
		}
	} else {
		if (survvar <= 0 || survvar > props[1]) {
			stop("- index for the survival variable is out of range")
		}
		surv_idx = survvar
	}
	groupstr <- sprintf("survival_data/survival%d", surv_idx)
	label <- mplus.get.group.attribute(file,groupstr,'label')
	label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
	classes <- mplus.get.group.dataset(file,'/','model_group_labels')
	dims <- attr(classes,'dim')
	cstr <- sprintf("Kaplan-Meier curve compared with\nestimated baseline survival curve for %s", label)
	if (missing(classnum)) {
		# two rows per class: odd rows hold KM curves, even rows estimated
		npoints <- array(0, c(2*dims[1]))
		for (i in c(1:dims[1])) {
			xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			npoints[2*(i-1)+1] = length(xx)
			xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			npoints[2*i] = length(xx)
		}
		maxpoints = max(npoints)
		xall <- array(NA, c(2*dims[1],maxpoints))
		yall <- array(NA, c(2*dims[1],maxpoints))
		for (i in c(1:dims[1])) {
			xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
			yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i)
			xall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
			yall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i)
		}
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2*dims[1])
		for (i in c(1:(2*dims[1]))) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		# legend: KM/ES entry pair per class, matching the curve colors
		ldesc <- array(0,c(2*dims[1]))
		lty <- array(0,c(2*dims[1]))
		lwd <- array(0,c(2*dims[1]))
		for (i in c(1:dims[1])) {
			ldesc[2*(i-1)+1] <- sprintf("KM for Class %d", i)
			lty[2*(i-1)+1] = 1
			lwd[2*(i-1)+1] = 2.5
			ldesc[2*i] <- sprintf("ES for Class %d", i)
			lty[2*i] = 1
			lwd[2*i] = 2.5
		}
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	} else {
		if (classnum <= 0 || classnum > dims[1]) {
			stop("Class number is out of range.")
		}
		# single class: row 1 = KM curve, row 2 = estimated baseline curve
		npoints <- array(0, c(2))
		xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		npoints[1] = length(xx)
		xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		npoints[2] = length(xx)
		maxpoints = max(npoints)
		xall <- array(NA, c(2,maxpoints))
		yall <- array(NA, c(2,maxpoints))
		xall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
		yall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum)
		xall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
		yall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum)
		plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
		colors <- rainbow(2)
		for (i in c(1:2)) {
			lines(xall[i,],yall[i,],col=colors[i])
		}
		ldesc <- array(0,c(2))
		lty <- array(0,c(2))
		lwd <- array(0,c(2))
		ldesc[1] <- sprintf("KM for Class %d", classnum)
		lty[1] = 1
		lwd[1] = 2.5
		ldesc[2] <- sprintf("ES for Class %d", classnum)
		lty[2] = 1
		lwd[2] = 2.5
		legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
	}
}
#========================================================================
# mplus.plot.survival.sample.vs.estimated.logcumulative
#
# Plot the sample log cumulative hazard curve against the estimated log
# cumulative baseline hazard curve for a continuous-time survival
# variable, either for all latent classes or for one class.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.survival.sample.vs.estimated.logcumulative('ex6.21.gh5','T')
#
mplus.plot.survival.sample.vs.estimated.logcumulative <- function(file,survvar=1,classnum) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if survival data exists
  if ( !("survival_data" %in% names(gh5)) ) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  # props[1] is the number of survival variables stored in the file.
  props <- mplus.get.group.attribute(file,'survival_data','properties')
  # Resolve a variable label to its index, or range-check a numeric index.
  if (is.character(survvar)) {
    surv_idx <- 0
    for (i in c(1:props[1])) {
      cstr <- sprintf("survival_data/survival%d", i)
      label <- mplus.get.group.attribute(file,cstr,'label')
      label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
      if (label == survvar) {
        surv_idx <- i
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx <- survvar
  }
  groupstr <- sprintf("survival_data/survival%d", surv_idx)
  label <- mplus.get.group.attribute(file,groupstr,'label')
  label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
  classes <- mplus.get.group.dataset(file,'/','model_group_labels')
  dims <- attr(classes,'dim')
  cstr <- sprintf("Sample log cumulative hazard curve compared with\nestimated log cumulative baseline hazard curve for %s", label)
  if (missing(classnum)) {
    # No class requested: draw one sample/estimated pair per class.
    # Row 2*(i-1)+1 of xall/yall is the sample (LC) curve for class i and
    # row 2*i the estimated (ELC) curve; rows are NA-padded to the
    # longest curve.
    npoints <- array(0, c(2*dims[1]))
    for (i in c(1:dims[1])) {
      xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
      npoints[2*(i-1)+1] <- length(xx)
      xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      npoints[2*i] <- length(xx)
    }
    maxpoints <- max(npoints)
    xall <- array(NA, c(2*dims[1],maxpoints))
    yall <- array(NA, c(2*dims[1],maxpoints))
    for (i in c(1:dims[1])) {
      xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,i,0)
      yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,i)
      xall[2*i,1:npoints[2*i]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=i,time=0)
      yall[2*i,1:npoints[2*i]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,i)
    }
    # Log cumulative hazard values are unbounded, so let the y axis scale
    # to the data.  (The previous ylim=c(0,1)/"Probability" labeling was
    # copied from the survival-probability plots and clipped the curves.)
    plot(xall,yall,xlab="Time",ylab="Log Cumulative Hazard",main=cstr,type='n')
    colors <- rainbow(2*dims[1])
    for (i in c(1:(2*dims[1]))) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    # Legend: LC = sample log cumulative hazard, ELC = estimated curve.
    ldesc <- array(0,c(2*dims[1]))
    lty <- array(0,c(2*dims[1]))
    lwd <- array(0,c(2*dims[1]))
    for (i in c(1:dims[1])) {
      ldesc[2*(i-1)+1] <- sprintf("LC for Class %d", i)
      lty[2*(i-1)+1] <- 1
      lwd[2*(i-1)+1] <- 2.5
      ldesc[2*i] <- sprintf("ELC for Class %d", i)
      lty[2*i] <- 1
      lwd[2*i] <- 2.5
    }
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  } else {
    if (classnum <= 0 || classnum > dims[1]) {
      stop("Class number is out of range.")
    }
    npoints <- array(0, c(2))
    xx <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
    npoints[1] <- length(xx)
    xx <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
    npoints[2] <- length(xx)
    maxpoints <- max(npoints)
    xall <- array(NA, c(2,maxpoints))
    yall <- array(NA, c(2,maxpoints))
    xall[1,1:npoints[1]] <- mplus.get.survival.kaplanmeier.values(file,surv_idx,classnum,0)
    yall[1,1:npoints[1]] <- mplus.compute.survival.sample.logcumulative.values(file,surv_idx,classnum)
    xall[2,1:npoints[2]] <- mplus.get.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
    yall[2,1:npoints[2]] <- mplus.compute.survival.estimated.logcumulative.values(file,surv_idx,classnum)
    # See comment above: no fixed ylim / probability label for hazards.
    plot(xall,yall,xlab="Time",ylab="Log Cumulative Hazard",main=cstr,type='n')
    colors <- rainbow(2)
    for (i in c(1:2)) {
      lines(xall[i,],yall[i,],col=colors[i])
    }
    ldesc <- array(0,c(2))
    lty <- array(0,c(2))
    lwd <- array(0,c(2))
    ldesc[1] <- sprintf("LC for Class %d", classnum)
    lty[1] <- 1
    lwd[1] <- 2.5
    ldesc[2] <- sprintf("ELC for Class %d", classnum)
    lty[2] <- 1
    lwd[2] <- 2.5
    legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
  }
}
######################################################################################################
# Functions for Discrete survival plots
######################################################################################################
#========================================================================
# mplus.list.discrete.survival.variables
#
# Print the labels of all discrete-time survival variables stored in a
# GH5 file (discrete_survival_data group).
#
# arguments:
# file - the quoted name of an existing GH5 file
#
# eg. mplus.list.discrete.survival.variables('ex6.21.gh5')
#
mplus.list.discrete.survival.variables <- function(file) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  contents <- h5dump(file, load = TRUE)
  # Discrete survival data is only stored when TYPE=PLOT2 was requested.
  if (!("discrete_survival_data" %in% names(contents))) {
    stop("- discrete survival data is required\n\nUse TYPE=PLOT2.")
  }
  nvars <- mplus.get.group.attribute(file, 'discrete_survival_data', 'properties')[1]
  cat(c("\nList of survival variables:\n"))
  for (idx in seq_len(nvars)) {
    grp <- sprintf("discrete_survival_data/survival%d", idx)
    lbl <- mplus.get.group.attribute(file, grp, 'label')
    lbl <- gsub("(^\\s+|\\s+$)", "", sprintf("%s", lbl), perl = TRUE)
    print(lbl)
  }
}
#========================================================================
# mplus.get.discrete.survival.kaplanmeier.values
#
# Return the Kaplan-Meier curve for a discrete-time survival variable.
# With 'time' missing, the curve values (column 2 of the stored dataset)
# are returned; when any 'time' argument is supplied, the time points
# (column 1) are returned instead.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
# time - any value; when present, time points are returned
#
# eg. mplus.get.discrete.survival.kaplanmeier.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.kaplanmeier.values <- function(file, survvar, classnum, time) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  contents <- h5dump(file, load = TRUE)
  # Discrete survival data is only stored when TYPE=PLOT2 was requested.
  if (!("discrete_survival_data" %in% names(contents))) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  if (missing(survvar)) {
    stop("The survival variable must be given.")
  }
  props <- mplus.get.group.attribute(file, 'discrete_survival_data', 'properties')
  # Resolve a variable label to its index, or range-check a numeric index.
  if (is.character(survvar)) {
    surv_idx <- 0
    for (k in seq_len(props[1])) {
      grp <- sprintf("discrete_survival_data/survival%d", k)
      lbl <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, grp, 'label'), perl = TRUE)
      if (lbl == survvar) {
        surv_idx <- k
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx <- survvar
  }
  groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
  # Without a class number the first class's dataset is read; otherwise
  # the class-specific dataset after range-checking the class number.
  if (missing(classnum)) {
    datastr <- "kaplan_meier1"
  } else {
    classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
    dims <- attr(classes, 'dim')
    if (classnum <= 0 || classnum > dims[1]) {
      stop("Class number is out of range.")
    }
    datastr <- sprintf("kaplan_meier%d", classnum)
  }
  kmvals <- mplus.get.group.dataset(file, groupstr, datastr)
  if (missing(time)) {
    kmvals[, 2]
  } else {
    kmvals[, 1]
  }
}
#========================================================================
# mplus.get.discrete.survival.baseline.values
#
# Return the estimated baseline survival curve for a discrete-time
# survival variable.  With 'time' missing, the survival probabilities
# (column 2 of the stored dataset) are returned; when any 'time'
# argument is supplied, the time points (column 1) are returned.
# When 'survvar2' is given, the curves of survvar..survvar2 are linked
# into one sequential curve.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# survvar2 - ending survival variable for getting sequential time
# classnum - the group number (not required)
# time - any value; when present, time points are returned
#
# eg. mplus.get.discrete.survival.baseline.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.baseline.values <- function(file,survvar,survvar2,classnum,time) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:",file,"\n")
    stop(cstr)
  }
  gh5 <- h5dump(file, load=TRUE)
  # check if survival data exists
  if ( !("discrete_survival_data" %in% names(gh5)) ) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  if (missing(survvar)) {
    stop("The survival variable must be given.")
  }
  props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
  # Resolve the starting survival variable (label or index) to surv_idx.
  if (is.character(survvar)) {
    surv_idx <- 0
    for (i in c(1:props[1])) {
      cstr <- sprintf("discrete_survival_data/survival%d", i)
      label <- mplus.get.group.attribute(file,cstr,'label')
      label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
      if (label == survvar) {
        surv_idx <- i
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx <- survvar
  }
  # Resolve the optional ending survival variable to surv_idx2.
  if (!(missing(survvar2))) {
    if (is.character(survvar2)) {
      surv_idx2 <- 0
      for (i in c(1:props[1])) {
        cstr <- sprintf("discrete_survival_data/survival%d", i)
        label <- mplus.get.group.attribute(file,cstr,'label')
        label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
        if (label == survvar2) {
          surv_idx2 <- i
          break
        }
      }
      if (surv_idx2 == 0) {
        stop("- unknown survival variable: ", survvar2)
      }
    } else {
      if (survvar2 <= 0 || survvar2 > props[1]) {
        stop("- index for the survival variable is out of range")
      }
      surv_idx2 <- survvar2
    }
    if (surv_idx2 < surv_idx) {
      stop("- the ending survival variable must not precede the starting one")
    }
  }
  if (missing(survvar2)) {
    groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
    if (missing(classnum)) {
      datastr <- sprintf("estimated_survival")
    } else {
      classes <- mplus.get.group.dataset(file,'/','model_group_labels')
      dims <- attr(classes,'dim')
      if (classnum <= 0 || classnum > dims[1]) {
        stop("Class number is out of range.")
      }
      datastr <- sprintf("estimated_survival%d", classnum)
    }
    esvals <- mplus.get.group.dataset(file,groupstr,datastr)
    # Column 1 holds time points, column 2 the survival probabilities.
    if (missing(time)) {
      return(esvals[,2])
    } else {
      return(esvals[,1])
    }
  } else {
    # Ending survival variable given: link the per-variable baseline
    # curves into one sequential curve.  Each variable's times are
    # shifted by the end time reached so far (xlast) and its survival
    # probabilities scaled by the terminal survival probability reached
    # so far (ylast).
    # NOTE(review): the original code here was unfinished -- it
    # referenced the undefined names 'estvals'/'estvals1', shadowed the
    # 'time' parameter, and fell off the end without returning.  This
    # reconstruction follows the xlast/ylast chaining the original set
    # up; confirm against the Mplus plot output.
    if (!(missing(classnum))) {
      classes <- mplus.get.group.dataset(file,'/','model_group_labels')
      dims <- attr(classes,'dim')
      if (classnum <= 0 || classnum > dims[1]) {
        stop("Class number is out of range.")
      }
    }
    ylast <- 1
    xlast <- 0
    allvals <- vector()
    alltimes <- vector()
    for (s in c(surv_idx:surv_idx2)) {
      groupstr <- sprintf("discrete_survival_data/survival%d", s)
      if (missing(classnum)) {
        datastr <- sprintf("estimated_survival")
      } else {
        datastr <- sprintf("estimated_survival%d", classnum)
      }
      esvals1 <- mplus.get.group.dataset(file,groupstr,datastr)
      alltimes <- c(alltimes, xlast + esvals1[,1])
      allvals <- c(allvals, ylast * esvals1[,2])
      xlast <- alltimes[length(alltimes)]
      ylast <- allvals[length(allvals)]
    }
    if (missing(time)) {
      return(allvals)
    } else {
      return(alltimes)
    }
  }
}
#========================================================================
# mplus.get.discrete.survival.basehazard.values
#
# Return the baseline hazard curve for a discrete-time survival
# variable.  With 'time' missing, the hazard values (column 2 of the
# stored dataset) are returned; when any 'time' argument is supplied,
# the time points (column 1) are returned instead.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (required)
# classnum - the group number (not required)
# time - any value; when present, time points are returned
#
# eg. mplus.get.discrete.survival.basehazard.values('ex6.21.gh5','T')
#
mplus.get.discrete.survival.basehazard.values <- function(file, survvar, classnum, time) {
  if (missing(file)) {
    stop("- name of the GH5 file is required")
  }
  if (!(file.exists(file))) {
    stop(paste("- file does not exist:", file, "\n"))
  }
  contents <- h5dump(file, load = TRUE)
  # Discrete survival data is only stored when TYPE=PLOT2 was requested.
  if (!("discrete_survival_data" %in% names(contents))) {
    stop("- survival data is required\n\nUse TYPE=PLOT2.")
  }
  if (missing(survvar)) {
    stop("The survival variable must be given.")
  }
  props <- mplus.get.group.attribute(file, 'discrete_survival_data', 'properties')
  # Resolve a variable label to its index, or range-check a numeric index.
  if (is.character(survvar)) {
    surv_idx <- 0
    for (k in seq_len(props[1])) {
      grp <- sprintf("discrete_survival_data/survival%d", k)
      lbl <- gsub("(^\\s+|\\s+$)", "", mplus.get.group.attribute(file, grp, 'label'), perl = TRUE)
      if (lbl == survvar) {
        surv_idx <- k
        break
      }
    }
    if (surv_idx == 0) {
      stop("- unknown survival variable: ", survvar)
    }
  } else {
    if (survvar <= 0 || survvar > props[1]) {
      stop("- index for the survival variable is out of range")
    }
    surv_idx <- survvar
  }
  groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
  # Without a class number the unsuffixed dataset is read; otherwise the
  # class-specific dataset after range-checking the class number.
  if (missing(classnum)) {
    datastr <- "basehazard"
  } else {
    classes <- mplus.get.group.dataset(file, '/', 'model_group_labels')
    dims <- attr(classes, 'dim')
    if (classnum <= 0 || classnum > dims[1]) {
      stop("Class number is out of range.")
    }
    datastr <- sprintf("basehazard%d", classnum)
  }
  bhvals <- mplus.get.group.dataset(file, groupstr, datastr)
  if (missing(time)) {
    bhvals[, 2]
  } else {
    bhvals[, 1]
  }
}
#========================================================================
# mplus.plot.discrete.survival.kaplanmeier
#
# Plot the Kaplan-Meier survival curve(s) for a discrete-time survival
# variable, for every latent class or for a single class.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.kaplanmeier('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.kaplanmeier <- function(file,survvar=1,classnum) {
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if survival data exists
if ( !("discrete_survival_data" %in% names(gh5)) ) {
stop("- survival data is required\n\nUse TYPE=PLOT2.")
}
# props[1] holds the number of discrete-time survival variables.
props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
# Resolve a variable label to its index, or range-check a numeric index.
if (is.character(survvar)) {
surv_idx <- 0
for (i in c(1:props[1])) {
cstr <- sprintf("discrete_survival_data/survival%d", i)
label <- mplus.get.group.attribute(file,cstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
if (label == survvar) {
surv_idx = i
break
}
}
if (surv_idx == 0) {
stop("- unknown survival variable: ", survvar)
}
} else {
if (survvar <= 0 || survvar > props[1]) {
stop("- index for the survival variable is out of range")
}
surv_idx = survvar
}
groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
label <- mplus.get.group.attribute(file,groupstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
classes <- mplus.get.group.dataset(file,'/','model_group_labels')
dims <- attr(classes,'dim')
cstr <- sprintf("Kaplan-Meier curve for %s", label)
if (missing(classnum)) {
# One curve per class, NA-padded to the longest curve so all classes
# share a single matrix.  The trailing 0 argument requests the time
# points; omitting it requests the curve values.
npoints <- array(0, c(dims[1]))
for (i in c(1:dims[1])) {
xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
npoints[i] = length(xx)
}
maxpoints = max(npoints)
xall <- array(NA, c(dims[1],maxpoints))
yall <- array(NA, c(dims[1],maxpoints))
for (i in c(1:dims[1])) {
xall[i,1:npoints[i]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
yall[i,1:npoints[i]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i)
}
# Empty frame first (type='n'), then one colored line per class.
plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
colors <- rainbow(dims[1])
for (i in c(1:dims[1])) {
lines(xall[i,],yall[i,],col=colors[i])
}
ldesc <- array(0,c(dims[1]))
lty <- array(0,c(dims[1]))
lwd <- array(0,c(dims[1]))
for (i in c(1:dims[1])) {
ldesc[i] <- sprintf("Class %d", i)
lty[i] = 1
lwd[i] = 2.5
}
legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
} else {
# Single requested class drawn in red.
if (classnum <= 0 || classnum > dims[1]) {
stop("- class number is out of range")
}
xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
yy <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum)
plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
lines(xx,yy,col='red')
}
}
#========================================================================
# mplus.plot.discrete.survival.baseline
#
# Plot the estimated baseline survival curve(s) for a discrete-time
# survival variable, for every latent class or for a single class.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.baseline('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.baseline <- function(file,survvar=1,classnum) {
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if survival data exists
if ( !("discrete_survival_data" %in% names(gh5)) ) {
stop("- survival data is required\n\nUse TYPE=PLOT2.")
}
# props[1] holds the number of discrete-time survival variables.
props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
# Resolve a variable label to its index, or range-check a numeric index.
if (is.character(survvar)) {
surv_idx <- 0
for (i in c(1:props[1])) {
cstr <- sprintf("discrete_survival_data/survival%d", i)
label <- mplus.get.group.attribute(file,cstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
if (label == survvar) {
surv_idx = i
break
}
}
if (surv_idx == 0) {
stop("- unknown survival variable: ", survvar)
}
} else {
if (survvar <= 0 || survvar > props[1]) {
stop("- index for the survival variable is out of range")
}
surv_idx = survvar
}
groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
label <- mplus.get.group.attribute(file,groupstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
classes <- mplus.get.group.dataset(file,'/','model_group_labels')
dims <- attr(classes,'dim')
cstr <- sprintf("Estimated baseline survival curve for %s", label)
if (missing(classnum)) {
# One curve per class, NA-padded to the longest curve.  Passing time=0
# requests time points; omitting it requests survival probabilities.
npoints <- array(0, c(dims[1]))
for (i in c(1:dims[1])) {
xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
npoints[i] = length(xx)
}
maxpoints = max(npoints)
xall <- array(NA, c(dims[1],maxpoints))
yall <- array(NA, c(dims[1],maxpoints))
for (i in c(1:dims[1])) {
xall[i,1:npoints[i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
yall[i,1:npoints[i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i)
}
# Empty frame first (type='n'), then one colored line per class.
plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
colors <- rainbow(dims[1])
for (i in c(1:dims[1])) {
lines(xall[i,],yall[i,],col=colors[i])
}
ldesc <- array(0,c(dims[1]))
lty <- array(0,c(dims[1]))
lwd <- array(0,c(dims[1]))
for (i in c(1:dims[1])) {
ldesc[i] <- sprintf("Class %d", i)
lty[i] = 1
lwd[i] = 2.5
}
legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
} else {
# Single requested class drawn in red.
if (classnum <= 0 || classnum > dims[1]) {
stop("Class number is out of range.")
}
xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
yy <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum)
plot(xx,yy,xlab="Time",ylab="Probability",main=cstr,type='n',ylim=c(0,1))
lines(xx,yy,col='red')
}
}
#========================================================================
# mplus.plot.discrete.survival.kaplanmeier.vs.baseline
#
# Plot the Kaplan-Meier curve against the estimated baseline survival
# curve for a discrete-time survival variable, for every latent class or
# for a single class.
#
# arguments:
# file - the quoted name of an existing GH5 file
# survvar - the quoted name of the survival variable or the index of the survival variable (not required)
# classnum - the group number (not required)
#
# eg. mplus.plot.discrete.survival.kaplanmeier.vs.baseline('ex6.21.gh5','T')
#
mplus.plot.discrete.survival.kaplanmeier.vs.baseline <- function(file,survvar=1,classnum) {
if (missing(file)) {
stop("- name of the GH5 file is required")
}
if (!(file.exists(file))) {
cstr <- paste("- file does not exist:",file,"\n")
stop(cstr)
}
gh5 <- h5dump(file, load=TRUE)
# check if survival data exists
if ( !("discrete_survival_data" %in% names(gh5)) ) {
stop("- survival data is required\n\nUse TYPE=PLOT2.")
}
props <- mplus.get.group.attribute(file,'discrete_survival_data','properties')
# Resolve a variable label to its index, or range-check a numeric index.
if (is.character(survvar)) {
surv_idx <- 0
for (i in c(1:props[1])) {
cstr <- sprintf("discrete_survival_data/survival%d", i)
label <- mplus.get.group.attribute(file,cstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
if (label == survvar) {
surv_idx = i
break
}
}
if (surv_idx == 0) {
stop("- unknown survival variable: ", survvar)
}
} else {
if (survvar <= 0 || survvar > props[1]) {
stop("- index for the survival variable is out of range")
}
surv_idx = survvar
}
groupstr <- sprintf("discrete_survival_data/survival%d", surv_idx)
label <- mplus.get.group.attribute(file,groupstr,'label')
label <- gsub("(^\\s+|\\s+$)", "", label, perl=TRUE)
classes <- mplus.get.group.dataset(file,'/','model_group_labels')
dims <- attr(classes,'dim')
cstr <- sprintf("Kaplan-Meier curve compared with\nestimated baseline survival curve for %s", label)
if (missing(classnum)) {
# One KM/baseline pair per class: row 2*(i-1)+1 of xall/yall is the KM
# curve for class i and row 2*i the estimated baseline curve.  Rows
# are NA-padded to the longest curve.
npoints <- array(0, c(2*dims[1]))
for (i in c(1:dims[1])) {
xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
npoints[2*(i-1)+1] = length(xx)
xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
npoints[2*i] = length(xx)
}
maxpoints = max(npoints)
xall <- array(NA, c(2*dims[1],maxpoints))
yall <- array(NA, c(2*dims[1],maxpoints))
for (i in c(1:dims[1])) {
xall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i,0)
yall[2*(i-1)+1,1:npoints[2*(i-1)+1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,i)
xall[2*i,1:npoints[2*i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i,time=0)
yall[2*i,1:npoints[2*i]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=i)
}
# NOTE(review): unlike the continuous-time version of this plot, no
# ylim=c(0,1) is set here even though the y values are probabilities --
# confirm whether that difference is intentional.
plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n')
colors <- rainbow(2*dims[1])
for (i in c(1:(2*dims[1]))) {
lines(xall[i,],yall[i,],col=colors[i])
}
# Legend: KM = Kaplan-Meier, ES = estimated (baseline) survival.
ldesc <- array(0,c(2*dims[1]))
lty <- array(0,c(2*dims[1]))
lwd <- array(0,c(2*dims[1]))
for (i in c(1:dims[1])) {
ldesc[2*(i-1)+1] <- sprintf("KM for Class %d", i)
lty[2*(i-1)+1] = 1
lwd[2*(i-1)+1] = 2.5
ldesc[2*i] <- sprintf("ES for Class %d", i)
lty[2*i] = 1
lwd[2*i] = 2.5
}
legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
} else {
# Single requested class: same two curves, one pair only.
if (classnum <= 0 || classnum > dims[1]) {
stop("Class number is out of range.")
}
npoints <- array(0, c(2))
xx <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
npoints[1] = length(xx)
xx <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
npoints[2] = length(xx)
maxpoints = max(npoints)
xall <- array(NA, c(2,maxpoints))
yall <- array(NA, c(2,maxpoints))
xall[1,1:npoints[1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum,0)
yall[1,1:npoints[1]] <- mplus.get.discrete.survival.kaplanmeier.values(file,surv_idx,classnum)
xall[2,1:npoints[2]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum,time=0)
yall[2,1:npoints[2]] <- mplus.get.discrete.survival.baseline.values(file,surv_idx,classnum=classnum)
plot(xall,yall,xlab="Time",ylab="Probability",main=cstr,type='n')
colors <- rainbow(2)
for (i in c(1:2)) {
lines(xall[i,],yall[i,],col=colors[i])
}
ldesc <- array(0,c(2))
lty <- array(0,c(2))
lwd <- array(0,c(2))
ldesc[1] <- sprintf("KM for Class %d", classnum)
lty[1] = 1
lwd[1] = 2.5
ldesc[2] <- sprintf("ES for Class %d", classnum)
lty[2] = 1
lwd[2] = 2.5
legend("top",ldesc,col=colors,lty=lty,lwd=lwd)
}
}
######################################################################################################
# Supporting functions
######################################################################################################
##########################################################################
#
# mplus.get.group.attribute - supporting function for getting attribute
#
# Read one attribute attached to an HDF5 group and return it with
# leading and trailing whitespace stripped.
#
# arguments:
# file - the quoted name of an existing GH5 file
# groupstr - the name of the group holding the attribute
# attrstr - the name of the attribute
#
# eg. mplus.get.group.attribute('ex8.1.gh5','individual_data','var_names')
#
mplus.get.group.attribute <- function(file, groupstr, attrstr) {
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  # Open the file/group/attribute handles directly; the previous
  # h5dump() call read the entire file into memory only to discard it.
  fid <- H5Fopen(file)
  gid <- H5Gopen(fid, groupstr)
  atid <- H5Aopen(gid, attrstr)
  attr <- H5Aread(atid)
  H5Aclose(atid)
  H5Gclose(gid)
  H5Fclose(fid)
  # Strip the leading/trailing blanks Mplus pads attribute text with.
  attr <- gsub("(^\\s+|\\s+$)", "", attr, perl = TRUE)
  return(attr)
}
##########################################################################
#
# mplus.get.dataset.attribute - supporting function for getting an
# attribute attached to an HDF5 dataset
# (the previous header here was a copy-paste of
# mplus.get.group.attribute with the wrong name and argument list)
#
# arguments:
# file - the quoted name of an existing GH5 file
# datastr - the path of the dataset holding the attribute
# attrstr - the name of the attribute
#
mplus.get.dataset.attribute <- function(file, datastr, attrstr) {
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  # Open the file/dataset/attribute handles directly; the previous
  # h5dump() call read the entire file into memory only to discard it.
  fid <- H5Fopen(file)
  did <- H5Dopen(fid, datastr)
  atid <- H5Aopen(did, attrstr)
  attr <- H5Aread(atid)
  H5Aclose(atid)
  H5Dclose(did)
  H5Fclose(fid)
  # Unlike mplus.get.group.attribute, the value is returned untrimmed.
  return(attr)
}
##########################################################################
#
# mplus.get.group.dataset - supporting function for getting dataset
#
# Open the named dataset inside an HDF5 group and return its contents.
#
# arguments:
# file - the quoted name of an existing GH5 file
# groupstr - the name of the group containing the dataset
# datastr - the name of the dataset
#
# eg. mplus.get.group.dataset('ex8.1.gh5','bayesian_data','statements')
#
mplus.get.group.dataset <- function(file, groupstr, datastr) {
  if (!(file.exists(file))) {
    cstr <- paste("- file does not exist:", file, "\n")
    stop(cstr)
  }
  # Open the file/group/dataset handles directly; the previous h5dump()
  # call read the entire file into memory only to discard it.
  fid <- H5Fopen(file)
  gid <- H5Gopen(fid, groupstr)
  dtid <- H5Dopen(gid, datastr)
  data <- H5Dread(dtid)
  H5Dclose(dtid)
  H5Gclose(gid)
  H5Fclose(fid)
  return(data)
}
# Estimate the mode of a numeric sample as the location of the highest
# peak of a kernel density estimate (stats::density, default settings).
estimate_mode <- function(x) {
  dens <- density(x)
  dens$x[which.max(dens$y)]
}
######################################################################################################
# Math functions
######################################################################################################
# Inverse link function: map a linear predictor y to a probability.
# link == 0 selects the logistic (logit) link (via the logistic()
# helper below); any other value selects the standard-normal CDF
# (probit link).
lin <- function(y, link) {
  if (link == 0) {
    logistic(y)
  } else {
    pnorm(y, mean = 0, sd = 1)
  }
}
# Numerically-guarded logistic (inverse logit) function.
#
# Returns 1/(1+exp(-y)) with the tails clamped: inputs above 50 map to
# exactly 1 and inputs at or below -50 map to exactly 0 (matching the
# original scalar thresholds).  Generalized from the original
# scalar-only if/else chain -- which errors on vector input in modern R
# -- to accept numeric vectors; scalar inputs return the same values as
# before.
logistic <- function(y) {
  x <- numeric(length(y))
  hi <- y > 50
  mid <- !hi & y > -50
  x[hi] <- 1
  x[mid] <- 1 / (1 + exp(-y[mid]))
  x
}
|
testlist <- list(lims = structure(c(NA, Inf, 4.94065645841247e-324, 4.94065645841247e-324, 1.38523883680017e-309, 4.94065645841247e-324, 7.83611878861782e-317, 4.94065645841247e-324, 5.38986930905673e-312, 1.33634806931829e-309, 2.12198984142677e-314, 2.1525322333871e+71, NA, 13849427.3254902, 1.45350484977583e+135, 8.48798316386109e-314), .Dim = c(8L, 2L )), points = structure(5.24134922795414e-304, .Dim = c(1L, 1L )))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988643-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 488
|
r
|
testlist <- list(lims = structure(c(NA, Inf, 4.94065645841247e-324, 4.94065645841247e-324, 1.38523883680017e-309, 4.94065645841247e-324, 7.83611878861782e-317, 4.94065645841247e-324, 5.38986930905673e-312, 1.33634806931829e-309, 2.12198984142677e-314, 2.1525322333871e+71, NA, 13849427.3254902, 1.45350484977583e+135, 8.48798316386109e-314), .Dim = c(8L, 2L )), points = structure(5.24134922795414e-304, .Dim = c(1L, 1L )))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 103269
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 103269
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc03-nonuniform-depth-109.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 38611
c no.of clauses 103269
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 103269
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc03-nonuniform-depth-109.qdimacs 38611 103269 E1 [] 0 220 38172 103269 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc03-nonuniform-depth-109/tlc03-nonuniform-depth-109.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 703
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 103269
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 103269
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc03-nonuniform-depth-109.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 38611
c no.of clauses 103269
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 103269
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc03-nonuniform-depth-109.qdimacs 38611 103269 E1 [] 0 220 38172 103269 NONE
|
#' The Ames Housing dataset
#'
#' @format 1460 X 81 dataframe.
#'
#' \describe{
#' \item{SalePrice}{The property's sale price in dollars. This is the target variable.}
#' \item{MSSubClass}{The building class}
#' \item{MSZoning}{The general zoning classification}
#' \item{LotFrontage}{Linear feet of street connected to property}
#' \item{LotArea}{Lot size in square feet}
#' \item{Street}{Type of road access}
#' \item{YearBuilt}{Original construction date}
#' .
#' .
#' .
#' }
house_data = read.csv("house price.csv")
usethis::use_data(house_data, overwrite = TRUE)
|
/R/house_data.R
|
permissive
|
ClaraLichee/somefunctions
|
R
| false
| false
| 590
|
r
|
#' The Ames Housing dataset
#'
#' @format 1460 X 81 dataframe.
#'
#' \describe{
#' \item{SalePrice}{The property's sale price in dollars. This is the target variable.}
#' \item{MSSubClass}{The building class}
#' \item{MSZoning}{The general zoning classification}
#' \item{LotFrontage}{Linear feet of street connected to property}
#' \item{LotArea}{Lot size in square feet}
#' \item{Street}{Type of road access}
#' \item{YearBuilt}{Original construction date}
#' .
#' .
#' .
#' }
house_data = read.csv("house price.csv")
usethis::use_data(house_data, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{is_invalid_na_or_n}
\alias{is_invalid_na_or_n}
\title{Check if numeric or character vector is invalid, NA or N}
\usage{
is_invalid_na_or_n(x)
}
\arguments{
\item{x}{number or character}
}
\value{
TRUE/FALSE
}
\description{
Check is_invalid and is_n for definitions of invalid and N
}
|
/man/is_invalid_na_or_n.Rd
|
no_license
|
EddieZhang540/INORMUS
|
R
| false
| true
| 378
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{is_invalid_na_or_n}
\alias{is_invalid_na_or_n}
\title{Check if numeric or character vector is invalid, NA or N}
\usage{
is_invalid_na_or_n(x)
}
\arguments{
\item{x}{number or character}
}
\value{
TRUE/FALSE
}
\description{
Check is_invalid and is_n for definitions of invalid and N
}
|
# Dichotomize the bootstrap output: an observation counts as "radical"
# (conclusive) evidence when its magnitude exceeds 10.
#
# Relies on objects created upstream in the pipeline (not defined here):
#   dfs_boot     - list of bootstrap result matrices, one per df setting
#   n_parents    - number of parent data sets (rows of the summary matrix)
#   n_bss        - number of bootstrap samples per parent
#   baseline_dat - matrix of baseline (non-bootstrap) results
#   dfs          - the df values that the factor levels 1..k map back to
# The bootstrap data
is_radical <- function(x) { abs(x) > 10 }
df_vals <- length(dfs_boot)
data_shell <- matrix(NA, nrow = n_parents, ncol = df_vals)
for (i in seq_along(dfs_boot)) {  # seq_along() is safe for empty lists, unlike 1:length()
    data <- dfs_boot[[i]]
    # element-wise dichotomization, then per-column share of radical draws
    data_jeff <- apply(data, c(1, 2), is_radical)
    summary_jeff <- apply(data_jeff, 2, sum) / n_bss
    data_shell[ , i] <- summary_jeff
}
# long format: one row per (df setting, parent sample)
shell_melt <- reshape2::melt(data_shell)[, 2:3]
colnames(shell_melt) <- c("df", "radical")
shell_melt$df <- as.factor(shell_melt$df)
# The baseline data: share of radical results per df column, repeated so it
# lines up with the bootstrap rows
baseline_jeff <- is_radical(baseline_dat)
summary_baseline_jeff <- colSums(baseline_jeff) / nrow(baseline_jeff)
truth <- rep(summary_baseline_jeff, each = n_parents)
shell_melt <- cbind(shell_melt, truth)
# Plotting it all
# Calculate the mean for each value of df and add to data frame
setDT(shell_melt)[, mean_theta := mean(radical), by = df]
# translate the factor level index (1..k) back into the actual df value
shell_melt$df <- as.numeric(shell_melt$df)
for (i in seq_along(dfs)) {
    shell_melt[shell_melt$df == i, 1] <- dfs[i]
}
shell_melt$df <- as.factor(shell_melt$df)
# write the faceted density plot straight to disk
jpeg("n1000_jeff10.jpg", width = 700, height = 700)
ggplot(shell_melt, aes(x = radical)) +
    geom_density() + geom_point(aes(x = truth, y = 0.1), col = "red") +
    geom_point(aes(x = mean_theta, y = 0.1), col = "black") +
    facet_grid(rows = vars(df), scales = "free_x", labeller = label_both) +
    xlab("Probability of observing conclusive evidence") + ylab("Density") +
    #theme_tufte() + #theme(axis.line = element_line()) +
    scale_x_continuous(expand = c(0, 0), limits = c(0, 1)) +
    scale_y_continuous(expand = c(0, 0), limits = c(0, 10)) +
    theme_bw() + theme(text = element_text(size = 15))
dev.off()
|
/Code/Old versions/First functioning version/3b_jeffreys_vis.R
|
no_license
|
ooelrich/bootstrap-overconfidence
|
R
| false
| false
| 1,683
|
r
|
# Dichotomize the data: a value counts as "radical" (conclusive) evidence
# when its magnitude exceeds 10.
# Relies on objects created upstream in the pipeline (not defined here):
#   dfs_boot (list of bootstrap matrices), n_parents, n_bss,
#   baseline_dat (baseline result matrix), dfs (actual df values)
# The bootstrap data
is_radical <- function(x) { abs(x) > 10 }
df_vals <- length(dfs_boot)
data_shell <- matrix(NA, nrow = n_parents, ncol = df_vals)
for (i in seq_len(length(dfs_boot))) {
    data <- dfs_boot[[i]]
    # element-wise dichotomization, then per-column share of radical draws
    data_jeff <- apply(data, c(1, 2), is_radical)
    summary_jeff <- apply(data_jeff, 2, sum) / n_bss
    data_shell[ , i] <- summary_jeff
}
# long format: one row per (df setting, parent sample)
shell_melt <- reshape2::melt(data_shell)[, 2:3]
colnames(shell_melt) <- c("df", "radical")
shell_melt$df <- as.factor(shell_melt$df)
# The baseline data: share of radical results per df column, repeated so it
# lines up with the bootstrap rows
baseline_jeff <- is_radical(baseline_dat)
summary_baseline_jeff <- colSums(baseline_jeff) / nrow(baseline_jeff)
truth <- rep(summary_baseline_jeff, each = n_parents)
shell_melt <- cbind(shell_melt, truth)
# Plotting it all
# Calculate the mean for each value of df and add to data frame
setDT(shell_melt)[, mean_theta := mean(radical), by = df]
# translate the factor level index (1..k) back into the actual df value
shell_melt$df <- as.numeric(shell_melt$df)
for (i in seq_len(length(dfs))) {
    shell_melt[shell_melt$df == i, 1] <- dfs[i]
}
shell_melt$df <- as.factor(shell_melt$df)
# write the faceted density plot straight to disk
jpeg("n1000_jeff10.jpg", width = 700, height = 700)
ggplot(shell_melt, aes(x = radical)) +
    geom_density() + geom_point(aes(x = truth, y = 0.1), col = "red") +
    geom_point(aes(x = mean_theta, y = 0.1), col = "black") +
    facet_grid(rows = vars(df), scales = "free_x", labeller = label_both) +
    xlab("Probability of observing conclusive evidence") + ylab("Density") +
    #theme_tufte() + #theme(axis.line = element_line()) +
    scale_x_continuous(expand = c(0, 0), limits = c(0, 1)) +
    scale_y_continuous(expand = c(0, 0), limits = c(0, 10)) +
    theme_bw() + theme(text = element_text(size = 15))
dev.off()
|
library(pdftools)  # pdf_text(): extracts text, one string per PDF page
library(stringr)
library(plotly)
# Get current time for later use; currenttimetext is presumably interpolated
# into the dashboard (index.Rmd, rendered at the bottom of this script) --
# confirm before renaming any of these top-level objects.
currenttime <- as.POSIXct(Sys.time(), tz=Sys.timezone())
attributes(currenttime)$tzone <- "Asia/Hong_Kong"
currenttimetext <- paste("最後更新於香港時間 ", format(currenttime, "%Y-%m-%d %H:%M"), sep="")
# download latest data file (URL and destination name are bumped manually
# each run to match the newest CHP report)
download.file("https://www.chp.gov.hk/files/pdf/building_list_eng_20210212.pdf", "../00_original/building_list_eng_20210212.pdf")
# remember to manually check if this is the correct separator
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# read in all pdf names
# NOTE(review): pattern=".pdf" is a regex, so "." matches any character; a
# stricter pattern would be "\\.pdf$" -- confirm no stray files match.
pdflist <- dir(path="../00_original/", pattern=".pdf")
###################################
# preparation for info extraction #
###################################
# read in hk district table; must supply district_en, district_ch and pop
# columns (all three are used downstream)
district <- read.csv("../00_original/district-population.csv")
# set up empty (zero-column) df for 2-week numbers; one row per
# (district, report date) is appended during extraction
master2wk <- data.frame(district_en = NULL, day = NULL, case = NULL)
##########################
# actual info extraction #
##########################
#
# The CHP "building list" PDFs contain the table we want (buildings visited
# by confirmed / probable cases in the last 14 days) followed, in most eras,
# by a second table that must be discarded.  The title of that second table
# -- and the way it has to be cut away -- changed many times over 2020-21,
# so each era is described by one spec entry instead of repeating the same
# extraction loop fourteen times:
#
#   files   - indices into pdflist covered by the era
#   date_re - regex pulling the report date out of the file name
#   mode    - how to drop the irrelevant second table:
#             "none"      nothing to drop (earliest files have one table only)
#             "gsub_only" on every page, delete `sep` (pattern already ends
#                         in ".*") and everything after it
#             "before"    keep only the pages strictly before the first page
#                         whose text matches `sep`
#             "through"   keep pages up to and including the first matching
#                         page, then delete `sep` and everything after it
#   sep     - separator pattern used by the chosen mode

# Regex that anchors a district name to the beginning of a line of PDF text.
# "Sha Tin" and "Central & Western" appear under more than one spelling in
# the PDFs, so those two districts get alternation patterns.
district_pattern <- function(district_name) {
  if (district_name == "Sha Tin") {
    paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
  } else if (district_name == "Central & Western") {
    paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
  } else {
    paste("\\\n[ ]*", district_name, sep="")
  }
}

# Drop the second, irrelevant table from the vector of page texts (see the
# mode documentation above).  The original code wrote
# temptext[1:grep(sep, temptext)-1], which by operator precedence is
# (1:hit) - 1 == 0:(hit-1); index 0 is silently dropped, so it kept pages
# 1..(hit-1).  seq_len() below states that intent explicitly.
trim_pages <- function(pages, sep, mode) {
  if (mode == "none") {
    return(pages)
  }
  if (mode == "gsub_only") {
    return(gsub(sep, "", pages))
  }
  hit <- grep(sep, pages)[1]  # first page containing the separator title
  if (mode == "before") {
    return(pages[seq_len(hit - 1)])
  }
  # mode == "through": keep up to and including the matching page, then
  # strip the separator title and everything after it
  pages <- pages[seq_len(hit)]
  gsub(paste0(sep, ".*"), "", pages)
}

# Count, for every district, the number of buildings listed under that
# district in `pages`, and return one row per district as a character matrix
# (district_en, day, case) ready to rbind onto master2wk.
count_district_rows <- function(pages, report_date, district) {
  rows <- NULL
  for (i_district in seq_len(nrow(district))) {
    targetdistrict <- district_pattern(district$district_en[i_district])
    # gregexpr gives one -1 per page without a match; every other value is
    # the position of one building row for this district
    tempcount <- unlist(gregexpr(targetdistrict, pages))
    numofcase <- sum(tempcount != -1)
    rows <- rbind(rows,
                  cbind(district_en = as.character(district$district_en[i_district]),
                        day = report_date,
                        case = numofcase))
  }
  rows
}

# One entry per era of the CHP PDF layout.
extraction_specs <- list(
  # building_list_eng_20200123_184843.pdf (1) .. _20200212_202406.pdf (18)
  list(files = 1:18, date_re = "2020[0-9]+", mode = "none", sep = NULL),
  # _20200213_000000.pdf (19) .. _20200216_220021.pdf (22)
  list(files = 19:22, date_re = "2020[0-9]+", mode = "gsub_only",
       sep = "\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n.*"),
  # _20200217_231207.pdf (23) only
  list(files = 23, date_re = "2020[0-9]+", mode = "before",
       sep = "List of non-residential buildings with confirmed cases visited after onset of symptoms"),
  # _20200218_215230.pdf (24) .. _20200219_173655.pdf (25)
  list(files = 24:25, date_re = "2020[0-9]+", mode = "before",
       sep = "List of buildings with confirmed cases visited after onset of symptoms or"),
  # _20200220_224016.pdf (26) .. _20200221_213153.pdf (27)
  list(files = 26:27, date_re = "2020[0-9]+", mode = "before",
       sep = "List of buildings with probable/confirmed cases visited after onset of symptoms"),
  # _20200222_000000.pdf (28) only
  list(files = 28, date_re = "2020[0-9]+", mode = "before",
       sep = "List of buildings with probable/confirmed cases visited from 2 days before onset of symptoms"),
  # _20200223_225143.pdf (29) .. _20200310_225716.pdf (45)
  list(files = 29:45, date_re = "2020[0-9]+", mode = "through",
       sep = "List of buildings with confirmed cases visited from 2 days before onset of symptoms"),
  # _20200311_223130.pdf (46) .. _20200326_000000.pdf (61)
  list(files = 46:61, date_re = "2020[0-9]+", mode = "before",
       sep = "List of buildings with confirmed / probable cases visited from 2 days"),
  # _20200327_234929.pdf (62) .. _20200425_000000.pdf (91)
  list(files = 62:91, date_re = "2020[0-9]+", mode = "before",
       sep = "List of buildings with confirmed / probable cases visited"),
  # _20200426_000000.pdf (92) .. _20200428.pdf (94)
  list(files = 92:94, date_re = "2020[0-9]+", mode = "through",
       sep = "List of buildings visited by confirmed / probable cases"),
  # _20200429.pdf (95) .. _20200707.pdf (164)
  list(files = 95:164, date_re = "2020[0-9]+", mode = "through",
       sep = "List of Buildings Visited by Confirmed / Probable Cases"),
  # _20200708.pdf (165) .. _20200829.pdf (217)
  list(files = 165:217, date_re = "2020[0-9]+", mode = "through",
       sep = "List of buildings with confirmed / probable cases visited from 2 days"),
  # _20200830.pdf (218) .. _20201231.pdf (340)
  list(files = 218:340, date_re = "2020[0-9]+", mode = "through",
       sep = "List of buildings with confirmed / probable cases visited from 2 days before onset"),
  # pdflist[341] onward (2021 files, to current); date regex widened to
  # "202[0-9]+" so 2021+ dates are captured
  list(files = 341:length(pdflist), date_re = "202[0-9]+", mode = "through",
       sep = "List of buildings with confirmed / probable cases visited from 2 days before onset")
)

for (i_spec in seq_along(extraction_specs)) {
  spec <- extraction_specs[[i_spec]]
  for (i_file in spec$files) {
    # loop through each target file, also retrieve the report date from
    # the file name
    targetfile <- paste("../00_original/", pdflist[i_file], sep="")
    temptext <- pdf_text(targetfile)
    tempdate <- stringr::str_extract(pdflist[i_file], spec$date_re)
    tempdate <- format(as.Date(tempdate, "%Y%m%d"))
    # remove text belonging to the second, irrelevant table (era-dependent)
    temptext <- trim_pages(temptext, spec$sep, spec$mode)
    # one row per district for this report date
    master2wk <- rbind(master2wk, count_district_rows(temptext, tempdate, district))
  }
  # same sanity check the original script ran after the first era
  if (i_spec == 1) {
    print(table(master2wk$day))
  }
}
#############################
# done with data extraction #
#############################
write.csv(master2wk, "hk-covid19-2wk.csv", row.names = FALSE)
###################################
# data wrangle / summary for plot #
###################################
# master2wk$case arrives as character (rows were rbind-ed from character
# matrices), so coerce it back to numeric, and day back to Date
master2wk$case <- as.numeric(as.character(master2wk$case))
master2wk$day <- as.Date(master2wk$day)
# masterday: per-district daily average over each 14-day window
masterday <- master2wk
masterday$case <- round(masterday$case/14, 3)
masterday <- merge(masterday, district, by="district_en", all.x = TRUE)
masterday$case100k <- round(masterday$case / masterday$pop*100000, 2)
# aesthetics: sort rows, then reorder columns
# NOTE(review): the positional reorder c(1, 4, 5, 2, 3, 6) assumes a fixed
# column order after the merge with district-population.csv -- confirm if
# that CSV ever changes.
masterday <- masterday[order(masterday$day, masterday$district_en), ]
masterday <- masterday[, c(1, 4, 5, 2, 3, 6)]
day_earliest <- min(masterday$day)
day_latest <- max(masterday$day)
# find latest PDF name, extract its (date-matched) rows for the map
pdflist[length(pdflist)]
latestdate <- stringr::str_extract(pdflist[length(pdflist)], "202[0-9]+")
latestdate <- format(as.Date(latestdate, "%Y%m%d"))
master2wk_latest <- subset(master2wk, master2wk$day == latestdate)
master2wk_latest <- merge(master2wk_latest, district, by="district_en")
master2wk_latest$case100k <- round(master2wk_latest$case / master2wk_latest$pop * 100000, 3)
master2wk_latest$day <- NULL
master2wk_latest$district_ch <- NULL
master2wk_latest$pop <- NULL
names(master2wk_latest) <- c("District", "case", "case100k")
head(masterday)
#########
# plots #
#########
# fig_day / fig_100k are consumed by index.Rmd -- keep the names stable
fig_day <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case, color=~district_ch) %>%
  layout(title=list(text="14日平均每日新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
fig_100k <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case100k, color=~district_ch) %>%
  layout(title=list(text="14日平均每日每十萬人新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均每十萬人新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
#######
# map #
#######
library(sf)
library(leaflet)
# read the 18-district boundary polygons, then attach the latest counts
districtmap <- st_read("../00_original/hksar_18_district_boundary.json")
districtmap <- merge(districtmap, master2wk_latest, by="District")
# cf https://rstudio.github.io/leaflet/choropleths.html
bins_raw <- c(0, 1, 2, 5, 10, 15, 70, 100, Inf)
palette_raw <- colorBin("Reds", domain=districtmap$case, bins=bins_raw)
# NOTE(review): labels read districtmap$地區 -- assumes the boundary geojson
# carries a Chinese-name column called 地區; confirm against the data file.
label_raw <- sprintf("<strong>%s</strong><br/>過去14日有 %g 宗確診或疑似個案",
                     districtmap$地區, districtmap$case) %>%
  lapply(htmltools::HTML)
map_raw <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_raw(case), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_raw) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
bins_100k <- c(0, 1, 2.5, 5, 10, 25, Inf)
palette_100k <- colorBin("Reds", domain=districtmap$case100k, bins=bins_100k)
label_100k <- sprintf("<strong>%s</strong><br/>過去14日,每十萬人有 %g 宗確診或疑似個案",
                      districtmap$地區, districtmap$case100k) %>%
  lapply(htmltools::HTML)
map_100k <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_100k(case100k), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_100k) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
##################
# Make dashboard #
##################
# render the dashboard, then copy it into the published docs/ directory
rmarkdown::render(input = "index.Rmd")
file.copy("index.html", "../docs/", overwrite = TRUE)
|
/2021-02-12/analysis.R
|
no_license
|
tszhim-tsui/2020-hk-covid19
|
R
| false
| false
| 29,259
|
r
|
library(pdftools)
library(stringr)
library(plotly)
# Get current time for later use
currenttime <- as.POSIXct(Sys.time(), tz=Sys.timezone())
attributes(currenttime)$tzone <- "Asia/Hong_Kong"
currenttimetext <- paste("最後更新於香港時間 ", format(currenttime, "%Y-%m-%d %H:%M"), sep="")
# download latest data file
download.file("https://www.chp.gov.hk/files/pdf/building_list_eng_20210212.pdf", "../00_original/building_list_eng_20210212.pdf")
# remember to manually check if this is the correct separator
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# read in all pdf names
pdflist <- dir(path="../00_original/", pattern=".pdf")
###################################
# preparation for info extraction #
###################################
# read in hk district
district <- read.csv("../00_original/district-population.csv")
# set up empty df for 2-week numbers
master2wk <- data.frame(district_en = NULL, day = NULL, case = NULL)
##########################
# actual info extraction #
##########################
# from building_list_eng_20200123_184843.pdf == pdflist[1]
# to building_list_eng_20200212_202406.pdf == pdflist[18]
# The earliest PDFs carry only the residential-building table, so every page
# can be scanned as-is with no second-table trimming.
for (file_idx in 1:18) {
  # Read the PDF pages as text and recover the report date from the file name.
  pdf_path <- paste("../00_original/", pdflist[file_idx], sep = "")
  pages <- pdf_text(pdf_path)
  report_date <- stringr::str_extract(pdflist[file_idx], "2020[0-9]+")
  report_date <- format(as.Date(report_date, "%Y%m%d"))
  # Count, per district, how many buildings (lines) mention that district.
  for (d_idx in 1:dim(district)[1]) {
    # District names in the PDF always follow a line break, so anchor the
    # pattern there; two districts need alternate spellings.
    d_name <- district$district_en[d_idx]
    if (d_name == "Sha Tin") {
      d_pattern <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep = "")
    } else if (d_name == "Central & Western") {
      d_pattern <- paste("\\\n[ ]*", "(Central|Central & Western)", sep = "")
    } else {
      d_pattern <- paste("\\\n[ ]*", d_name, sep = "")
    }
    # gregexpr returns match positions per page; non-matches are -1.
    hit_positions <- unlist(gregexpr(d_pattern, pages))
    n_cases <- sum(hit_positions != -1)
    # Append one district/day row.  cbind coerces everything to character,
    # which the wrangling step later undoes -- kept for compatibility.
    master2wk <- rbind(master2wk,
                      cbind(district_en = as.character(d_name),
                            day = report_date,
                            case = n_cases))
  }
}
# Quick sanity check: number of district rows recorded per report date so far.
table(master2wk$day)
# from building_list_eng_20200213_000000.pdf == pdflist[19]
# to building_list_eng_20200216_220021.pdf == pdflist[22]
# need to remove all text after
# "\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n"
# These PDFs append a second (non-residential) table; strip it out before
# counting so district names are only matched inside the residential table.
for (i_file in 19:22){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # (gsub runs per page, deleting everything from the second table's title on)
  temptext <- gsub("\n List of non-residential building with 2 or more confirmed cases of novel coronavirus infection\n.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    # (two districts appear under alternate spellings, hence the branches)
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    # (cbind coerces all fields to character; fixed later in the wrangle step)
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# only on building_list_eng_20200217_231207.pdf == pdflist[23]
# This report's second (non-residential) table starts on its own page, so we
# drop whole pages from the first matching page onward instead of gsub-ing.
targetfile <- paste("../00_original/", pdflist[23], sep="")
temptext <- pdf_text(targetfile)
# NOTE(review): "1:grep(...)-1" parses as "(1:g) - 1" == 0:(g-1); the 0 index
# is silently dropped by R, so the pages before the first match are kept as
# intended -- but only by accident of R's indexing.  "1:(grep(...)[1]-1)"
# would state the intent; TODO confirm each PDF has exactly one match.
temptext <- temptext[1:grep("List of non-residential buildings with confirmed cases visited after onset of symptoms", temptext)-1]
tempdate <- stringr::str_extract(pdflist[23], "2020[0-9]+")
tempdate <- format(as.Date(tempdate, "%Y%m%d"))
# count per-district mentions on the remaining pages (same logic as above)
for (i_district in 1:dim(district)[1]){
  # since districts in PDF always appear after a line break
  # set up regex pattern to match
  if (district$district_en[i_district] == "Sha Tin"){
    targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
  } else if (district$district_en[i_district] == "Central & Western") {
    targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
  } else {
    targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
  }
  # grep all matches, then unlist to get matching locations
  tempcount <- gregexpr(targetdistrict, temptext)
  tempcount <- unlist(tempcount)
  # note non-matches are -1
  # get num of district's cases
  numofcase <- sum(tempcount!=-1)
  # cbind required information, then rbind it to the df master2wk
  temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                   day = tempdate,
                   case = numofcase)
  master2wk <- rbind(master2wk, temprow)
}
# from building_list_eng_20200218_215230.pdf == pdflist[24]
# to building_list_eng_20200219_173655.pdf == pdflist[25]
# Same scheme as file 23: the second table's title changed again, so a new
# grep string; pages from the first matching page onward are discarded.
for (i_file in 24:25){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # NOTE(review): "1:grep(...)-1" is "(1:g)-1" == 0:(g-1); index 0 is dropped,
  # so this keeps pages before the first match -- works, but only by accident.
  temptext <- temptext[1:grep("List of buildings with confirmed cases visited after onset of symptoms or", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# from building_list_eng_20200220_224016.pdf == pdflist[26]
# to building_list_eng_20200221_213153.pdf == pdflist[27]
# Yet another second-table title variant for these two report dates.
for (i_file in 26:27){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # NOTE(review): same accidental "(1:g)-1" indexing as the loop above.
  temptext <- temptext[1:grep("List of buildings with probable/confirmed cases visited after onset of symptoms", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# for building_list_eng_20200222_000000.pdf == pdflist[28]
# One-off report date with its own second-table title.
targetfile <- paste("../00_original/", pdflist[28], sep="")
temptext <- pdf_text(targetfile)
# NOTE(review): "1:grep(...)-1" is "(1:g)-1" == 0:(g-1); index 0 is dropped,
# so pages before the first match are kept -- accidental but working.
temptext <- temptext[1:grep("List of buildings with probable/confirmed cases visited from 2 days before onset of symptoms", temptext)-1]
tempdate <- stringr::str_extract(pdflist[28], "2020[0-9]+")
tempdate <- format(as.Date(tempdate, "%Y%m%d"))
# count per-district mentions on the remaining pages
for (i_district in 1:dim(district)[1]){
  # since districts in PDF always appear after a line break
  # set up regex pattern to match
  if (district$district_en[i_district] == "Sha Tin"){
    targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
  } else if (district$district_en[i_district] == "Central & Western") {
    targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
  } else {
    targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
  }
  # grep all matches, then unlist to get matching locations
  tempcount <- gregexpr(targetdistrict, temptext)
  tempcount <- unlist(tempcount)
  # note non-matches are -1
  # get num of district's cases
  numofcase <- sum(tempcount!=-1)
  # cbind required information, then rbind it to the df master2wk
  temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                   day = tempdate,
                   case = numofcase)
  master2wk <- rbind(master2wk, temprow)
}
# List of buildings with confirmed cases visited from 2 days before onset of symptoms
# building_list_eng_20200223_225143.pdf == pdflist[29]
# building_list_eng_20200310_225716.pdf == pdflist[45]
# From here the second table may start mid-page, so a two-step trim is used.
for (i_file in 29:45){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  # NOTE(review): assumes this title string marks the start of the second
  # table (and not the first table's own heading) -- verify against the PDFs.
  temptext <- temptext[1:grep("List of buildings with confirmed cases visited from 2 days before onset of symptoms", temptext)]
  temptext <- gsub("List of buildings with confirmed cases visited from 2 days before onset of symptoms.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# List of buildings with confirmed / probable cases visited from 2 days
# from building_list_eng_20200311_223130.pdf == pdflist[46]
# to building_list_eng_20200326_000000.pdf == pdflist[61]
# Second table again starts on its own page for this period.
for (i_file in 46:61){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # NOTE(review): "1:grep(...)-1" is "(1:g)-1" == 0:(g-1); index 0 is dropped,
  # so pages before the first match are kept -- accidental but working.
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited"
# from building_list_eng_20200327_234929.pdf == pdflist[62]
# to building_list_eng_20200425_000000.pdf == pdflist[91]
# Shorter second-table title variant for this period.
for (i_file in 62:91){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # NOTE(review): same accidental "(1:g)-1" indexing as the loop above.
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited", temptext)-1]
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings visited by confirmed / probable cases"
# from building_list_eng_20200426_000000.pdf == pdflist[92]
# to building_list_eng_20200428.pdf == pdflist[94]
# Back to the two-step trim: the second table can start mid-page.
for (i_file in 92:94){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings visited by confirmed / probable cases", temptext)]
  temptext <- gsub("List of buildings visited by confirmed / probable cases.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of Buildings Visited by Confirmed / Probable Cases"
# from building_list_eng_20200429.pdf == pdflist[95]
# to building_list_eng_20200707.pdf == pdflist[164]
# Title-case variant of the same second-table heading for this period.
for (i_file in 95:164){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of Buildings Visited by Confirmed / Probable Cases", temptext)]
  temptext <- gsub("List of Buildings Visited by Confirmed / Probable Cases.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days"
# from building_list_eng_20200708.pdf == pdflist[165]
# to building_list_eng_20200829.pdf == pdflist[217]
# Same two-step trim; second-table title changed again for this period.
for (i_file in 165:217){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days", temptext)]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# from building_list_eng_20200830.pdf == pdflist[218]
# to building_list_eng_20201231.pdf == pdflist[340]
# Longer title variant ("... before onset") through the end of 2020.
for (i_file in 218:340){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "2020[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days before onset", temptext)]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days before onset.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
# "List of buildings with confirmed / probable cases visited from 2 days before onset"
# from building_list_eng_20200830.pdf == pdflist[218]
# to current
# 2021 onward: identical trimming, but the date regex widens to "202[0-9]+"
# so file names dated 2021+ are parsed correctly.
for (i_file in 341:length(pdflist)){
  # loop through each target file, also retrieve date from file name
  targetfile <- paste("../00_original/", pdflist[i_file], sep="")
  temptext <- pdf_text(targetfile)
  tempdate <- stringr::str_extract(pdflist[i_file], "202[0-9]+")
  tempdate <- format(as.Date(tempdate, "%Y%m%d"))
  # remove text from second, irrelevant table
  # step 1, remove pages after the beginning of the table
  # step 2, remove text after the title of the irrelevant table
  temptext <- temptext[1:grep("List of buildings with confirmed / probable cases visited from 2 days before onset", temptext)]
  temptext <- gsub("List of buildings with confirmed / probable cases visited from 2 days before onset.*", "", temptext)
  # loop through each district
  for (i_district in 1:dim(district)[1]){
    # since districts in PDF always appear after a line break
    # set up regex pattern to match
    if (district$district_en[i_district] == "Sha Tin"){
      targetdistrict <- paste("\\\n[ ]*", "(Shatin|Sha Tin)", sep="")
    } else if (district$district_en[i_district] == "Central & Western") {
      targetdistrict <- paste("\\\n[ ]*", "(Central|Central & Western)", sep="")
    } else {
      targetdistrict <- paste("\\\n[ ]*", district$district_en[i_district], sep="")
    }
    # grep all matches, then unlist to get matching locations
    tempcount <- gregexpr(targetdistrict, temptext)
    tempcount <- unlist(tempcount)
    # note non-matches are -1
    # get num of district's cases
    numofcase <- sum(tempcount!=-1)
    # cbind required information, then rbind it to the df master2wk
    temprow <- cbind(district_en = as.character(district$district_en[i_district]),
                     day = tempdate,
                     case = numofcase)
    master2wk <- rbind(master2wk, temprow)
  }
}
#############################
# done with data extraction #
#############################
# Persist the raw district/day/case table (still all-character at this point).
write.csv(master2wk, "hk-covid19-2wk.csv", row.names=F)
###################################
# data wrangle / summary for plot #
###################################
# cbind() built character columns above; restore numeric cases + real dates.
master2wk$case <- as.numeric(as.character(master2wk$case))
master2wk$day <- as.Date(master2wk$day)
masterday <- master2wk
# Each report covers buildings visited in the past 14 days, so dividing by 14
# approximates the average daily new cases over that window.
masterday$case <- round(masterday$case/14, 3)
masterday <- merge(masterday, district, by="district_en", all.x=T)
# Cases per 100k residents (assumes the district csv provides a "pop" column).
masterday$case100k <- round(masterday$case / masterday$pop*100000, 2)
# aesthetics
masterday <- masterday[order(masterday$day, masterday$district_en), ]
# NOTE(review): positional reorder -- assumes the merged columns are
# district_en, day, case, district_ch, pop, case100k in that order; verify
# if the district csv schema ever changes.
masterday <- masterday[, c(1, 4, 5, 2, 3, 6)]
day_earliest <- min(masterday$day)
day_latest <- max(masterday$day)
# find latest PDF name
# extract data for map
pdflist[length(pdflist)]
latestdate <- stringr::str_extract(pdflist[length(pdflist)], "202[0-9]+")
latestdate <- format(as.Date(latestdate, "%Y%m%d"))
# Keep only the most recent report date for the choropleth map.
master2wk_latest <- subset(master2wk, master2wk$day == latestdate)
master2wk_latest <- merge(master2wk_latest, district, by="district_en")
master2wk_latest$case100k <- round(master2wk_latest$case / master2wk_latest$pop * 100000, 3)
# Drop columns the map does not need, then rename for the geojson join key.
master2wk_latest$day <- NULL
master2wk_latest$district_ch <- NULL
master2wk_latest$pop <- NULL
names(master2wk_latest) <- c("District", "case", "case100k")
head(masterday)
#########
# plots #
#########
# Interactive line chart of the 14-day average daily new cases per district.
# Titles/labels are intentionally in Chinese (the dashboard's language);
# district_ch is the Chinese district name used for the legend.
fig_day <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case, color=~district_ch) %>%
  layout(title=list(text="14日平均每日新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
# Same chart, normalised per 100,000 residents.
fig_100k <-
  plot_ly() %>%
  add_trace(data=masterday, type="scatter", mode="lines",
            x=~day, y=~case100k, color=~district_ch) %>%
  layout(title=list(text="14日平均每日每十萬人新增新型肺炎個案", y=0.99),
         xaxis=list(title="日期", range=c(day_earliest-1, day_latest +1)),
         yaxis=list(title="14日平均每十萬人新增新型肺炎個案"),
         legend=list(x=0.025, y=0.975))
#######
# map #
#######
library(sf)
library(leaflet)
# read, then merge with numbers
# (the geojson is expected to carry a "District" join key and a Chinese name
#  column "地區" -- verify if the boundary file is replaced)
districtmap <- st_read("../00_original/hksar_18_district_boundary.json")
districtmap <- merge(districtmap, master2wk_latest, by="District")
# cf https://rstudio.github.io/leaflet/choropleths.html
# Choropleth of raw 14-day counts; manual bins sized for HK's distribution.
bins_raw <- c(0, 1, 2, 5, 10, 15, 70, 100, Inf)
palette_raw <- colorBin("Reds", domain=districtmap$case, bins=bins_raw)
# Hover label: "<district>: N confirmed/probable cases in the past 14 days".
label_raw <- sprintf("<strong>%s</strong><br/>過去14日有 %g 宗確診或疑似個案",
                     districtmap$地區, districtmap$case) %>%
  lapply(htmltools::HTML)
map_raw <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_raw(case), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_raw) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
# Second choropleth: counts per 100,000 residents, with its own bin scale.
bins_100k <- c(0, 1, 2.5, 5, 10, 25, Inf)
palette_100k <- colorBin("Reds", domain=districtmap$case100k, bins=bins_100k)
label_100k <- sprintf("<strong>%s</strong><br/>過去14日,每十萬人有 %g 宗確診或疑似個案",
                      districtmap$地區, districtmap$case100k) %>%
  lapply(htmltools::HTML)
map_100k <-
  leaflet() %>%
  setView(114.167265, 22.360296, zoom=10) %>%
  addPolygons(data=districtmap, color="black", weight=2, dashArray="3",
              fillColor=~palette_100k(case100k), fillOpacity=0.7,
              highlight=highlightOptions(weight=5, dashArray = "", bringToFront = TRUE),
              label=label_100k) %>%
  addProviderTiles(providers$Esri.WorldTopoMap)
##################
# Make dashboard #
##################
# make dashboard, copy to root directory
# Knit index.Rmd (which embeds the figures/maps built above) and publish the
# resulting HTML into ../docs/ (presumably the folder served as the site).
rmarkdown::render(input = "index.Rmd")
file.copy("index.html", "../docs/", overwrite=T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iotanalytics_operations.R
\name{describe_logging_options}
\alias{describe_logging_options}
\title{Retrieves the current settings of the AWS IoT Analytics logging options}
\usage{
describe_logging_options()
}
\description{
Retrieves the current settings of the AWS IoT Analytics logging options.
}
\section{Accepted Parameters}{
\preformatted{describe_logging_options()
}
}
|
/service/paws.iotanalytics/man/describe_logging_options.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 457
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iotanalytics_operations.R
\name{describe_logging_options}
\alias{describe_logging_options}
\title{Retrieves the current settings of the AWS IoT Analytics logging options}
\usage{
describe_logging_options()
}
\description{
Retrieves the current settings of the AWS IoT Analytics logging options.
}
\section{Accepted Parameters}{
\preformatted{describe_logging_options()
}
}
|
#Pick rows to leave out... for a LOO on the rows
#
#'Identify a matrix of row indices for LOO_rows
#'
#'@param DESIGN_rows DESIGN matrix for rows (stimuli nested in categories):
#'  a list with AB (number of stimuli), B (number of categories), and mat
#'  (an AB x B 0/1 indicator matrix of category membership)
#'@param multiplier parameter to increase the number of iterations
#'@return A B x (AB * B * multiplier) matrix; each column holds one left-out
#'  row index per category, in category order
#'@export
Pick_rows_to_leave_out <- function(DESIGN_rows = DESIGN_rows, multiplier = 1){
  #Define the total number of iterations.
  #For each iteration, we'll predict the positions of B randomly-selected stimuli.
  #For now, let's choose to iterate AB*B times. This will ensure that each row is Predicted at least B times.
  #Can also boost that number of iterations by multiplier
  total_iter <- DESIGN_rows$AB * DESIGN_rows$B * multiplier
  Leave_out_these_rows <- matrix(NA, DESIGN_rows$B, total_iter)
  counter <- 0
  Left_out_rows <- matrix(NA, nrow(Leave_out_these_rows), 1)
  for(b in seq_len(DESIGN_rows$B * multiplier)){
    for(ab in seq_len(DESIGN_rows$AB)){
      counter <- counter + 1
      #identify the category B of stimulus ab
      Category_of_primary_Left_out <- which(DESIGN_rows$mat[ab, ] == 1)
      #Assign that left out stimulus (row number) to its category (to keep the order of Left_out consistent)
      Left_out_rows[Category_of_primary_Left_out] <- ab
      #sample 1 stimulus from each of the other Bs
      The_other_categories <- seq_len(ncol(DESIGN_rows$mat))[-Category_of_primary_Left_out]
      for(j in The_other_categories){
        members <- which(DESIGN_rows$mat[, j] == 1)
        #BUG FIX: sample(x, 1) treats a length-one numeric x as 1:x, so a
        #category with a single stimulus could draw a wrong row index.
        #Index into the member vector via sample.int to always pick a member.
        Left_out_rows[j] <- members[sample.int(length(members), 1)]
      }
      #and store as columns of Leave_out_these_rows
      Leave_out_these_rows[, counter] <- Left_out_rows
    }
  }
  return(Leave_out_these_rows)
}
|
/R/Pick_rows_to_leave_out.R
|
no_license
|
michaelkriegsman/DiDiSTATIS
|
R
| false
| false
| 1,686
|
r
|
#Pick rows to leave out... for a LOO on the rows
#
#'Identify a matrix of row indices for LOO_rows
#'
#'@param DESIGN_rows DESIGN matrix for rows (stimuli nested in categories):
#'  a list with AB (number of stimuli), B (number of categories), and mat
#'  (an AB x B 0/1 indicator matrix of category membership)
#'@param multiplier parameter to increase the number of iterations
#'@return A B x (AB * B * multiplier) matrix; each column holds one left-out
#'  row index per category, in category order
#'@export
Pick_rows_to_leave_out <- function(DESIGN_rows = DESIGN_rows, multiplier = 1){
  #Define the total number of iterations.
  #For each iteration, we'll predict the positions of B randomly-selected stimuli.
  #For now, let's choose to iterate AB*B times. This will ensure that each row is Predicted at least B times.
  #Can also boost that number of iterations by multiplier
  total_iter <- DESIGN_rows$AB * DESIGN_rows$B * multiplier
  Leave_out_these_rows <- matrix(NA, DESIGN_rows$B, total_iter)
  counter <- 0
  Left_out_rows <- matrix(NA, nrow(Leave_out_these_rows), 1)
  for(b in seq_len(DESIGN_rows$B * multiplier)){
    for(ab in seq_len(DESIGN_rows$AB)){
      counter <- counter + 1
      #identify the category B of stimulus ab
      Category_of_primary_Left_out <- which(DESIGN_rows$mat[ab, ] == 1)
      #Assign that left out stimulus (row number) to its category (to keep the order of Left_out consistent)
      Left_out_rows[Category_of_primary_Left_out] <- ab
      #sample 1 stimulus from each of the other Bs
      The_other_categories <- seq_len(ncol(DESIGN_rows$mat))[-Category_of_primary_Left_out]
      for(j in The_other_categories){
        members <- which(DESIGN_rows$mat[, j] == 1)
        #BUG FIX: sample(x, 1) treats a length-one numeric x as 1:x, so a
        #category with a single stimulus could draw a wrong row index.
        #Index into the member vector via sample.int to always pick a member.
        Left_out_rows[j] <- members[sample.int(length(members), 1)]
      }
      #and store as columns of Leave_out_these_rows
      Leave_out_these_rows[, counter] <- Left_out_rows
    }
  }
  return(Leave_out_these_rows)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wksxp.R
\name{wksxp}
\alias{wksxp}
\alias{parse_wksxp}
\alias{as_wksxp}
\alias{as_wksxp.default}
\alias{as_wksxp.character}
\alias{as_wksxp.wk_wksxp}
\alias{as_wksxp.wk_wkt}
\alias{as_wksxp.wk_wkb}
\title{Mark lists as well-known "S" expressions}
\usage{
wksxp(x = list())
parse_wksxp(x)
as_wksxp(x, ...)
\method{as_wksxp}{default}(x, ...)
\method{as_wksxp}{character}(x, ...)
\method{as_wksxp}{wk_wksxp}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
\method{as_wksxp}{wk_wkt}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
\method{as_wksxp}{wk_wkb}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
}
\arguments{
\item{x}{A \code{\link[=list]{list()}} of features (see details)}
\item{...}{Unused}
\item{include_z}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
\item{include_m}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
\item{include_srid}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
}
\value{
A \code{\link[=new_wk_wksxp]{new_wk_wksxp()}}
}
\description{
Mark lists as well-known "S" expressions
}
\details{
The "wksxp" format is experimental, but was written as a way to
make it possible for packages to generate \code{\link[=wkb]{wkb()}} vectors without
needing to use C++. The format represents geometries as following:
\itemize{
\item points are matrices with zero or one row
\item linestrings are matrices (one row per point)
\item polygons are lists of matrices (one matrix per ring)
\item multi (point, linestring, polygon) types are lists
of the simple types (without any meta information)
\item collections are lists of any type (must contain meta)
}
Any geometry that isn't in a multi type must have meta information
encoded as attributes. The attributes that are used are:
\itemize{
\item \code{class}: "wk_(point|linestring|...)"
\item \code{has_z}: use \code{TRUE} if there is a Z coordinate
(may be omitted if false)
\item \code{has_m}: use \code{TRUE} if there is an M coordinate
(may be omitted if false)
}
This is similar to the \code{sf::st_sfc()} format, but the formats aren't
interchangeable.
}
\examples{
wksxp(wkt_translate_wksxp("POINT (20 10)"))
}
|
/wk/man/wksxp.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| true
| 2,868
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wksxp.R
\name{wksxp}
\alias{wksxp}
\alias{parse_wksxp}
\alias{as_wksxp}
\alias{as_wksxp.default}
\alias{as_wksxp.character}
\alias{as_wksxp.wk_wksxp}
\alias{as_wksxp.wk_wkt}
\alias{as_wksxp.wk_wkb}
\title{Mark lists as well-known "S" expressions}
\usage{
wksxp(x = list())
parse_wksxp(x)
as_wksxp(x, ...)
\method{as_wksxp}{default}(x, ...)
\method{as_wksxp}{character}(x, ...)
\method{as_wksxp}{wk_wksxp}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
\method{as_wksxp}{wk_wkt}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
\method{as_wksxp}{wk_wkb}(x, ..., include_z = NULL, include_m = NULL, include_srid = NULL)
}
\arguments{
\item{x}{A \code{\link[=list]{list()}} of features (see details)}
\item{...}{Unused}
\item{include_z}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
\item{include_m}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
\item{include_srid}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
}
\value{
A \code{\link[=new_wk_wksxp]{new_wk_wksxp()}}
}
\description{
Mark lists as well-known "S" expressions
}
\details{
The "wksxp" format is experimental, but was written as a way to
make it possible for packages to generate \code{\link[=wkb]{wkb()}} vectors without
needing to use C++. The format represents geometries as following:
\itemize{
\item points are matrices with zero or one row
\item linestrings are matrices (one row per point)
\item polygons are lists of matrices (one matrix per ring)
\item multi (point, linestring, polygon) types are lists
of the simple types (without any meta information)
\item collections are lists of any type (must contain meta)
}
Any geometry that isn't in a multi type must have meta information
encoded as attributes. The attributes that are used are:
\itemize{
\item \code{class}: "wk_(point|linestring|...)"
\item \code{has_z}: use \code{TRUE} if there is a Z coordinate
(may be omitted if false)
\item \code{has_m}: use \code{TRUE} if there is an M coordinate
(may be omitted if false)
}
This is similar to the \code{sf::st_sfc()} format, but the formats aren't
interchangeable.
}
\examples{
wksxp(wkt_translate_wksxp("POINT (20 10)"))
}
|
library(magick)
library(tidyverse)

img = image_read("data/sample.jpg")

chip_dir = "data/chipped/"
chip_size = 512 # pixels
overlap = 50 # percent

if(chip_size %% 4 != 0) {
  stop("Chip size must be a multiple of 4.")
}

### Step between successive chip origins: half of (chip_size * overlap
### fraction), floored, then doubled -- i.e. rounded down to an even integer.
### BUG FIX: the original `((chip_size*(overlap/100))/2 %>% floor)*2` applied
### floor() to the literal 2 (`%>%` binds tighter than `/` in R), so the
### quotient itself was never floored.
window_step = floor((chip_size * (overlap / 100)) / 2) * 2

### get image dims
info = image_info(img)
width = info$width
height = info$height

### number of horizontal and vertical chips (does not include the final
### rightmost and bottom-most chip in each dimension; subtract one because the
### final origin is less than a full chip-width from the edge of the image)
n_h_ch = floor(width / window_step) - 1
n_v_ch = floor(height / window_step) - 1

### get the coords of the upper left corner of each chip
corners_x = (seq_len(n_h_ch) - 1) * window_step
corners_y = (seq_len(n_v_ch) - 1) * window_step

# add a final column/row anchored to the image edge so the incomplete
# remainder is still covered by a full-size chip
corners_x = c(corners_x, (width - chip_size))
corners_y = c(corners_y, (height - chip_size))

tiles = expand_grid(x_coord = corners_x, y_coord = corners_y)
tiles$colnum = rep(seq_along(corners_x), each = length(corners_y))
tiles$rownum = rep(seq_along(corners_y), length(corners_x))
tiles$tile_id = seq_len(nrow(tiles))

## crop each tile and write it to file
for(i in tiles$tile_id) {
  # pull scalar values with $col[i]: indexing a tibble with [i, "col"]
  # returns a 1x1 tibble, which paste0() only handles by implicit coercion
  tile_x = tiles$x_coord[i]
  tile_y = tiles$y_coord[i]
  tile_colnum = tiles$colnum[i]
  tile_rownum = tiles$rownum[i]

  # magick geometry string: WIDTHxHEIGHT+XOFFSET+YOFFSET
  crop_string = paste0(chip_size, "x", chip_size, "+", tile_x, "+", tile_y)
  img_crop = image_crop(img, crop_string)

  # name for image is rownum-colnum
  name = paste0("chip_", tile_rownum, "-", tile_colnum, ".jpg")
  image_write(img_crop, paste0(chip_dir, name), format = "jpg")
}

### Write data needed to reconstruct:
# overlap percent, chip size, window step, n tiles wide, n tiles long,
# total width, total length, and how much the last col/row extends beyond
# the second-to-last.
last_col_width = nth(corners_x, -1) - nth(corners_x, -2)
last_row_width = nth(corners_y, -1) - nth(corners_y, -2)

chip_dat = data.frame(overlap = overlap, chip_size = chip_size, window_step = window_step, ncols = n_h_ch + 1, nrows = n_v_ch + 1, last_col_width, last_row_width, total_width = width, total_height = height)
write_csv(chip_dat, paste0(chip_dir, "chip_dat.csv"))
|
/scripts/chip-image.R
|
permissive
|
youngdjn/chip-and-unchip-image
|
R
| false
| false
| 2,218
|
r
|
library(magick)
library(tidyverse)

img = image_read("data/sample.jpg")

chip_dir = "data/chipped/"
chip_size = 512 # pixels
overlap = 50 # percent

if(chip_size %% 4 != 0) {
  stop("Chip size must be a multiple of 4.")
}

### Step between successive chip origins: half of (chip_size * overlap
### fraction), floored, then doubled -- i.e. rounded down to an even integer.
### BUG FIX: the original `((chip_size*(overlap/100))/2 %>% floor)*2` applied
### floor() to the literal 2 (`%>%` binds tighter than `/` in R), so the
### quotient itself was never floored.
window_step = floor((chip_size * (overlap / 100)) / 2) * 2

### get image dims
info = image_info(img)
width = info$width
height = info$height

### number of horizontal and vertical chips (does not include the final
### rightmost and bottom-most chip in each dimension; subtract one because the
### final origin is less than a full chip-width from the edge of the image)
n_h_ch = floor(width / window_step) - 1
n_v_ch = floor(height / window_step) - 1

### get the coords of the upper left corner of each chip
corners_x = (seq_len(n_h_ch) - 1) * window_step
corners_y = (seq_len(n_v_ch) - 1) * window_step

# add a final column/row anchored to the image edge so the incomplete
# remainder is still covered by a full-size chip
corners_x = c(corners_x, (width - chip_size))
corners_y = c(corners_y, (height - chip_size))

tiles = expand_grid(x_coord = corners_x, y_coord = corners_y)
tiles$colnum = rep(seq_along(corners_x), each = length(corners_y))
tiles$rownum = rep(seq_along(corners_y), length(corners_x))
tiles$tile_id = seq_len(nrow(tiles))

## crop each tile and write it to file
for(i in tiles$tile_id) {
  # pull scalar values with $col[i]: indexing a tibble with [i, "col"]
  # returns a 1x1 tibble, which paste0() only handles by implicit coercion
  tile_x = tiles$x_coord[i]
  tile_y = tiles$y_coord[i]
  tile_colnum = tiles$colnum[i]
  tile_rownum = tiles$rownum[i]

  # magick geometry string: WIDTHxHEIGHT+XOFFSET+YOFFSET
  crop_string = paste0(chip_size, "x", chip_size, "+", tile_x, "+", tile_y)
  img_crop = image_crop(img, crop_string)

  # name for image is rownum-colnum
  name = paste0("chip_", tile_rownum, "-", tile_colnum, ".jpg")
  image_write(img_crop, paste0(chip_dir, name), format = "jpg")
}

### Write data needed to reconstruct:
# overlap percent, chip size, window step, n tiles wide, n tiles long,
# total width, total length, and how much the last col/row extends beyond
# the second-to-last.
last_col_width = nth(corners_x, -1) - nth(corners_x, -2)
last_row_width = nth(corners_y, -1) - nth(corners_y, -2)

chip_dat = data.frame(overlap = overlap, chip_size = chip_size, window_step = window_step, ncols = n_h_ch + 1, nrows = n_v_ch + 1, last_col_width, last_row_width, total_width = width, total_height = height)
write_csv(chip_dat, paste0(chip_dir, "chip_dat.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{get_auroc_heat}
\alias{get_auroc_heat}
\title{Helper function to produce AUROC heatmap}
\usage{
get_auroc_heat(auroc_tibble)
}
\arguments{
\item{auroc_tibble}{Tibble with calculated AUROC}
}
\value{
returns an AUROC or Precision-Recall AUC heatmap
}
\description{
Helper function to produce AUROC heatmap
}
|
/man/get_auroc_heat.Rd
|
permissive
|
ohsu-comp-bio/decoupleRBench
|
R
| false
| true
| 401
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{get_auroc_heat}
\alias{get_auroc_heat}
\title{Helper function to produce AUROC heatmap}
\usage{
get_auroc_heat(auroc_tibble)
}
\arguments{
\item{auroc_tibble}{Tibble with calculated AUROC}
}
\value{
returns an AUROC or Precision-Recall AUC heatmap
}
\description{
Helper function to produce AUROC heatmap
}
|
#' Subset texts
#'
#' Subset a text corpus by text length and/or by the presence of given words.
#'
#' @param corpus Text corpus: a \code{data.frame} with (at least) a numeric
#'   length column \code{N} and a character column \code{Text}.
#' @param min_length Minimum length of the texts to be sampled, or \code{NULL}
#'   (the default) for no lower bound.
#' @param max_length Maximum length of the texts to be sampled, or \code{NULL}
#'   (the default) for no upper bound.
#' @param word_list A character vector of words; only texts containing at
#'   least one of them are kept. \code{NULL} (the default) disables this
#'   filter.
#' @return An object of class \code{data.frame}. When \code{word_list} is
#'   supplied, a logical column \code{In_word_list} is added.
#' @export
subset_text <- function(corpus, min_length = NULL, max_length = NULL, word_list = NULL) {
  out <- corpus

  # Apply each length bound independently so that supplying only one of
  # min_length / max_length works (the original required both or neither,
  # and silently skipped filtering when only one was NULL).
  if (!is.null(min_length)) {
    out <- out %>% dplyr::filter(N >= min_length)
  }
  if (!is.null(max_length)) {
    out <- out %>% dplyr::filter(N <= max_length)
  }

  if (!is.null(word_list)) {
    if (!is.character(word_list)) {
      stop("Invalid parameter: word_list.")
    }
    # Keep texts matching any word in the list.
    pattern <- paste(word_list, collapse = "|")
    out$In_word_list <- stringr::str_detect(out$Text, pattern)
    out <- out %>% dplyr::filter(In_word_list)
  }
  return(out)
}
|
/R/subset_text.R
|
permissive
|
nproellochs/textsampler
|
R
| false
| false
| 1,003
|
r
|
#' Subset texts
#'
#' Subset a text corpus by text length and/or by the presence of given words.
#'
#' @param corpus Text corpus: a \code{data.frame} with (at least) a numeric
#'   length column \code{N} and a character column \code{Text}.
#' @param min_length Minimum length of the texts to be sampled, or \code{NULL}
#'   (the default) for no lower bound.
#' @param max_length Maximum length of the texts to be sampled, or \code{NULL}
#'   (the default) for no upper bound.
#' @param word_list A character vector of words; only texts containing at
#'   least one of them are kept. \code{NULL} (the default) disables this
#'   filter.
#' @return An object of class \code{data.frame}. When \code{word_list} is
#'   supplied, a logical column \code{In_word_list} is added.
#' @export
subset_text <- function(corpus, min_length = NULL, max_length = NULL, word_list = NULL) {
  out <- corpus

  # Apply each length bound independently so that supplying only one of
  # min_length / max_length works (the original required both or neither,
  # and silently skipped filtering when only one was NULL).
  if (!is.null(min_length)) {
    out <- out %>% dplyr::filter(N >= min_length)
  }
  if (!is.null(max_length)) {
    out <- out %>% dplyr::filter(N <= max_length)
  }

  if (!is.null(word_list)) {
    if (!is.character(word_list)) {
      stop("Invalid parameter: word_list.")
    }
    # Keep texts matching any word in the list.
    pattern <- paste(word_list, collapse = "|")
    out$In_word_list <- stringr::str_detect(out$Text, pattern)
    out <- out %>% dplyr::filter(In_word_list)
  }
  return(out)
}
|
#' @title creates a network module at the repeat level to investigate repeat initiating pathways
#' @description this differs from wgcna.R, where wgcna.R creates a phenotypic module analysis of genes and phenotypic repeat data. wrcna will run single/block wise adjacency/correlation matrices of repeats into modules of transcripts and also returns an object ready for wgcnaDbLite that includes repeat phenotypic data. This uses the recommended 'bicor' function which is a bi-weight mid-correlation calculation. For normalization, the default uses TMM normalization and a log2 transformation of each cpm for genes. We use 'signed' networks based on the WGCNA FAQ. This will create the blockwise/single module data frame, run the soft-thresholding, and create a correlation heatmap. The downstream method is wgcna_analsyis, which investigates a specific module color and specific biotype (phenotypic relationship). tx_biotype is included as phenotypic data.
#' @param kexp a kexp; a 2 group stage is preferred
#' @param read.cutoff integer floor filter
#' @param minBranch integer minimum cluster size when cutting the sample tree
#' @param whichWGCNA character single or block analysis, block is more sensitive
#' @param entrezOnly boolean, soon to be deprecated because entrez is auto filtered when enrichment testing (NOTE(review): not a formal argument of this function)
#' @param species char, mouse or human
#' @param selectedPower 6 usually is good. can rerun if NULL (NULL triggers the interactive soft-threshold picking step below)
#' @param intBiotypes character the tx_biotypes of interest
#' @param useAllBiotypes boolean if FALSE then intBiotypes are used, if TRUE then the correlations are checked against all tx_biotypes
#' @param copyNormalize boolean, if true will execute copyNumberNormalize to normalize the repeat biotype counts by the copy number summation of the corresponding family type copy numbers found in repBase (NOTE(review): not a formal argument of this function)
#' @param tmm.norm boolean, if TRUE (and how="cpm") counts are TMM-normalized via edgeR before the log2 transform
#' @param useBiCor boolean, if TRUE module-trait relations use WGCNA::bicor, otherwise Pearson cor
#' @param how character, "cpm" or "tpm" bundle summarization
#' @param design optional model matrix (rownames must match colnames(kexp)); when supplied with how="cpm", voom is used instead of cpm()
#' @param saveToFile boolean, if TRUE writes PDFs and the result list (.RData) to the working directory
#' @import WGCNA
#' @import edgeR
#' @import limma
#' @export
#' @return images and cluster at the gene and repeat level
wrcna<-function(kexp,read.cutoff=2,minBranch=2,whichWGCNA=c("single","block"),species=c("Homo.sapiens","Mus.musculus"),selectedPower=6, intBiotypes=c("acromeric","centromeric","CR1","Alu","DNA transposon","Endogenous Retrovirus","ERV1","ERV3","ERVK","ERVL","hAT","HSFAU","L1","L2","LTR Retrotransposon","Eutr1","Merlin","PiggyBac","Pseudogene","Repetitive element","satellite","snRNA","SVA","TcMar","telo","Transposable Element","Satellite"),useAllBiotypes=FALSE,tmm.norm=TRUE,useBiCor=TRUE,how=c("cpm","tpm"), design=NULL,saveToFile=FALSE){
# Restrict a large kexp to its repeat elements up front (findRepeats is a
# package helper defined elsewhere).
if(nrow(kexp)>20000){
kexp<-findRepeats(kexp)
}
how<-match.arg(how,c("cpm","tpm"))
byWhich<-"repeat"
##prepare data
whichWGCNA<-match.arg(whichWGCNA,c("single","block"))
species<-match.arg(species,c("Homo.sapiens","Mus.musculus"))
rexp<-findRepeats(kexp)
# cpm path: per-repeat (tx_id) expression matrix plus per-biotype trait matrix,
# with ERCC spike-ins and ENS gene rows removed.
if(how=="cpm"){
cpm<-collapseBundles(rexp,"tx_id",read.cutoff=read.cutoff)
cpm<-cpm[!grepl("^ERCC",rownames(cpm)),]
cpm<-cpm[!grepl("^ENS",rownames(cpm)),]
rpm<-collapseBundles(rexp,"tx_biotype",read.cutoff=read.cutoff)
rpm<-rpm[!grepl("^ERCC",rownames(rpm)),]
if(tmm.norm==TRUE){
d<-DGEList(counts=cpm)
d<-calcNormFactors(d)
rd<-DGEList(counts=rpm)
rd<-calcNormFactors(rd)
if(is.null(design)==TRUE){
message('cpm normalization')
cpm.norm<-cpm(d,normalized.lib.sizes=TRUE,log=FALSE)
rdm.norm<-cpm(rd,normalized.lib.sizes=TRUE,log=FALSE)
rpm<-log2(1+rdm.norm)
cpm<-log2(1+cpm.norm)
cpm.norm<-NULL
rdm.norm<-NULL
}else{
stopifnot(all(rownames(design)==colnames(kexp)))
message('voom-ing')
res.voom<-voom(d,design)
cpm.norm<-res.voom$E ##log2 normalized
rep.norm<-voom(rd,design)
rdm.norm<-rep.norm$E ##log2 normalized tx_biotypes
rpm<-rdm.norm ##log2 norm
cpm<-cpm.norm #log2 norm
cpm.norm<-NULL
rdm.norm<-NULL
# NOTE(review): voom output is already log2-scale, yet log2(1+x) is applied
# again here -- confirm this double transform is intentional.
cpm<-log2(1+cpm)
rpm<-log2(1+rpm)
}##if design is input
}# tmm.norm==TRUE
}else if(how=="tpm"){
cpm<-collapseTpm(rexp,"gene_id",read.cutoff=read.cutoff)
cpm<-cpm[!grepl("^ERCC",rownames(cpm)),]
cpm<-cpm[!grepl("^ENS",rownames(cpm)),]
rpm<-collapseTpm(rexp,"tx_biotype",read.cutoff=read.cutoff)
rpm<-rpm[!grepl("^ERCC",rownames(rpm)),]
cpm<-log2(1+cpm)
rpm<-log2(1+rpm) ##log2 transform of repeats.
}
datExpr0<-t(cpm)
gsg<-goodSamplesGenes(datExpr0,verbose=3)
##rows must be SAMPLES columns repeats
if(useAllBiotypes==FALSE){
#select columns of intBiotypes
stopifnot(is.null(intBiotypes)==FALSE)
rpm<-rpm[rownames(rpm)%in%intBiotypes,]
}
datTraits<-t(rpm)
datTraits<-as.data.frame(datTraits)
stopifnot(nrow(datExpr0)==nrow(datTraits))
if (!gsg$allOK)
{
# Optionally, print the gene and sample names that were removed:
if (sum(!gsg$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(datExpr0)[!gsg$goodGenes], collapse = ", ")));
if (sum(!gsg$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(datExpr0)[!gsg$goodSamples], collapse = ", ")));
# Remove the offending genes and samples from the data:
datExpr0 = datExpr0[gsg$goodSamples, gsg$goodGenes]
}
sampleTree = hclust(dist(datExpr0), method = "average");
# Plot the sample tree: Open a graphic output window of size 12 by 9 inches
# The user should change the dimensions if the window is too large or too small.
sizeGrWindow(12,9)
#pdf(file = "Plots/sampleClustering.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main =paste0("Sample clustering to detect outliers"),
sub="",
xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
# readHeight()/readkey()/readPower() appear to be interactive prompts defined
# elsewhere in this package -- this function requires an attended session.
cutHeight<-readHeight()
# Plot a line to show the cut
abline(h = cutHeight, col = "red");
readkey()
# Determine cluster under the line
clust = cutreeStatic(sampleTree, cutHeight = cutHeight, minSize = minBranch)
print(table(clust))
# clust 1 contains the samples we want to keep.
keepSamples = (clust!=0)
print(paste0("samples to omit ",colnames(rexp)[which(keepSamples==FALSE)]))
datExpr = datExpr0[keepSamples, ]
nGenes = ncol(datExpr)
nSamples = nrow(datExpr)
# Keep the trait rows in sync with any samples dropped above.
if((nrow(datExpr)!=nrow(datExpr0))==TRUE){
datTraits<-datTraits[keepSamples,]
}
# Re-cluster samples
sampleTree2 = hclust(dist(datExpr), method = "average")
traitColors = numbers2colors(datTraits, signed = FALSE);
plotDendroAndColors(sampleTree2, traitColors,
groupLabels = names(datTraits),
marAll=c(1,11,3,3),
main=paste0("Repeat ",how," Module TxBiotype Correlation Samples"))
readkey()
if(saveToFile==TRUE){
pdf(paste0("RepeatMM_",how,"_TxBiotype_Correlation_Samples.pdf"),width=12,height=9)
plotDendroAndColors(sampleTree2, traitColors,
groupLabels = names(datTraits),
marAll=c(1,11,3,3),
main=paste0("Repeat ",how," Module TxBiotype Correlation Samples"))
dev.off()
}
# Choose a set of soft-thresholding powers (interactive, only when
# selectedPower is NULL).
if(is.null(selectedPower)==TRUE){
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Call the network topology analysis function
sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5)
# Plot the results:
sizeGrWindow(9, 5)
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
y<-( -sign(sft$fitIndices[,3])*sft$fitIndices[,2])
x<-sft$fitIndices[,1]
plot(x,y,
xlab="Soft Threshold (power)",
ylab="Scale Free Topology Model Fit,signed R^2",
type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.80,col="red")
# Mean connectivity as a function of the soft-thresholding power
readkey()
y2<-sft$fitIndices[,5]
plot(x, y2,
xlab="Soft Threshold (power)",ylab="Mean Connectivity",
type="n",
main = paste("Mean connectivity"))
text(x, y2,
labels=powers,cex=cex1,col="red");
selectedPower<-readPower()
if(saveToFile==TRUE){
pdf(paste0("RepeatModule_",how,"_soft_ThresholdPower.pdf"),width=12,height=9)
plot(x,y,
xlab=paste0("RE ",how," Soft Threshold (power)"),
ylab="RE Scale Free Topology Model Fit,signed R^2",
type="n",
main = paste("RE Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
abline(h=0.80,col="red")
y2<-sft$fitIndices[,5]
plot(x, y2,
xlab="RE Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("RE Mean connectivity"))
text(x, y2,
labels=powers,cex=cex1,col="red");
dev.off()
} #save PDF
} #selectedPower NULL
message("annotating...")
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
####### ensembl_gene_id entrezgene hgnc_symbol description add data here
annot<-DataFrame(rowRanges(rexp)[!duplicated(rowRanges(rexp)$tx_id)])
txID<-grep("tx_id",colnames(annot))
entrezID<-grep("entrezid",colnames(annot))
geneID<-grep("gene_id",colnames(annot))
txBioID<-grep("tx_biotype",colnames(annot))
annot<-annot[,c(txID,entrezID,geneID,txBioID)]
annot<-annot[,!grepl("^X.",colnames(annot))]
colnames(annot)<-c("ensembl_gene_id","entrezid","hgnc_symbol","description")
annot$entrezgene<-"NA"
annot<-as.data.frame(annot)
if(whichWGCNA=="single"){
##auto####################################################
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
#############################
enableWGCNAThreads()
net = blockwiseModules(datExpr, power = selectedPower,
networkType="signed",
corType="bicor",
TOMType = "signed", minModuleSize = 30,
reassignThreshold = 0, mergeCutHeight = 0.25,
numericLabels = TRUE, pamRespectsDendro = FALSE,
saveTOMs = TRUE,
saveTOMFileBase = "rwasingleTOM",
verbose = 3)
# Convert labels to colors for plotting
# Plot the dendrogram and the module colors underneath
bwLabels<-net$colors ###for saving
bwModuleColors = labels2colors(net$colors)
MEs = net$MEs; ##use the module network calculation, do not recalculate 2nd time
#plots each gene tree one by one
wgcna_plotAll_dendrograms(bwnet=net,whichWGCNA="single",bwModuleColors=bwModuleColors,bwLabels=bwLabels,how=how,byWhich="repeat")
nGenes = ncol(datExpr);
nSamples = nrow(datExpr);
geneTree = net$dendrograms;
if(useBiCor==TRUE){
moduleTraitCor<-bicor(MEs,datTraits)
moduleTraitPvalue = bicorAndPvalue(MEs,datTraits,use="pairwise.complete.obs",alternative="two.sided")[["p"]]
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
} else {
moduleTraitCor = cor(MEs, datTraits, use = "p");
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples);
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}
# NOTE(review): this list names the flag 'usedbiCor' while the block branch
# below names it 'biCor' -- downstream consumers should be checked.
rnames <-list(datExpr=datExpr,
datTraits=datTraits,
annot=annot,
MEs=MEs,
moduleLabels=bwLabels,
moduleColors=bwModuleColors,
geneTree=geneTree,
moduleTraitCor=moduleTraitCor,
moduleTraitPvalue=moduleTraitPvalue,
modulePvalFisher=modulePvalFisher,
usedbiCor=useBiCor,
how=how,
byWhich="repeat")
} ##single block should have 1 module per datTraits column
if(whichWGCNA=="block"){
##############BLOCK LEVEL ###################
message("networking...")
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
##############################################
enableWGCNAThreads()
bwnet = blockwiseModules(datExpr,
maxBlockSize = 4000,
power = selectedPower,
networkType="signed",
TOMType = "signed",
corType="bicor",
minModuleSize = 30,
reassignThreshold = 0,
mergeCutHeight = 0.25,
numericLabels = TRUE,
saveTOMs = TRUE,
saveTOMFileBase = "rwaTOM-blockwise",
verbose = 3)
# NOTE(review): matchLabels is called with bwnet$colors as both source and
# reference (a self-comparison); a reference labeling from a single-block run
# may have been intended.
bwLabels = matchLabels(bwnet$colors,bwnet$colors)
# Convert labels to colors for plotting
bwModuleColors = labels2colors(bwLabels)
geneTree<-bwnet$dendrograms
# NOTE(review): paste() defaults to sep=" ", so this filename will contain
# spaces (e.g. "bwnet_ cpm _ 6 .RData"); paste0() was likely intended.
save(bwnet,file=paste("bwnet_",how,"_",selectedPower,".RData"),compress=TRUE)
# open a graphics window
sizeGrWindow(6,6)
########################################################################
##plot gene tree one by one
wgcna_plotAll_dendrograms(bwnet=bwnet,whichWGCNA="block",bwModuleColors=bwModuleColors,bwLabels=bwLabels,how=how,byWhich="repeat")
# Recalculate MEs with color labels
nGenes = ncol(datExpr);
nSamples = nrow(datExpr);
MEs0 = moduleEigengenes(datExpr, bwModuleColors)$eigengenes
MEs = orderMEs(MEs0)
if(useBiCor==TRUE){
moduleTraitCor<-bicor(MEs,datTraits)
moduleTraitPvalue = bicorAndPvalue(MEs,datTraits,use="pairwise.complete.obs",alternative="two.sided")[["p"]]
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}else{
moduleTraitCor = cor(MEs, datTraits, use = "p");
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples);
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}
rnames<-list(datExpr=datExpr,
datTraits=datTraits,
annot=annot,
MEs=MEs,
moduleLabels=bwLabels,
moduleColors=bwModuleColors,
geneTree=geneTree,
moduleTraitCor=moduleTraitCor,
moduleTraitPvalue=moduleTraitPvalue,
modulePvalFisher=modulePvalFisher,
biCor=useBiCor,
how=how,
byWhich="repeat")
} ##by block
if(saveToFile==TRUE){
save(rnames,file=paste0("wgcna.",how,"_",selectedPower,".dataInput.RData"),compress=TRUE)
cat("done.\n")
dev.off()
}
return(rnames)
}#main
|
/R/wrcna.R
|
no_license
|
arcolombo/rToolKit
|
R
| false
| false
| 14,084
|
r
|
#' @title creates a network module at the repeat level to investigate repeat initiating pathways
#' @description this differs from wgcna.R, where wgcna.R create phenotypic module analysis of genes and phenotypic repeat data. wrna will run a single/block wise adjacency/correlation matrices of repeats into modules of transcripts and also returns an object ready for wgcnaDbLite that includes repeat phenotypic data. This uses the recommended 'biocor' function which is a bi-weight mid-correlation calculation. For normalization, the default uses tmm normalization and a log2 transformation for each cpm for genes, We use 'signed' networks based on the FAQ. This will create the blockwise/single module data frame and run the soft-thresholding and create a correlation heatmap. the downstream method is wgcna_analsyis which investigates specific module color and specific biotype (phenotypic relationship). tx_biotype is included as phenotypic data
#' @param kexp a kexp 2 group stage is preferred
#' @param read.cutoff integer floor filter
#' @param minBranch integer for cluter min
#' @param whichWGCNA character single or block analysis, block is more sensitive
#' @param entrezOnly boolean, soon to be deprecated because entrez is auto filtered when enrichment testing
#' @param species char, mouse or humans
#' @param selectedPower 6 usually is good. can rerun if NULL
#' @param intBiotypes character the tx_biotypes of interest
#' @param useAllBiotypes boolean if false then intBiotypes are used, if true than the correlations are checked against all tx_biotypes
#' @param copyNormalize boolean, if true will execute copyNumberNormalize to normalize the repeat biotype counts by the copy number summation of the corresponding family type copy numbers find from repBase.
#' @import WGCNA
#' @import edgeR
#' @import limma
#' @export
#' @return images and cluster at the gene and repeat level
wrcna<-function(kexp,read.cutoff=2,minBranch=2,whichWGCNA=c("single","block"),species=c("Homo.sapiens","Mus.musculus"),selectedPower=6, intBiotypes=c("acromeric","centromeric","CR1","Alu","DNA transposon","Endogenous Retrovirus","ERV1","ERV3","ERVK","ERVL","hAT","HSFAU","L1","L2","LTR Retrotransposon","Eutr1","Merlin","PiggyBac","Pseudogene","Repetitive element","satellite","snRNA","SVA","TcMar","telo","Transposable Element","Satellite"),useAllBiotypes=FALSE,tmm.norm=TRUE,useBiCor=TRUE,how=c("cpm","tpm"), design=NULL,saveToFile=FALSE){
if(nrow(kexp)>20000){
kexp<-findRepeats(kexp)
}
how<-match.arg(how,c("cpm","tpm"))
byWhich<-"repeat"
##prepare data
whichWGCNA<-match.arg(whichWGCNA,c("single","block"))
species<-match.arg(species,c("Homo.sapiens","Mus.musculus"))
rexp<-findRepeats(kexp)
if(how=="cpm"){
cpm<-collapseBundles(rexp,"tx_id",read.cutoff=read.cutoff)
cpm<-cpm[!grepl("^ERCC",rownames(cpm)),]
cpm<-cpm[!grepl("^ENS",rownames(cpm)),]
rpm<-collapseBundles(rexp,"tx_biotype",read.cutoff=read.cutoff)
rpm<-rpm[!grepl("^ERCC",rownames(rpm)),]
if(tmm.norm==TRUE){
d<-DGEList(counts=cpm)
d<-calcNormFactors(d)
rd<-DGEList(counts=rpm)
rd<-calcNormFactors(rd)
if(is.null(design)==TRUE){
message('cpm normalization')
cpm.norm<-cpm(d,normalized.lib.sizes=TRUE,log=FALSE)
rdm.norm<-cpm(rd,normalized.lib.sizes=TRUE,log=FALSE)
rpm<-log2(1+rdm.norm)
cpm<-log2(1+cpm.norm)
cpm.norm<-NULL
rdm.norm<-NULL
}else{
stopifnot(all(rownames(design)==colnames(kexp)))
message('voom-ing')
res.voom<-voom(d,design)
cpm.norm<-res.voom$E ##log2 normalized
rep.norm<-voom(rd,design)
rdm.norm<-rep.norm$E ##log2 normalized tx_biotypes
rpm<-rdm.norm ##log2 norm
cpm<-cpm.norm #log2 norm
cpm.norm<-NULL
rdm.norm<-NULL
cpm<-log2(1+cpm)
rpm<-log2(1+rpm)
}##if design is input
}# tmm.norm==TRUE
}else if(how=="tpm"){
cpm<-collapseTpm(rexp,"gene_id",read.cutoff=read.cutoff)
cpm<-cpm[!grepl("^ERCC",rownames(cpm)),]
cpm<-cpm[!grepl("^ENS",rownames(cpm)),]
rpm<-collapseTpm(rexp,"tx_biotype",read.cutoff=read.cutoff)
rpm<-rpm[!grepl("^ERCC",rownames(rpm)),]
cpm<-log2(1+cpm)
rpm<-log2(1+rpm) ##log2 transform of repeats.
}
datExpr0<-t(cpm)
gsg<-goodSamplesGenes(datExpr0,verbose=3)
##rows must be SAMPLES columns repeats
if(useAllBiotypes==FALSE){
#select columns of intBiotypes
stopifnot(is.null(intBiotypes)==FALSE)
rpm<-rpm[rownames(rpm)%in%intBiotypes,]
}
datTraits<-t(rpm)
datTraits<-as.data.frame(datTraits)
stopifnot(nrow(datExpr0)==nrow(datTraits))
if (!gsg$allOK)
{
# Optionally, print the gene and sample names that were removed:
if (sum(!gsg$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(datExpr0)[!gsg$goodGenes], collapse = ", ")));
if (sum(!gsg$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(datExpr0)[!gsg$goodSamples], collapse = ", ")));
# Remove the offending genes and samples from the data:
datExpr0 = datExpr0[gsg$goodSamples, gsg$goodGenes]
}
sampleTree = hclust(dist(datExpr0), method = "average");
# Plot the sample tree: Open a graphic output window of size 12 by 9 inches
# The user should change the dimensions if the window is too large or too small.
sizeGrWindow(12,9)
#pdf(file = "Plots/sampleClustering.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main =paste0("Sample clustering to detect outliers"),
sub="",
xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
cutHeight<-readHeight()
# Plot a line to show the cut
abline(h = cutHeight, col = "red");
readkey()
# Determine cluster under the line
clust = cutreeStatic(sampleTree, cutHeight = cutHeight, minSize = minBranch)
print(table(clust))
# clust 1 contains the samples we want to keep.
keepSamples = (clust!=0)
print(paste0("samples to omit ",colnames(rexp)[which(keepSamples==FALSE)]))
datExpr = datExpr0[keepSamples, ]
nGenes = ncol(datExpr)
nSamples = nrow(datExpr)
if((nrow(datExpr)!=nrow(datExpr0))==TRUE){
datTraits<-datTraits[keepSamples,]
}
# Re-cluster samples
sampleTree2 = hclust(dist(datExpr), method = "average")
traitColors = numbers2colors(datTraits, signed = FALSE);
plotDendroAndColors(sampleTree2, traitColors,
groupLabels = names(datTraits),
marAll=c(1,11,3,3),
main=paste0("Repeat ",how," Module TxBiotype Correlation Samples"))
readkey()
if(saveToFile==TRUE){
pdf(paste0("RepeatMM_",how,"_TxBiotype_Correlation_Samples.pdf"),width=12,height=9)
plotDendroAndColors(sampleTree2, traitColors,
groupLabels = names(datTraits),
marAll=c(1,11,3,3),
main=paste0("Repeat ",how," Module TxBiotype Correlation Samples"))
dev.off()
}
# Choose a set of soft-thresholding powers
if(is.null(selectedPower)==TRUE){
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Call the network topology analysis function
sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5)
# Plot the results:
sizeGrWindow(9, 5)
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
y<-( -sign(sft$fitIndices[,3])*sft$fitIndices[,2])
x<-sft$fitIndices[,1]
plot(x,y,
xlab="Soft Threshold (power)",
ylab="Scale Free Topology Model Fit,signed R^2",
type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.80,col="red")
# Mean connectivity as a function of the soft-thresholding power
readkey()
y2<-sft$fitIndices[,5]
plot(x, y2,
xlab="Soft Threshold (power)",ylab="Mean Connectivity",
type="n",
main = paste("Mean connectivity"))
text(x, y2,
labels=powers,cex=cex1,col="red");
selectedPower<-readPower()
if(saveToFile==TRUE){
pdf(paste0("RepeatModule_",how,"_soft_ThresholdPower.pdf"),width=12,height=9)
plot(x,y,
xlab=paste0("RE ",how," Soft Threshold (power)"),
ylab="RE Scale Free Topology Model Fit,signed R^2",
type="n",
main = paste("RE Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
abline(h=0.80,col="red")
y2<-sft$fitIndices[,5]
plot(x, y2,
xlab="RE Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("RE Mean connectivity"))
text(x, y2,
labels=powers,cex=cex1,col="red");
dev.off()
} #save PDF
} #selectedPower NULL
message("annotating...")
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
####### ensembl_gene_id entrezgene hgnc_symbol description add data here
annot<-DataFrame(rowRanges(rexp)[!duplicated(rowRanges(rexp)$tx_id)])
txID<-grep("tx_id",colnames(annot))
entrezID<-grep("entrezid",colnames(annot))
geneID<-grep("gene_id",colnames(annot))
txBioID<-grep("tx_biotype",colnames(annot))
annot<-annot[,c(txID,entrezID,geneID,txBioID)]
annot<-annot[,!grepl("^X.",colnames(annot))]
colnames(annot)<-c("ensembl_gene_id","entrezid","hgnc_symbol","description")
annot$entrezgene<-"NA"
annot<-as.data.frame(annot)
if(whichWGCNA=="single"){
##auto####################################################
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
#############################
enableWGCNAThreads()
net = blockwiseModules(datExpr, power = selectedPower,
networkType="signed",
corType="bicor",
TOMType = "signed", minModuleSize = 30,
reassignThreshold = 0, mergeCutHeight = 0.25,
numericLabels = TRUE, pamRespectsDendro = FALSE,
saveTOMs = TRUE,
saveTOMFileBase = "rwasingleTOM",
verbose = 3)
# Convert labels to colors for plotting
# Plot the dendrogram and the module colors underneath
bwLabels<-net$colors ###for saving
bwModuleColors = labels2colors(net$colors)
MEs = net$MEs; ##use the module network calculation, do not recalculate 2nd time
#plots each gene tree one by one
wgcna_plotAll_dendrograms(bwnet=net,whichWGCNA="single",bwModuleColors=bwModuleColors,bwLabels=bwLabels,how=how,byWhich="repeat")
nGenes = ncol(datExpr);
nSamples = nrow(datExpr);
geneTree = net$dendrograms;
if(useBiCor==TRUE){
moduleTraitCor<-bicor(MEs,datTraits)
moduleTraitPvalue = bicorAndPvalue(MEs,datTraits,use="pairwise.complete.obs",alternative="two.sided")[["p"]]
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
} else {
moduleTraitCor = cor(MEs, datTraits, use = "p");
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples);
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}
rnames <-list(datExpr=datExpr,
datTraits=datTraits,
annot=annot,
MEs=MEs,
moduleLabels=bwLabels,
moduleColors=bwModuleColors,
geneTree=geneTree,
moduleTraitCor=moduleTraitCor,
moduleTraitPvalue=moduleTraitPvalue,
modulePvalFisher=modulePvalFisher,
usedbiCor=useBiCor,
how=how,
byWhich="repeat")
} ##single block should have 1 module per datTraits column
if(whichWGCNA=="block"){
##############BLOCK LEVEL ###################
message("networking...")
datExpr<-as.data.frame(datExpr,stringsAsFactors=FALSE)
##############################################
enableWGCNAThreads()
bwnet = blockwiseModules(datExpr,
maxBlockSize = 4000,
power = selectedPower,
networkType="signed",
TOMType = "signed",
corType="bicor",
minModuleSize = 30,
reassignThreshold = 0,
mergeCutHeight = 0.25,
numericLabels = TRUE,
saveTOMs = TRUE,
saveTOMFileBase = "rwaTOM-blockwise",
verbose = 3)
# Load the results of single-block analysis
bwLabels = matchLabels(bwnet$colors,bwnet$colors)
# Convert labels to colors for plotting
bwModuleColors = labels2colors(bwLabels)
geneTree<-bwnet$dendrograms
save(bwnet,file=paste("bwnet_",how,"_",selectedPower,".RData"),compress=TRUE)
# open a graphics window
sizeGrWindow(6,6)
########################################################################
##plot gene tree one by one
wgcna_plotAll_dendrograms(bwnet=bwnet,whichWGCNA="block",bwModuleColors=bwModuleColors,bwLabels=bwLabels,how=how,byWhich="repeat")
# this line corresponds to using an R^2 cut-off of h
# Recalculate MEs with color labels
nGenes = ncol(datExpr);
nSamples = nrow(datExpr);
MEs0 = moduleEigengenes(datExpr, bwModuleColors)$eigengenes
MEs = orderMEs(MEs0)
if(useBiCor==TRUE){
moduleTraitCor<-bicor(MEs,datTraits)
moduleTraitPvalue = bicorAndPvalue(MEs,datTraits,use="pairwise.complete.obs",alternative="two.sided")[["p"]]
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}else{
moduleTraitCor = cor(MEs, datTraits, use = "p");
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples);
modulePvalFisher<-corPvalueFisher(moduleTraitCor,nSamples)
}
rnames<-list(datExpr=datExpr,
datTraits=datTraits,
annot=annot,
MEs=MEs,
moduleLabels=bwLabels,
moduleColors=bwModuleColors,
geneTree=geneTree,
moduleTraitCor=moduleTraitCor,
moduleTraitPvalue=moduleTraitPvalue,
modulePvalFisher=modulePvalFisher,
biCor=useBiCor,
how=how,
byWhich="repeat")
} ##by block
if(saveToFile==TRUE){
save(rnames,file=paste0("wgcna.",how,"_",selectedPower,".dataInput.RData"),compress=TRUE)
cat("done.\n")
dev.off()
}
return(rnames)
}#main
|
\name{lambdaMax}
\alias{lambdaMax}
\title{Maximum lambda}
\description{Compute the maximum \code{lambda}}
\usage{
lambdaMax(X)
}
\arguments{
\item{X}{a \emph{n}x\emph{p} data matrix.}
}
\details{Computes the largest regularization value (maximum \code{lambda}) that yields the null model.
The maximum \code{lambda} is computed from the input data matrix: it is the maximum element of the column-wise multiplication of the data matrix, normalized by the
number of observations.
}
\value{a numeric value}
|
/man/lambdaMax.Rd
|
no_license
|
cran/XMRF
|
R
| false
| false
| 506
|
rd
|
\name{lambdaMax}
\alias{lambdaMax}
\title{Maximum lambda}
\description{Compute the maximum \code{lambda}}
\usage{
lambdaMax(X)
}
\arguments{
\item{X}{a \emph{n}x\emph{p} data matrix.}
}
\details{Computes the largest regularization value (maximum \code{lambda}) that yields the null model.
The maximum \code{lambda} is computed from the input data matrix: it is the maximum element of the column-wise multiplication of the data matrix, normalized by the
number of observations.
}
\value{a numeric value}
|
# Implementation of the Composite Farm class #
#
# Usually used to represent a farming enterprise using a weighted sum of solutions from
# farms with different soiltypes, objective weights and/or distances to sugarbeet factories
#
####################################
################### Constructor #######################
# Create a new Farm object from its economic parameters
#######################################################
# Build a CompositeFarm by combining weighted optimal sub-farms: one sub-farm
# is created per (soil-level, management-option) pair with a positive weight,
# configured, optionally given sugarbeet haulage settings, and registered with
# the underlying Java CompositeFarmRepresentation via .jcall.
CompositeFarm <- function(farmParams, mou = NULL, mouweights = c(1.0), soildata,
                          SBFactories = NULL, haulagePerTonnePerKm = 0.12,
                          maxSBHaulageDistance = 200) {
  composite <- new("CompositeFarm")
  composite@model <- .jnew("jfm/r/CompositeFarmRepresentation")
  # Average the soil records when several are supplied; otherwise take the
  # single record's values directly.
  if (length(soildata$X0.5) > 1) {
    rainfall <- mean(soildata$RF)
    soil.weights <- c(mean(soildata$X0.5), mean(soildata$X0.75),
                      mean(soildata$X1.0), mean(soildata$X1.25),
                      mean(soildata$X1.5), mean(soildata$X1.75),
                      mean(soildata$X2.0), mean(soildata$X2.5))
  } else {
    rainfall <- soildata$RF
    soil.weights <- c(soildata$X0.5, soildata$X0.75, soildata$X1.0,
                      soildata$X1.25, soildata$X1.5, soildata$X1.75,
                      soildata$X2.0, soildata$X2.5)
  }
  soil.levels <- c(0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5)
  for (lvl in seq_along(soil.levels)) {
    if (soil.weights[lvl] > 0) {
      # Normalise `mou` to a list so that a single option (or NULL) is handled
      # by the same inner loop.
      if (!is.list(mou)) {
        mou <- list(mou)
      }
      for (m in seq_along(mou)) {
        sub.farm <- Farm(farmParams)
        if (!is.null(mou[[m]])) {
          set(sub.farm, mou[[m]])
        }
        if (!is.null(SBFactories)) {
          set.sugarbeet(sub.farm, coordinates(soildata), SBFactories,
                        haulagePerTonnePerKm, maxSBHaulageDistance)
        }
        # Register the configured sub-farm with its soil level, rainfall and
        # combined (soil-weight x management-option-weight) weight.
        .jcall(model(composite), "V", "addFarm", model(sub.farm),
               soil.levels[lvl], rainfall, soil.weights[lvl] * mouweights[m])
        rm(sub.farm)  # probably doesn't do much
      }
    }
  }
  composite@cropNames <- .jcall(composite@model, "[Ljava/lang/String;",
                                "cropNames")
  composite
}
|
/R/CompositeFarm-class.R
|
no_license
|
cran/farmR
|
R
| false
| false
| 1,674
|
r
|
# Implementation of the Composite Farm class #
#
# Usually used to represent a farming enterprise using a weighted sum of solutions from
# farms with different soiltypes, objective weights and/or distances to sugarbeet factories
#
####################################
################### Constructor #######################
# Create a new Farm object from its economic parameters
#######################################################
# Constructor: build a CompositeFarm as a weighted combination of sub-farms,
# one per (soil-level, management-option) pair with a positive weight.
# farmParams: economic parameters passed to each sub-Farm constructor.
# mou: a management option (or list of options) applied to each sub-farm via set().
# mouweights: one weight per element of `mou`, multiplied into the soil weight.
# soildata: soil records providing RF (rainfall) and X0.5..X2.5 weight columns.
# SBFactories / haulagePerTonnePerKm / maxSBHaulageDistance: optional
#   sugarbeet factory locations and haulage settings.
CompositeFarm <- function(farmParams,mou=NULL,mouweights=c(1.0),soildata,SBFactories=NULL,haulagePerTonnePerKm=0.12,maxSBHaulageDistance=200) {
farm=new("CompositeFarm")
# Backing Java object that accumulates the weighted sub-farms.
farm@model=.jnew("jfm/r/CompositeFarmRepresentation")
rf=0
wt=c()
# Average the soil records when more than one row is supplied; otherwise use
# the single record's values directly.
if ( length(soildata$X0.5) > 1 ){
rf=mean(soildata$RF)
wt=c(mean(soildata$X0.5),mean(soildata$X0.75),mean(soildata$X1.0),mean(soildata$X1.25),mean(soildata$X1.5),mean(soildata$X1.75),mean(soildata$X2.0),mean(soildata$X2.5))
} else {
rf=soildata$RF
wt=c(soildata$X0.5,soildata$X0.75,soildata$X1.0,soildata$X1.25,soildata$X1.5,soildata$X1.75,soildata$X2.0,soildata$X2.5)
}
# Soil levels corresponding (position-by-position) to the weights in `wt`.
sl=c(0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5)
for( i in 1:length(sl)){
# Skip soil levels with zero weight: they contribute nothing to the composite.
if ( wt[i] > 0){
# Wrap a single (possibly NULL) management option in a list so the inner
# loop handles all cases uniformly.
if ( !is.list(mou)){
mou=list(mou)
}
for( m in 1:length(mou)){
sfarm=Farm(farmParams)
if ( !is.null(mou[[m]])){
set(sfarm,mou[[m]])
}
if ( !is.null(SBFactories) ){
set.sugarbeet(sfarm,coordinates(soildata),SBFactories,haulagePerTonnePerKm,maxSBHaulageDistance)
}
# Register the sub-farm with its soil level, rainfall and combined weight.
.jcall(model(farm),"V","addFarm",model(sfarm),sl[i],rf,wt[i]*mouweights[m])
rm(sfarm) # probably doesn't do much
}
}
}
# Cache the crop names reported by the Java model.
farm@cropNames=.jcall(farm@model,"[Ljava/lang/String;","cropNames")
farm
}
|
#' Parse a .ini or a .cfg file
#'
#' @export
#' @param path (character) A single file path to read from on initialize, or
#' to write to on \code{write}
#' @details
#' \strong{Methods}
#' \describe{
#' \item{\code{read()}}{
#' Read a file
#' }
#' \item{\code{write(path)}}{
#' Write a file
#' - path: path to write the file to
#' }
#' \item{\code{get(x, fallback)}}{
#' Get a section
#' - x: section name to get
#' - fallback: (character) fallback value if 'x' not found
#' }
#' \item{\code{sections()}}{
#' Get all sections of a file
#' }
#' }
#' @usage NULL
#' @format NULL
#' @examples \dontrun{
#' # example file
#' gitfile <- system.file("examples", "gitconfig.ini", package = "inir")
#'
#' # instantiate new object with file path
#' (res <- ini(gitfile))
#'
#' # get file path
#' res$file
#'
#' # read file
#' res$read()
#'
#' # get section names
#' res$sections()
#'
#' # get contents of a single section
#' res$get("core")
#' ## or index to it via the "parsed" element
#' res$parsed$gitconfig.ini[['core']]
#'
#' # another example getting a section
#' res$get('remote "origin"')
#' res$parsed$gitconfig.ini[['remote "origin"']]
#'
#' # You can set a default value with the get() method
#' res$get("stuff") # returns NULL
#' res$get("stuff", "hello_world") # default value
#'
#' # write file
#' res$write(path = "myfile2.ini")
#' unlink("myfile2.ini")
#' }
# Convenience constructor: wrap `path` in an Ini R6 object (defined below).
ini <- function(path) Ini$new(file = path)
# R6 class backing ini(): holds a file path plus the parsed representation of
# the .ini/.cfg file, with methods to read, inspect and write it back.
Ini <- R6::R6Class("Ini",
  portable = FALSE,
  public = list(
    # Path of the .ini/.cfg file this object wraps.
    file = NA,
    # Result of the last read(): a one-element named list (keyed by file
    # name) whose element is a named list of sections.
    parsed = NA,
    # Store the path only; the file is not touched until read() is called.
    initialize = function(file) {
      self$file <- file
    },
    # One-line summary of the object. Returns the object invisibly, per the
    # R6 convention for print methods, so calls remain chainable and
    # auto-printing stays clean.
    print = function() {
      cat(paste0("<<ini config file>> ", basename(self$file), ".\n"))
      invisible(self)
    },
    # Parse the file with ini_parse(), cache the result in $parsed, return it.
    read = function() {
      self$parsed <- ini_parse(self$file)
      self$parsed
    },
    # Names of all sections found by the last read().
    sections = function() {
      names(self$parsed[[1]])
    },
    # Look up section `x`; when absent, return `fallback` if supplied,
    # otherwise NULL.
    get = function(x, fallback) {
      tmp <- self$parsed[[1]][[x]]
      if (is.null(tmp)) {
        if (missing(fallback)) NULL else fallback
      } else {
        tmp
      }
    },
    # Serialise the parsed content to `path` with ini_write().
    write = function(path) {
      ini_write(self$parsed[[1]], path)
    }
  )
)
|
/R/ini_parse2.R
|
permissive
|
sckott/inir
|
R
| false
| false
| 2,213
|
r
|
#' Parse a .ini or a .cfg file
#'
#' @export
#' @param path (character) A single file path to read from on initialize, or
#' to write to on \code{write}
#' @details
#' \strong{Methods}
#' \describe{
#' \item{\code{read()}}{
#' Read a file
#' }
#' \item{\code{write(path)}}{
#' Write a file
#' - path: path to write the file to
#' }
#' \item{\code{get(x, fallback)}}{
#' Get a section
#' - x: section name to get
#' - fallback: (character) fallback value if 'x' not found
#' }
#' \item{\code{sections()}}{
#' Get all sections of a file
#' }
#' }
#' @usage NULL
#' @format NULL
#' @examples \dontrun{
#' # example file
#' gitfile <- system.file("examples", "gitconfig.ini", package = "inir")
#'
#' # instantiate new object with file path
#' (res <- ini(gitfile))
#'
#' # get file path
#' res$file
#'
#' # read file
#' res$read()
#'
#' # get section names
#' res$sections()
#'
#' # get contents of a single section
#' res$get("core")
#' ## or index to it via the "parsed" element
#' res$parsed$gitconfig.ini[['core']]
#'
#' # another example getting a section
#' res$get('remote "origin"')
#' res$parsed$gitconfig.ini[['remote "origin"']]
#'
#' # You can set a default value with the get() method
#' res$get("stuff") # returns NULL
#' res$get("stuff", "hello_world") # default value
#'
#' # write file
#' res$write(path = "myfile2.ini")
#' unlink("myfile2.ini")
#' }
# Convenience constructor: wrap `path` in an Ini R6 object (defined below).
ini <- function(path) {
Ini$new(file = path)
}
# R6 class backing ini(): holds a file path plus the parsed representation of
# the .ini/.cfg file, with methods to read, inspect and write it back.
Ini <- R6::R6Class("Ini",
portable = FALSE,
public = list(
# Path of the .ini/.cfg file this object wraps.
file = NA,
# Result of the last read(): a one-element named list (keyed by file name)
# whose element is a named list of sections.
parsed = NA,
# Store the path only; the file is not touched until read() is called.
initialize = function(file) {
self$file <- file
},
# One-line summary printed for the object.
print = function() {
cat(paste0("<<ini config file>> ", basename(self$file), ".\n"))
},
# Parse the file with ini_parse(), cache the result in $parsed, return it.
read = function() {
self$parsed <- ini_parse(self$file)
self$parsed
},
# Names of all sections found by the last read().
sections = function() {
names(self$parsed[[1]])
},
# Look up section `x`; when absent, return `fallback` if supplied, else NULL.
get = function(x, fallback) {
tmp <- self$parsed[[1]][[x]]
if (is.null(tmp)) {
if (missing(fallback)) NULL else fallback
} else {
tmp
}
},
# Serialise the parsed content to `path` with ini_write().
write = function(path) {
ini_write(self$parsed[[1]], path)
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_mb.R
\name{mb.boot}
\alias{mb.boot}
\title{Moving block bootstrap for IRFs of identified SVARs}
\usage{
mb.boot(x, b.length = 15, n.ahead = 20, nboot = 500, nc = 1,
dd = NULL, signrest = NULL, itermax = 300, steptol = 200,
iter2 = 50)
}
\arguments{
\item{x}{SVAR object of class "svars"}
\item{b.length}{Integer. Length of each block}
\item{n.ahead}{Integer specifying the steps}
\item{nboot}{Integer. Number of bootstrap iterations}
\item{nc}{Integer. Number of processor cores (Not available on windows machines)}
\item{dd}{Object of class 'indepTestDist'. A simulated independent sample of the same size as the data.
If not supplied, it will be calculated by the function}
\item{signrest}{A list with vectors containing 1 and -1, e.g. c(1,-1,1), indicating a sign pattern of specific shocks to be tested
with the help of the bootstrap samples.}
\item{itermax}{Integer. Maximum number of iterations for DEoptim}
\item{steptol}{Numeric. Tolerance for steps without improvement for DEoptim}
\item{iter2}{Integer. Number of iterations for the second optimization}
}
\value{
A list of class "sboot" with elements
\item{true}{Point estimate of impulse response functions}
\item{bootstrap}{List of length "nboot" holding bootstrap impulse response functions}
\item{SE}{Bootstrapped standard errors of the estimated covariance decomposition
(only if "x" has method "Cramer von-Mises", or "Distance covariances")}
\item{nboot}{Number of bootstrap iterations}
\item{b_length}{Length of each block}
\item{point_estimate}{Point estimate of covariance decomposition}
\item{boot_mean}{Mean of bootstrapped covariance decompositions}
\item{signrest}{Evaluated sign pattern}
\item{sign_complete}{Frequency of appearance of the complete sign pattern in all bootstrapped covariance decompositions}
\item{sign_part}{Frequency of bootstrapped covariance decompositions which conform the complete predetermined sign pattern. If signrest=NULL,
the frequency of bootstrapped covariance decompositions that hold the same sign pattern as the point estimate is provided.}
\item{sign_part}{Frequency of single shocks in all bootstrapped covariance decompositions which accord to a specific predetermined sign pattern}
\item{cov_bs}{Covariance matrix of bootstrapped parameter in impact relations matrix}
\item{method}{Used bootstrap method}
}
\description{
Calculating confidence bands for impulse response via moving block bootstrap
}
\examples{
\donttest{
# data contains quarterly observations from 1965Q1 to 2008Q3
# x = output gap
# pi = inflation
# i = interest rates
set.seed(23211)
v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
x1 <- id.dc(v1)
summary(x1)
# impulse response analysis with confidence bands
# Checking how often theory based impact relations appear
signrest <- list(demand = c(1,1,1), supply = c(-1,1,1), money = c(-1,-1,1))
bb <- mb.boot(x1, b.length = 15, nboot = 500, n.ahead = 30, nc = 1, signrest = signrest)
summary(bb)
plot(bb, lowerq = 0.16, upperq = 0.84)
}
}
\references{
Brueggemann, R., Jentsch, C., and Trenkler, C. (2016). Inference in VARs with conditional heteroskedasticity of unknown form. Journal of Econometrics 191, 69-85.\cr
Herwartz, H., 2017. Hodges Lehmann detection of structural shocks -
An analysis of macroeconomic dynamics in the Euro Area, Oxford Bulletin of Economics and Statistics.
}
\seealso{
\code{\link{id.cvm}}, \code{\link{id.dc}}, \code{\link{id.ngml}}, \code{\link{id.cv}} or \code{\link{id.st}}
}
|
/man/mb.boot.Rd
|
permissive
|
AlexanderRitz/svars
|
R
| false
| true
| 3,548
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap_mb.R
\name{mb.boot}
\alias{mb.boot}
\title{Moving block bootstrap for IRFs of identified SVARs}
\usage{
mb.boot(x, b.length = 15, n.ahead = 20, nboot = 500, nc = 1,
dd = NULL, signrest = NULL, itermax = 300, steptol = 200,
iter2 = 50)
}
\arguments{
\item{x}{SVAR object of class "svars"}
\item{b.length}{Integer. Length of each block}
\item{n.ahead}{Integer specifying the steps}
\item{nboot}{Integer. Number of bootstrap iterations}
\item{nc}{Integer. Number of processor cores (Not available on windows machines)}
\item{dd}{Object of class 'indepTestDist'. A simulated independent sample of the same size as the data.
If not supplied, it will be calculated by the function}
\item{signrest}{A list with vectors containing 1 and -1, e.g. c(1,-1,1), indicating a sign pattern of specific shocks to be tested
with the help of the bootstrap samples.}
\item{itermax}{Integer. Maximum number of iterations for DEoptim}
\item{steptol}{Numeric. Tolerance for steps without improvement for DEoptim}
\item{iter2}{Integer. Number of iterations for the second optimization}
}
\value{
A list of class "sboot" with elements
\item{true}{Point estimate of impulse response functions}
\item{bootstrap}{List of length "nboot" holding bootstrap impulse response functions}
\item{SE}{Bootstrapped standard errors of the estimated covariance decomposition
(only if "x" has method "Cramer von-Mises", or "Distance covariances")}
\item{nboot}{Number of bootstrap iterations}
\item{b_length}{Length of each block}
\item{point_estimate}{Point estimate of covariance decomposition}
\item{boot_mean}{Mean of bootstrapped covariance decompositions}
\item{signrest}{Evaluated sign pattern}
\item{sign_complete}{Frequency of appearance of the complete sign pattern in all bootstrapped covariance decompositions}
\item{sign_part}{Frequency of bootstrapped covariance decompositions which conform the complete predetermined sign pattern. If signrest=NULL,
the frequency of bootstrapped covariance decompositions that hold the same sign pattern as the point estimate is provided.}
\item{sign_part}{Frequency of single shocks in all bootstrapped covariance decompositions which accord to a specific predetermined sign pattern}
\item{cov_bs}{Covariance matrix of bootstrapped parameter in impact relations matrix}
\item{method}{Used bootstrap method}
}
\description{
Calculating confidence bands for impulse response via moving block bootstrap
}
\examples{
\donttest{
# data contains quarterly observations from 1965Q1 to 2008Q3
# x = output gap
# pi = inflation
# i = interest rates
set.seed(23211)
v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
x1 <- id.dc(v1)
summary(x1)
# impulse response analysis with confidence bands
# Checking how often theory based impact relations appear
signrest <- list(demand = c(1,1,1), supply = c(-1,1,1), money = c(-1,-1,1))
bb <- mb.boot(x1, b.length = 15, nboot = 500, n.ahead = 30, nc = 1, signrest = signrest)
summary(bb)
plot(bb, lowerq = 0.16, upperq = 0.84)
}
}
\references{
Brueggemann, R., Jentsch, C., and Trenkler, C. (2016). Inference in VARs with conditional heteroskedasticity of unknown form. Journal of Econometrics 191, 69-85.\cr
Herwartz, H., 2017. Hodges Lehmann detection of structural shocks -
An analysis of macroeconomic dynamics in the Euro Area, Oxford Bulletin of Economics and Statistics.
}
\seealso{
\code{\link{id.cvm}}, \code{\link{id.dc}}, \code{\link{id.ngml}}, \code{\link{id.cv}} or \code{\link{id.st}}
}
|
# Initialise the SQLite database for the Shiny CRUD app: (re)create the
# 'mtcars' table with a uid key and audit columns, then populate it from the
# prepped RDS file.
library(RSQLite)
library(DBI)
library(tibble)
# dplyr supplies `%>%`, select() and everything(), which are used below but
# were previously never attached (only RSQLite/DBI/tibble were).
library(dplyr)
# Create a connection object with SQLite
conn <- dbConnect(
  RSQLite::SQLite(),
  'transactional/shiny_app/data/mtcars.sqlite3'
)
# DDL for the 'mtcars' table: the original columns plus 'uid' and the four
# created/modified audit columns.
# NOTE(review): SERIAL is PostgreSQL syntax; SQLite accepts it only as an
# ordinary column type name, and the uids written below are text digests --
# confirm whether `uid TEXT PRIMARY KEY` was intended.
create_mtcars_query = "CREATE TABLE mtcars (
uid SERIAL PRIMARY KEY,
model TEXT,
mpg REAL,
cyl REAL,
disp REAL,
hp REAL,
drat REAL,
wt REAL,
qsec REAL,
vs TEXT,
am TEXT,
gear REAL,
carb REAL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT,
modified_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
modified_by TEXT,
is_deleted BOOLEAN DEFAULT 0
)"
# dbExecute() executes a SQL statement with a connection object
# Drop the table if it already exists
dbExecute(conn, "DROP TABLE IF EXISTS mtcars")
# Execute the query created above
dbExecute(conn, create_mtcars_query)
# Read in the RDS file created in 'data_prep.R'
dat <- readRDS("transactional/data_prep/prepped/mtcars.RDS")
# Compute a content-hash uid for each row (digest of the row's values).
# vapply guarantees a character vector, replacing the lapply/unlist round trip.
uids <- vapply(
  seq_len(nrow(dat)),
  function(row_num) digest::digest(dat[row_num, ]),
  character(1)
)
# add uid column to the `dat` data frame
dat$uid <- uids
# reorder the columns so `uid` is 1st
dat <- dat %>%
  select(uid, everything())
# Fill in the SQLite table with the values from the RDS file
DBI::dbWriteTable(
  conn,
  name = "mtcars",
  value = dat,
  overwrite = FALSE,
  append = TRUE
)
# List tables to confirm 'mtcars' table exists
dbListTables(conn)
# MUST disconnect from SQLite before continuing
dbDisconnect(conn)
|
/transactional/data_prep/db_init.R
|
no_license
|
charlesberthillon/shiny_crud
|
R
| false
| false
| 2,078
|
r
|
# Initialise the SQLite database for the Shiny CRUD app: (re)create the
# 'mtcars' table with a uid key and audit columns, then populate it from the
# prepped RDS file.
library(RSQLite)
library(DBI)
library(tibble)
# NOTE(review): `%>%`, select() and everything() used below come from
# dplyr/magrittr, which are not attached here -- confirm the calling context
# loads them.
# Create a connection object with SQLite
conn <- dbConnect(
RSQLite::SQLite(),
'transactional/shiny_app/data/mtcars.sqlite3'
)
# Create a query to prepare the 'mtcars' table with additional 'uid', 'id',
# & the 4 created/modified columns
# NOTE(review): SERIAL is PostgreSQL syntax; SQLite accepts it only as an
# ordinary column type name, and the uids written below are text digests --
# verify this schema is intended.
create_mtcars_query = "CREATE TABLE mtcars (
uid SERIAL PRIMARY KEY,
model TEXT,
mpg REAL,
cyl REAL,
disp REAL,
hp REAL,
drat REAL,
wt REAL,
qsec REAL,
vs TEXT,
am TEXT,
gear REAL,
carb REAL,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT,
modified_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
modified_by TEXT,
is_deleted BOOLEAN DEFAULT 0
)"
# dbExecute() executes a SQL statement with a connection object
# Drop the table if it already exists
dbExecute(conn, "DROP TABLE IF EXISTS mtcars")
# Execute the query created above
dbExecute(conn, create_mtcars_query)
# Read in the RDS file created in 'data_prep.R'
dat <- readRDS("transactional/data_prep/prepped/mtcars.RDS")
# Compute one digest per row; the digest (a hash of the row's contents) is
# used as the row's unique identifier.
uids <- lapply(1:nrow(dat), function(row_num) {
row_data <- digest::digest(dat[row_num, ])
})
# add uid column to the `dat` data frame
dat$uid <- unlist(uids)
# reorder the columns so `uid` is 1st
dat <- dat %>%
select(uid, everything())
# Fill in the SQLite table with the values from the RDS file
DBI::dbWriteTable(
conn,
name = "mtcars",
value = dat,
overwrite = FALSE,
append = TRUE
)
# List tables to confirm 'mtcars' table exists
dbListTables(conn)
# MUST disconnect from SQLite before continuing
dbDisconnect(conn)
|
# Hyperparameter sweep for a logistic mixture model fitted with the local
# variational approximation (LVA) routines sourced from logistic.R: for each
# of `dataset.num` simulated datasets, fit the model at every prior
# hyperparameter value in `phi` and record the resulting variational energy,
# then plot energy against phi.
# Base seeds; each dataset offsets all three by its dataset index so runs are
# reproducible but distinct across datasets.
seed.list <- list(input.seed = 402, output.seed = 805, learning.seed = 201)
## problem setting
source("logistic.R")
n <- 400   # sample size per simulated dataset
M <- 4     # input dimension
K0 <- 3    # true number of mixture components
##generate parameter
# param.seed <- 3
# true.param <- param.mixture.norm.generate(M, K=K0, seed = param.seed)
true.ratio <- rep(1/K0,K0)
true.weight <- matrix(c(0,0,2,2,0,-1,1,0,-2,0,-2,2),nrow=M,ncol=K0)
true.param <- list(ratio=true.ratio, weight=true.weight)
##model settings
beta <- 0.05 ##inverse covariance
# Candidate prior hyperparameter values to sweep over.
phi <- c(0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 7.0, 10)
# phi <- 0.1
K <- 5     # components in the fitted (over-specified) model
# (removed unused top-level `learning.seed <- 12`; the fit below uses
# a.seed.list$learning.seed derived from seed.list instead)
init.df <- M+3
init.Sigma <- diag(M)
init.phi <- 1
iteration <- 1000
restart <- 10
update.order <- c(lva.update.parameters, lva.update.latent.variable, lva.update.auxilirary.variable)
dataset.num <- 20
result.list <- list()
# energies <- matrix(0, nrow=length(phi), ncol=iteration)
# Final variational energy for each (phi, dataset) pair.
energies <- matrix(0, nrow = length(phi), ncol=dataset.num)
for(a.dataset.num in seq_len(dataset.num)){
  # Shift every base seed by the dataset index.
  a.seed.list <- lapply(seed.list, function(x)(x+a.dataset.num))
  ##input: uniform covariates on xrange
  xrange <- c(-5,5)
  x <- input.unif.generate(n,M,a.seed.list$input.seed, xrange)
  ##output: responses and true component labels from the mixture
  output.info <- output.mixture.logistic.generate(n, x, true.param, seed=a.seed.list$output.seed)
  y <- output.info$output
  label <- output.info$label
  for(i in seq_along(phi)){
    i.prior.hyperparameter <- list(phi=phi[i], beta=beta)
    lva.result <- lva.estimation.main(update.order = update.order,
                                      x = x,
                                      y = y,
                                      K = K,
                                      prior.hyperparameter = i.prior.hyperparameter,
                                      init.Sigma = init.Sigma,
                                      init.phi = init.phi,
                                      init.df = init.df,
                                      iteration = iteration,
                                      restart = restart,
                                      learning.seed = a.seed.list$learning.seed,
                                      trace.on = FALSE)
    result.list <- c(result.list, list(lva.result))
    energies[i, a.dataset.num] <- lva.result$energy.trace
  }
}
# One energy-vs-phi curve per dataset, plus the mean curve across datasets.
matplot.x <- matrix(rep(phi,dataset.num),nrow=length(phi), ncol=dataset.num)
matplot(x = matplot.x, y = energies, "l")
mean.energy <- rowMeans(energies)
plot(x = phi, y = mean.energy, "b")
|
/LVA_logistic/logistic_mixture_hyperparameter.R
|
permissive
|
fumish/LearningModels
|
R
| false
| false
| 2,463
|
r
|
# Hyperparameter sweep for a logistic mixture model fitted with the local
# variational approximation (LVA) routines sourced from logistic.R: for each
# of `dataset.num` simulated datasets, fit the model at every prior
# hyperparameter value in `phi`, record the variational energy, then plot
# energy against phi.
seed.list <- list(input.seed = 402, output.seed = 805, learning.seed = 201)
## problem setting
source("logistic.R")
n <- 400
M <- 4
K0 <- 3
##generate parameter
# param.seed <- 3
# true.param <- param.mixture.norm.generate(M, K=K0, seed = param.seed)
true.ratio <- rep(1/K0,K0)
true.weight <- matrix(c(0,0,2,2,0,-1,1,0,-2,0,-2,2),nrow=M,ncol=K0)
true.param <- list(ratio=true.ratio, weight=true.weight)
##model settings
beta <- 0.05 ##inverse covariance
# Candidate prior hyperparameter values to sweep over.
phi <- c(0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 7.0, 10)
# phi <- 0.1
K <- 5
# NOTE(review): this top-level learning.seed appears unused -- the fit below
# passes a.seed.list$learning.seed (derived from seed.list) instead.
learning.seed <- 12
init.df <- M+3
init.Sigma <- diag(M)
init.phi <- 1
iteration <- 1000
restart <- 10
update.order <- c(lva.update.parameters, lva.update.latent.variable, lva.update.auxilirary.variable)
dataset.num <- 20
result.list <- list()
# energies <- matrix(0, nrow=length(phi), ncol=iteration)
# Final variational energy for each (phi, dataset) pair.
energies <- matrix(0, nrow = length(phi), ncol=dataset.num)
for(a.dataset.num in 1:dataset.num){
# Shift every base seed by the dataset index so datasets differ reproducibly.
a.seed.list <- lapply(seed.list, function(x)(x+a.dataset.num))
##input
xrange <- c(-5,5)
x <- input.unif.generate(n,M,a.seed.list$input.seed, xrange)
##output
output.info <- output.mixture.logistic.generate(n, x, true.param, seed=a.seed.list$output.seed)
y <- output.info$output
label <- output.info$label
for(i in 1:length(phi)){
i.prior.hyperparameter <- list(phi=phi[i], beta=beta)
lva.result <- lva.estimation.main(update.order = update.order,
x = x,
y = y,
K = K,
prior.hyperparameter = i.prior.hyperparameter,
init.Sigma = init.Sigma,
init.phi = init.phi,
init.df = init.df,
iteration = iteration,
restart = restart,
learning.seed = a.seed.list$learning.seed,
trace.on = F)
result.list <- c(result.list, list(lva.result))
energies[i, a.dataset.num] <- lva.result$energy.trace
}
}
# One energy-vs-phi curve per dataset, plus the mean curve across datasets.
matplot.x <- matrix(rep(phi,dataset.num),nrow=length(phi), ncol=dataset.num)
matplot(x = matplot.x, y = energies, "l")
mean.energy <- rowMeans(energies)
plot(x = phi, y = mean.energy, "b")
|
# SIR epidemic model with demography: initial conditions and per-year rates.
# NOTE(review): rm(list=ls()) wipes the user's workspace; acceptable for a
# standalone script, hazardous if this file is ever source()d from other code.
rm(list=ls())
# Set up initial condition
N0 <- 1000000 # total population
In0 <- 10 # initial infectives
S0 <- N0-In0 # initially, everyone else is susceptible
R0 <- 0 # initially, nobody has recovered
IC <- c(S=S0, In=In0, R=R0) # named state vector passed to the ODE solver
tmax = 2000 # number of years to run
# Parameter values (units: per year)
parameters <- c(d=0.02, # per capita birth and death rate
b=120, # infection transmission rate
r=100 # recovery rate
)
# Define the epidemic model
# SIR-with-demography derivative function in the form required by
# deSolve::ode(): given time `t`, the named state vector (S, In, R) and the
# named parameter vector (d, b, r), return a list whose single element is the
# unnamed vector c(dS/dt, dIn/dt, dR/dt).
epimodel <- function(t, state, parameters) {
  v <- as.list(c(state, parameters))
  N <- v$S + v$In + v$R
  infection.flux <- v$b * v$S * v$In / N  # new infections per year
  recovery.flux <- v$r * v$In             # recoveries per year
  dS <- v$d * N - infection.flux - v$d * v$S
  dIn <- infection.flux - recovery.flux - v$d * v$In
  dR <- recovery.flux - v$d * v$R
  list(c(dS, dIn, dR))
}
times <- seq(0, tmax, by = 1) # times to solve the system for
library(deSolve)
# Solve the system
# ode() integrates epimodel from initial condition IC over `times`;
# atol/rtol tighten the solver tolerances relative to the defaults.
traj <- ode(y = IC, times = times, func = epimodel, parms = parameters,
atol = 1e-7, rtol = 1e-5)
traj <- as.data.frame(traj)
# Plot the number infected over time (y-axis capped at 100 infectives).
plot(traj$time, traj$In, type="l",
xlab="time (years)", ylab="number infected",
ylim=c(0,100))
|
/runepidemic.R
|
no_license
|
jhcho0915/R-and-R-Studio
|
R
| false
| false
| 1,154
|
r
|
# SIR epidemic model with demography (births/deaths at rate d), solved with
# deSolve::ode over `tmax` years and plotted as infected count over time.
# NOTE(review): rm(list=ls()) clears the caller's workspace -- a side effect
# worth removing if this file is ever source()d from other code.
rm(list=ls())
# Set up initial condition
N0 <- 1000000 # total population
In0 <- 10 # initial infectives
S0 <- N0-In0 # initially, everyone else is susceptible
R0 <- 0 # initially, nobody has recovered
IC <- c(S=S0, In=In0, R=R0) # named state vector passed to the ODE solver
tmax = 2000 # number of years to run
# Parameter values (units: per year)
parameters <- c(d=0.02, # per capita birth and death rate
b=120, # infection transmission rate
r=100 # recovery rate
)
# Define the epidemic model
# Derivative function in the form deSolve::ode expects: returns a list whose
# first element is c(dS, dIn, dR) evaluated at `state`.
epimodel<-function(t, state, parameters) {
with(as.list(c(state, parameters)),{
N <- S+In+R
# rate of change
dS <- d*N - b*S*In/N - d*S
dIn <- b*S*In/N - r*In - d*In
dR <- r*In - d*R
# return the rate of change
list(c(dS, dIn, dR))
}) # end with(as.list ...
}
times <- seq(0, tmax, by = 1) # times to solve the system for
library(deSolve)
# Solve the system
traj <- ode(y = IC, times = times, func = epimodel, parms = parameters,
atol = 1e-7, rtol = 1e-5)
traj <- as.data.frame(traj)
# Plot the infected trajectory (y-axis capped at 100 infectives).
plot(traj$time, traj$In, type="l",
xlab="time (years)", ylab="number infected",
ylim=c(0,100))
|
# NCS data exploration: relate data usage (V9) to average daily smartphone
# use time (V7) with a simple linear regression.
phone<-read.csv('c:/Java/phone-01.csv', header=FALSE)  # FALSE, not F: F is reassignable
summary(phone)
var(phone) # variance-covariance matrix of the columns
phone2 <- read.csv('c:/Java/phone-02.csv', header=FALSE, sep=',')
summary(phone2)
# Correlation matrix (note: cor() returns correlations, not covariances)
cor(phone2)
# Data usage (V9) correlates most strongly with average smartphone use time (V7),
# so estimate its effect on data usage with a simple regression of V9 on V7.
p<-lm(V9 ~ V7, data=phone2)  # bare column names in the formula; data= supplies them
# Fitted regression line (from the original analysis): y = -272.001 + 6.283*V7
summary(p)
plot(phone2$V9, phone2$V7, xlab='데이터 사용량', ylab='평균스마트폰 사용시')
runif(10)
|
/R/R/NCS데이터탐색.R
|
no_license
|
DreamingDataScientist/Learnning_R_Python
|
R
| false
| false
| 661
|
r
|
# NCS data exploration: simple regression of data usage on smartphone use time
phone<-read.csv('c:/Java/phone-01.csv', header=F)
summary(phone)
var(phone)
phone2 <- read.csv('c:/Java/phone-02.csv', header=F, sep=',')
summary(phone2)
# Measure association between the variables (NOTE(review): cor() returns the
# correlation matrix; the original comment called it covariance)
cor(phone2) # correlation matrix
# Data usage (V9) is significantly correlated with average smartphone use time (V7).
# Fit a simple regression to estimate the effect of the most significant
# predictor on data usage.
p<-lm(phone2$V9~phone2$V7, data=phone2)
# Fitted regression line: y = -272.001 + 6.283*phone2$V7
summary(p)
plot(phone2$V9, phone2$V7, xlab='데이터 사용량', ylab='평균스마트폰 사용시')
runif(10)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peek.R
\name{peek}
\alias{peek}
\alias{unpeek}
\title{Data Frame Viewing}
\usage{
peek(x, n = 10, width = 20, ...)
unpeek(x)
}
\arguments{
\item{x}{A \code{\link[base]{data.frame}} object.}
\item{n}{Number of rows to display.}
\item{width}{The width of the columns to be displayed.}
\item{\ldots}{For internal use.}
}
\value{
Prints a truncated head but invisibly returns \code{x}.
}
\description{
\code{peek} - Convenience function to view all the columns of the head
of a truncated \code{\link[base]{data.frame}}. \code{peek} invisibly returns
\code{x}. This makes its use ideal in a \pkg{dplyr}/\pkg{magrittr} pipeline.
\code{unpeek} - Strips out class \code{textreadr} so that the entire
\code{\link[base]{data.frame}} will be printed.
}
\details{
By default \pkg{dplyr} does not print all columns of a data frame
(\code{tbl_df}). This makes inspection of data difficult at times,
particularly with text string data. \code{peek} allows the user to see a
truncated head for inspection purposes.
}
\examples{
peek(mtcars)
peek(presidential_debates_2012)
}
\seealso{
\code{\link[utils]{head}}
}
|
/man/peek.Rd
|
no_license
|
bedantaguru/textreadr
|
R
| false
| true
| 1,183
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peek.R
\name{peek}
\alias{peek}
\alias{unpeek}
\title{Data Frame Viewing}
\usage{
peek(x, n = 10, width = 20, ...)
unpeek(x)
}
\arguments{
\item{x}{A \code{\link[base]{data.frame}} object.}
\item{n}{Number of rows to display.}
\item{width}{The width of the columns to be displayed.}
\item{\ldots}{For internal use.}
}
\value{
Prints a truncated head but invisibly returns \code{x}.
}
\description{
\code{peek} - Convenience function to view all the columns of the head
of a truncated \code{\link[base]{data.frame}}. \code{peek} invisibly returns
\code{x}. This makes its use ideal in a \pkg{dplyr}/\pkg{magrittr} pipeline.
\code{unpeek} - Strips out class \code{textreadr} so that the entire
\code{\link[base]{data.frame}} will be printed.
}
\details{
By default \pkg{dplyr} does not print all columns of a data frame
(\code{tbl_df}). This makes inspection of data difficult at times,
particularly with text string data. \code{peek} allows the user to see a
truncated head for inspection purposes.
}
\examples{
peek(mtcars)
peek(presidential_debates_2012)
}
\seealso{
\code{\link[utils]{head}}
}
|
#' ---
#' title: "Introduction to Simulation-based Inference"
#' author: "Aaron A. King and Edward L. Ionides"
#' output:
#' html_document:
#' toc: yes
#' toc_depth: 4
#' bibliography: ../sbied.bib
#' csl: ../ecology.csl
#' nocite: |
#' @King2008, @Romero-Severson2015, @He2010,
#' @Laneri2010, @King2015
#' ---
#'
#' \newcommand\prob[1]{\mathbb{P}\left[{#1}\right]}
#' \newcommand\expect[1]{\mathbb{E}\left[{#1}\right]}
#' \newcommand\var[1]{\mathrm{Var}\left[{#1}\right]}
#' \newcommand\dist[2]{\mathrm{#1}\left(#2\right)}
#' \newcommand\dlta[1]{{\Delta}{#1}}
#' \newcommand\lik{\mathcal{L}}
#' \newcommand\loglik{\ell}
#'
#' [Licensed under the Creative Commons Attribution-NonCommercial license](http://creativecommons.org/licenses/by-nc/4.0/).
#' Please share and remix noncommercially, mentioning its origin.
#' 
#'
#' Produced in **R** version `r getRversion()` using **pomp** version `r packageVersion("pomp")`.
#'
## ----opts,include=FALSE,cache=FALSE--------------------------------------
options(stringsAsFactors=FALSE) # do not auto-convert strings to factors (pre-R-4.0 default was TRUE)
library(ggplot2)
theme_set(theme_bw()) # use the black-and-white ggplot theme for all figures
set.seed(2028866059L) # fix the RNG seed so Monte Carlo results are reproducible
#'
#' --------
#'
#' --------
#'
#' ## Introduction: ecological and epidemiological dynamics
#'
#' - Ecological systems are complex, open, nonlinear, and nonstationary.
#' - "Laws of Nature" are unavailable except in the most general form.
#' - It is useful to model them as stochastic systems.
#' - For any observable phenomenon, multiple competing explanations are possible.
#' - Central scientific goals:
#' - Which explanations are most favored by the data?
#' - Which kinds of data are most informative?
#' - Central applied goals:
#' - How to design ecological or epidemiological intervention?
#' - How to make accurate forecasts?
#' - Time series are particularly useful sources of data.
#'
#' <br>
#'
#' -----
#'
#' -----
#'
#' ### Noisy clockwork: Time series analysis of population fluctuations in animals
#'
#' ##### Six problems of @Bjornstad2001
#'
#' Obstacles for **ecological** modeling and inference via nonlinear mechanistic models:
#'
#' 1. Combining measurement noise and process noise.
#' 2. Including covariates in mechanistically plausible ways.
#' 3. Using continuous-time models.
#' 4. Modeling and estimating interactions in coupled systems.
#' 5. Dealing with unobserved variables.
#' 6. Modeling spatial-temporal dynamics.
#'
#' The same issues arise for **epidemiological** modeling and inference via nonlinear mechanistic models.
#'
#' The *partially observed Markov process* modeling framework we focus on in this course addresses most of these problems effectively.
#'
#' <br>
#'
#' ------
#'
#' ------
#'
#' ## Objectives
#'
#' 1. To show how stochastic dynamical systems models can be used as scientific instruments.
#' 1. To teach statistically and computationally efficient approaches for performing scientific inference using POMP models.
#' 1. To give students the ability to formulate models of their own.
#' 1. To give students opportunities to work with such inference methods.
#' 1. To familiarize students with the **pomp** package.
#' 1. To provide documented examples for adaptation and re-use.
#'
#' <br>
#'
#' -------
#'
#' -------
#'
#' ## Questions and answers
#'
#' 1. [What roles are played by asymptomatic infection and waning immunity in cholera epidemics?](http://dx.doi.org/10.1038/nature07084)
#' 7. [Do subclinical infections of pertussis play an important epidemiological role?](http://dx.doi.org/10.1371/journal.pone.0072086)
#' 3. [What explains the seasonality of measles?](http://dx.doi.org/10.1098/rsif.2009.0151)
#' 2. [What is the contribution to the HIV epidemic of dynamic variation in sexual behavior of an individual over time? How does this compare to the role of heterogeneity between individuals?](http://dx.doi.org/10.1093/aje/kwv044)
#' 5. [What explains the interannual variability of malaria?](http://dx.doi.org/10.1371/journal.pcbi.1000898)
#' 6. [What will happen next in an Ebola outbreak?](http://dx.doi.org/10.1098/rspb.2015.0347)
#' 1. [How does vaccine-induced immunity fail?](http://doi.org/10.1017/S0031182015000979)
#' 1. [Can hydrology explain the seasonality of cholera?](http://doi.org/10.1016/j.advwatres.2016.11.012)
#' 1. [What is the contribution of adults to polio transmission?](http://doi.org/10.1073/pnas.1323688111)
#'
#' <br>
#'
#' ------------
#'
#' -----------
#'
#' ## Partially observed Markov process (POMP) models
#'
#' * Data $y^*_1,\dots,y^*_N$ collected at times $t_1<\dots<t_N$ are modeled as noisy, incomplete, and indirect observations of a Markov process $\{X(t), t\ge t_0\}$.
#'
#' * This is a __partially observed Markov process (POMP)__ model, also known as a hidden Markov model or a state space model.
#'
#' * $\{X(t)\}$ is Markov if the history of the process, $\{X(s), s\le t\}$, is uninformative about the future of the process, $\{X(s), s\ge t\}$, given the current value of the process, $X(t)$.
#'
#' * If all quantities important for the dynamics of the system are placed in the __state__, $X(t)$, then the Markov property holds by construction.
#'
#' * Systems with delays can usually be rewritten as Markovian systems, at least approximately.
#'
#' * An important special case: any system of differential equations is Markovian.
#'
#' * POMP models can include all the features desired by @Bjornstad2001.
#'
#' <br>
#'
#' -----------------
#'
#' ------------------------------
#'
#' ### Schematic of the structure of a POMP
#'
#' - Arrows in the following diagram show causal relations.
#'
#' <img src="pomp_schematic1.png" width="400" />
#'
#'
#' - A key perspective to keep in mind is that **the model is to be viewed as the process that generated the data**.
#'
#'
#' <br>
#'
#' ---------------------
#'
#' ---------------------
#'
#' #### Notation for partially observed Markov process models
#'
#' * Write $X_n=X(t_n)$ and $X_{0:N}=(X_0,\dots,X_N)$. Let $Y_n$ be a random variable modeling the observation at time $t_n$.
#'
#' * The one-step transition density, $f_{X_n|X_{n-1}}(x_n|x_{n-1};\theta)$, together with the measurement density, $f_{Y_n|X_n}(y_n|x_n;\theta)$ and the initial density, $f_{X_0}(x_0;\theta)$, specify the entire joint density via
#'
#' $$f_{X_{0:N},Y_{1:N}}(x_{0:N},y_{1:N};\theta) = f_{X_0}(x_0;\theta)\,\prod_{n=1}^N\!f_{X_n | X_{n-1}}(x_n|x_{n-1};\theta)\,f_{Y_n|X_n}(y_n|x_n;\theta).$$
#'
#' * The marginal density for sequence of measurements, $Y_{1:N}$, evaluated at the data, $y_{1:N}^*$, is
#'
#' $$ f_{Y_{1:N}}(y^*_{1:N};\theta)=\int f_{X_{0:N},Y_{1:N}}(x_{0:N},y^*_{1:N};\theta)\, dx_{0:N}.$$
#'
#' <br>
#'
#' ------------------------------
#'
#' ------------------------------
#'
#' ### Another POMP model schematic
#'
#' - In the following diagram, arrows show dependence among model variables:
#'
#'
#' - The state process, $X_n$, is Markovian, i.e.,
#'
#' $$f_{X_n|X_{0:n-1},Y_{1:n-1}}(x_n|x_{0:n-1},y_{1:n-1})=f_{X_n|X_{n-1}}(x_n|x_{n-1}).$$
#'
#' - Moreover, the measurable random variable, $Y_n$, depends only on the state at that time:
#' $$f_{Y_n|X_{0:N},Y_{1:n-1}}(y_n|x_{0:n},y_{1:n-1})=f_{Y_n|X_{n}}(y_n|x_n),$$
#' for all $n=1,\dots,N$.
#'
#' <br>
#'
#' -----------------
#'
#' ----------------
#'
#' ### Algorithms for POMP models
#'
#' To think algorithmically, we define some function calls:
#'
#' * `rprocess( )`: a draw from $f_{X_n|X_{n-1}}(x_n| x_{n-1};\theta)$
#'
#' * `dprocess( )`: evaluation of $f_{X_n|X_{n-1}}(x_n| x_{n-1};\theta)$
#'
#' * `rmeasure( )`: a draw from $f_{Y_n|X_n}(y_n| x_n;\theta)$
#'
#' * `dmeasure( )`: evaluation of $f_{Y_n|X_n}(y_n| x_n;\theta)$
#'
#' * `initializer( )`: a draw from $f_{X_0}(x_0;\theta)$
#'
#' <br>
#'
#' -------------
#'
#' -------------
#'
#' ### What does it mean for methodology to be __simulation-based__?
#'
#' * Simulating random processes is often much easier than evaluating their transition probabilities.
#'
#' * In other words, we may be able to write `rprocess()` but not `dprocess()`.
#'
#' * __Simulation-based__ methods require the user to specify `rprocess()` but not `dprocess()`.
#'
#' * __Plug-and-play__, __likelihood-free__ and __equation-free__ are alternative terms for "simulation-based" methods.
#'
#' * Much development of simulation-based statistical methodology has occurred in the past decade.
#'
#' <br>
#'
#' ------------
#'
#' ------------
#'
#' ## The **pomp** package for POMP models
#'
#' * **pomp** is an **R** package for data analysis using partially observed Markov process (POMP) models.
#'
#' * Note the distinction: lower case **pomp** is a software package;
#' upper case POMP is a class of models.
#'
#' * **pomp** builds methodology for POMP models in terms of arbitrary user-specified `rprocess()`, `dprocess()`, `rmeasure()`, and `dmeasure()` functions.
#'
#' * Following modern practice, most methodology in **pomp** is simulation-based, so does not require specification of `dprocess()`.
#'
#' * **pomp** has facilities to help construct `rprocess()`, `rmeasure()`, and `dmeasure()` functions for model classes of epidemiological interest.
#'
#' * **pomp** provides a forum for development, modification and sharing of models, methodology and data analysis workflows.
#'
#' <br>
#'
#' ---------
#'
#' ---------
#'
#' ### Example
#'
#' #### The deterministic Ricker map
#'
#' - The Ricker map describes the deterministic dynamics of a simple population,
#' $$N_{t+1} = r\,N_{t}\,\exp\big(-c\,N_{t}\big).$$
#' + $N_t$ is the population density at time $t$.
#' + $r$ is a fixed value (a parameter) describing the population's intrinsic capacity to increase in one unit of time.
#' + The parameter $c$ scales the density-dependent population regulation.
#' + The equilibrium population is $N_t=\log(r)/c$.
#'
#' - $N$ is a *state variable*, $r$ and $c$ are *parameters*. $r$ is dimensionless and $c$ has units of inverse density.
#'
#' - For simplicity, we will fix $c=1$ for the remainder of this document.
#'
#' - If we know $r$ and the *initial condition* $N_0$, the deterministic Ricker equation predicts the future population density at all times $t=1,2,\dots$.
#'
#' - We can view the initial condition, $N_0$ as a special kind of parameter, an *initial-value parameter*.
#'
#' <br>
#'
#' ---------
#'
#' --------
#'
#' #### Process noise
#'
#' - We can model process noise in this system by making the growth rate into a random variable with mean $r$.
#'
#' - For example, if we assume that the intrinsic growth rate is log-normally distributed, $N$ becomes a stochastic process governed by
#' $$N_{t+1} = r\,N_{t}\,\exp(-c\,N_{t}+\varepsilon_{t}), \qquad \varepsilon_{t}\;\sim\;\dist{Normal}{0,\sigma},$$
#' where the new parameter $\sigma$ is the standard deviation of the noise process $\varepsilon$.
#'
#' <br>
#'
#' -----------
#'
#' ----------
#'
#' #### Measurement error
#'
#' - Let's suppose that the Ricker model is our model for the dynamics of a real population.
#'
#' - However, we cannot know the exact population density at any time, but only estimate it through sampling.
#'
#' - Let's model measurement error by assuming the observed measurement, $y_t$, is modeled as a realization of a random variable $Y_t$ that is Poisson with mean $\phi\,N_t$:
#' $$Y_{t}\;\sim\;\dist{Poisson}{\phi\,N_{t}}$$
#'
#' - In this equation,
#'
#' 1. $N_t$ models the true population density at time $t$,
#' 2. $Y_t$ models the number of individuals sampled at time $t$,
#' 3. the parameter $\phi$ is proportional to our sampling effort.
#' 4. $Y_t$ is dimensionless, so $\phi N_t$ must also be dimensionless.
#'
#' <br>
#'
#' ------
#'
#' -----
#'
#' ### Working with the Ricker model in **pomp**.
#'
#' - The **R** package **pomp** provides facilities for modeling POMPs, a toolbox of statistical inference methods for analyzing data using POMPs, and a development platform for implementing new POMP inference methods.
#'
#' - The basic data-structure provided by **pomp** is the *object of class* `pomp`, alternatively known as a "pomp object".
#'
#' - It is a container that holds real or simulated data and a POMP model, possibly together with other information such as model parameters, that may be needed to do things with the model and data.
#'
#' Let's see what can be done with a pomp object.
#' First, we'll load some packages, including **pomp**.
#'
## ----prelims,cache=F-----------------------------------------------------
library(ggplot2)
library(reshape2)
library(pomp)
stopifnot(packageVersion("pomp")>="1.12") # the examples below require pomp >= 1.12
#'
#' A pre-built pomp object encoding the Ricker model comes included with the package. Load it by
#'
## ----load-ricker,cache=FALSE,results="hide"------------------------------
pompExample(ricker)
#'
#' This has the effect of creating a pomp object named `ricker` in your workspace.
#' We can plot the data by doing
#'
## ----plot-ricker---------------------------------------------------------
plot(ricker)
#'
#' We can simulate by doing
#'
## ----sim-ricker1---------------------------------------------------------
x <- simulate(ricker)
#'
#' What kind of object have we created?
#'
## ------------------------------------------------------------------------
class(x)
plot(x)
#'
#' Why do we see more time series in the simulated pomp object than in the original?
#'
#' We can turn a pomp object into a data frame:
#'
## ------------------------------------------------------------------------
y <- as.data.frame(ricker)
head(y)
head(simulate(ricker,as.data.frame=TRUE))
#'
#' We can also run multiple simulations simultaneously:
#'
## ------------------------------------------------------------------------
x <- simulate(ricker,nsim=10)
class(x)
sapply(x,class)
x <- simulate(ricker,nsim=10,as.data.frame=TRUE)
head(x)
str(x)
#'
#' It's often useful to plot several simulations from a model against the actual data.
#' One way to accomplish this is as follows.
#'
## ----fig.height=8--------------------------------------------------------
x <- simulate(ricker,nsim=9,as.data.frame=TRUE,include.data=TRUE)
ggplot(data=x,aes(x=time,y=y,group=sim,color=(sim=="data")))+
geom_line()+guides(color=FALSE)+
facet_wrap(~sim,ncol=2)
#'
#' We refer to the deterministic map as the "skeleton" of the stochastic map.
#'
#' We can compute a trajectory of the the deterministic skeleton using `trajectory`:
#'
## ----traj-ricker---------------------------------------------------------
y <- trajectory(ricker)
dim(y) #2 1 51
dimnames(y) #variable 'N' and 'e'
plot(time(ricker),y["N",1,],type="l")
#'
#'
#' We can extract or set the parameters in the pomp object using `coef`:
#'
## ----coef----------------------------------------------------------------
coef(ricker)
coef(ricker,"phi")
coef(ricker) <- c(phi=20,c=1,r=44,sigma=0.3,N.0=10,e.0=0)
coef(ricker)
coef(ricker,c("phi","c")) <- c(10,2)
coef(ricker)
#'
#' Note that the order in which the parameters appear is irrelevant.
#'
#' More information on manipulating and extracting information from pomp objects can be viewed in the help pages (`methods?pomp`).
#'
#' There are a number of other examples included with the package.
#' Do `pompExample()` to see a list of these.
#'
#' More examples can be found in the **pompExamples** package:
#'
#'
#' <br>
#'
#' -------
#'
#' -------
#'
#' ### Inference algorithms in **pomp**
#'
#' **pomp** provides a wide range of inference algorithms.
#' We'll learn about these in detail soon, but for now, let's just look at some of their general features.
#'
#' The `pfilter` function runs a simple particle filter.
#' It can be used to evaluate the likelihood at a particular set of parameters.
#' One uses the `Np` argument to specify the number of particles to use:
#'
## ----reset-ricker,include=FALSE------------------------------------------
pompExample(ricker)
#'
#'
## ----pfilter1------------------------------------------------------------
pf <- pfilter(ricker,Np=1000)
class(pf)
plot(pf)
logLik(pf)
#'
#' Note that `pfilter` returns an object of class `pfilterd.pomp`.
#' This is the general rule: inference algorithms return objects that are pomp objects with additional information.
#'
#' The package provides tools to extract this information.
#' We can run the particle filter again by doing
#'
## ----pfilter2------------------------------------------------------------
pf <- pfilter(pf)
logLik(pf)
#'
#' This is the result of running the same computation again.
#' Note that, because the particle filter is a Monte Carlo algorithm, we get a slightly different estimate of the log likelihood.
#'
#' Note that, by default, running `pfilter` on a `pfilterd.pomp` object causes the computation to be re-run with the same parameters as before.
#' Any additional arguments we add override these defaults.
#'
#' This is the general rule in **pomp**.
#' For example,
#'
## ----pfilter3------------------------------------------------------------
pf <- pfilter(pf,Np=100)
logLik(pf)
#'
#' Here, the particle filtering has been performed with only `r unique(pf@Np)` particles.
#'
#' <br>
#'
#' -------
#'
#' ------
#'
#' ### Building a custom pomp object
#'
#' The usefulness of **pomp** in scientific research hinges on its facilities for implementing the full range of POMP models.
#' To get started building custom `pomp` models, see this [introductory tutorial](./ricker.html).
#'
#' <br>
#'
#' ------
#'
#' ------
#'
#' #### A note on terminology
#'
#' If we know the state, $x(t_0)$, of the system at time $t_0$, it makes sense to speak about the entire trajectory of the system for all $t>t_0$.
#' This is true whether we are thinking of the system as deterministic or stochastic.
#' Of course, in the former case, the trajectory is uniquely determined by $x(t_0)$, while in the stochastic case, only the probability distribution of $x(t)$, $t>t_0$ is determined.
#' To avoid confusion, we use the term "trajectory" exclusively to refer to *trajectories of a deterministic process*.
#' Thus, the `trajectory` command iterates or integrates the deterministic skeleton forward in time, returning the unique trajectory determined by the specified parameters.
#' When we want to speak about sample paths of a stochastic process, we use the term *simulation*.
#' Accordingly, the `simulate` command always returns individual sample paths from the POMP.
#' In particular, we avoid "simulating a set of differential equations", preferring instead to speak of "integrating" the equations, or "computing trajectories".
#'
#' ------------------------------
#'
#' ## [Back to course homepage](../index.html)
#' ## [**R** codes for this document](http://raw.githubusercontent.com/kingaa/sbied/master/intro/intro.R)
#'
#' ----------------------
#'
#' ## References
|
/2016/likelihood estimation/code/intro.R
|
no_license
|
hendersonad/SISMID
|
R
| false
| false
| 18,859
|
r
|
#' ---
#' title: "Introduction to Simulation-based Inference"
#' author: "Aaron A. King and Edward L. Ionides"
#' output:
#' html_document:
#' toc: yes
#' toc_depth: 4
#' bibliography: ../sbied.bib
#' csl: ../ecology.csl
#' nocite: |
#' @King2008, @Romero-Severson2015, @He2010,
#' @Laneri2010, @King2015
#' ---
#'
#' \newcommand\prob[1]{\mathbb{P}\left[{#1}\right]}
#' \newcommand\expect[1]{\mathbb{E}\left[{#1}\right]}
#' \newcommand\var[1]{\mathrm{Var}\left[{#1}\right]}
#' \newcommand\dist[2]{\mathrm{#1}\left(#2\right)}
#' \newcommand\dlta[1]{{\Delta}{#1}}
#' \newcommand\lik{\mathcal{L}}
#' \newcommand\loglik{\ell}
#'
#' [Licensed under the Creative Commons Attribution-NonCommercial license](http://creativecommons.org/licenses/by-nc/4.0/).
#' Please share and remix noncommercially, mentioning its origin.
#' 
#'
#' Produced in **R** version `r getRversion()` using **pomp** version `r packageVersion("pomp")`.
#'
## ----opts,include=FALSE,cache=FALSE--------------------------------------
options(stringsAsFactors=FALSE) # do not auto-convert strings to factors (pre-R-4.0 default was TRUE)
library(ggplot2)
theme_set(theme_bw()) # black-and-white ggplot theme for all figures
set.seed(2028866059L) # fix the RNG seed for reproducibility
#'
#' --------
#'
#' --------
#'
#' ## Introduction: ecological and epidemiological dynamics
#'
#' - Ecological systems are complex, open, nonlinear, and nonstationary.
#' - "Laws of Nature" are unavailable except in the most general form.
#' - It is useful to model them as stochastic systems.
#' - For any observable phenomenon, multiple competing explanations are possible.
#' - Central scientific goals:
#' - Which explanations are most favored by the data?
#' - Which kinds of data are most informative?
#' - Central applied goals:
#' - How to design ecological or epidemiological intervention?
#' - How to make accurate forecasts?
#' - Time series are particularly useful sources of data.
#'
#' <br>
#'
#' -----
#'
#' -----
#'
#' ### Noisy clockwork: Time series analysis of population fluctuations in animals
#'
#' ##### Six problems of @Bjornstad2001
#'
#' Obstacles for **ecological** modeling and inference via nonlinear mechanistic models:
#'
#' 1. Combining measurement noise and process noise.
#' 2. Including covariates in mechanistically plausible ways.
#' 3. Using continuous-time models.
#' 4. Modeling and estimating interactions in coupled systems.
#' 5. Dealing with unobserved variables.
#' 6. Modeling spatial-temporal dynamics.
#'
#' The same issues arise for **epidemiological** modeling and inference via nonlinear mechanistic models.
#'
#' The *partially observed Markov process* modeling framework we focus on in this course addresses most of these problems effectively.
#'
#' <br>
#'
#' ------
#'
#' ------
#'
#' ## Objectives
#'
#' 1. To show how stochastic dynamical systems models can be used as scientific instruments.
#' 1. To teach statistically and computationally efficient approaches for performing scientific inference using POMP models.
#' 1. To give students the ability to formulate models of their own.
#' 1. To give students opportunities to work with such inference methods.
#' 1. To familiarize students with the **pomp** package.
#' 1. To provide documented examples for adaptation and re-use.
#'
#' <br>
#'
#' -------
#'
#' -------
#'
#' ## Questions and answers
#'
#' 1. [What roles are played by asymptomatic infection and waning immunity in cholera epidemics?](http://dx.doi.org/10.1038/nature07084)
#' 7. [Do subclinical infections of pertussis play an important epidemiological role?](http://dx.doi.org/10.1371/journal.pone.0072086)
#' 3. [What explains the seasonality of measles?](http://dx.doi.org/10.1098/rsif.2009.0151)
#' 2. [What is the contribution to the HIV epidemic of dynamic variation in sexual behavior of an individual over time? How does this compare to the role of heterogeneity between individuals?](http://dx.doi.org/10.1093/aje/kwv044)
#' 5. [What explains the interannual variability of malaria?](http://dx.doi.org/10.1371/journal.pcbi.1000898)
#' 6. [What will happen next in an Ebola outbreak?](http://dx.doi.org/10.1098/rspb.2015.0347)
#' 1. [How does vaccine-induced immunity fail?](http://doi.org/10.1017/S0031182015000979)
#' 1. [Can hydrology explain the seasonality of cholera?](http://doi.org/10.1016/j.advwatres.2016.11.012)
#' 1. [What is the contribution of adults to polio transmission?](http://doi.org/10.1073/pnas.1323688111)
#'
#' <br>
#'
#' ------------
#'
#' -----------
#'
#' ## Partially observed Markov process (POMP) models
#'
#' * Data $y^*_1,\dots,y^*_N$ collected at times $t_1<\dots<t_N$ are modeled as noisy, incomplete, and indirect observations of a Markov process $\{X(t), t\ge t_0\}$.
#'
#' * This is a __partially observed Markov process (POMP)__ model, also known as a hidden Markov model or a state space model.
#'
#' * $\{X(t)\}$ is Markov if the history of the process, $\{X(s), s\le t\}$, is uninformative about the future of the process, $\{X(s), s\ge t\}$, given the current value of the process, $X(t)$.
#'
#' * If all quantities important for the dynamics of the system are placed in the __state__, $X(t)$, then the Markov property holds by construction.
#'
#' * Systems with delays can usually be rewritten as Markovian systems, at least approximately.
#'
#' * An important special case: any system of differential equations is Markovian.
#'
#' * POMP models can include all the features desired by @Bjornstad2001.
#'
#' <br>
#'
#' -----------------
#'
#' ------------------------------
#'
#' ### Schematic of the structure of a POMP
#'
#' - Arrows in the following diagram show causal relations.
#'
#' <img src="pomp_schematic1.png" width="400" />
#'
#'
#' - A key perspective to keep in mind is that **the model is to be viewed as the process that generated the data**.
#'
#'
#' <br>
#'
#' ---------------------
#'
#' ---------------------
#'
#' #### Notation for partially observed Markov process models
#'
#' * Write $X_n=X(t_n)$ and $X_{0:N}=(X_0,\dots,X_N)$. Let $Y_n$ be a random variable modeling the observation at time $t_n$.
#'
#' * The one-step transition density, $f_{X_n|X_{n-1}}(x_n|x_{n-1};\theta)$, together with the measurement density, $f_{Y_n|X_n}(y_n|x_n;\theta)$ and the initial density, $f_{X_0}(x_0;\theta)$, specify the entire joint density via
#'
#' $$f_{X_{0:N},Y_{1:N}}(x_{0:N},y_{1:N};\theta) = f_{X_0}(x_0;\theta)\,\prod_{n=1}^N\!f_{X_n | X_{n-1}}(x_n|x_{n-1};\theta)\,f_{Y_n|X_n}(y_n|x_n;\theta).$$
#'
#' * The marginal density for sequence of measurements, $Y_{1:N}$, evaluated at the data, $y_{1:N}^*$, is
#'
#' $$ f_{Y_{1:N}}(y^*_{1:N};\theta)=\int f_{X_{0:N},Y_{1:N}}(x_{0:N},y^*_{1:N};\theta)\, dx_{0:N}.$$
#'
#' <br>
#'
#' ------------------------------
#'
#' ------------------------------
#'
#' ### Another POMP model schematic
#'
#' - In the following diagram, arrows show dependence among model variables:
#'
#'
#' - The state process, $X_n$, is Markovian, i.e.,
#'
#' $$f_{X_n|X_{0:n-1},Y_{1:n-1}}(x_n|x_{0:n-1},y_{1:n-1})=f_{X_n|X_{n-1}}(x_n|x_{n-1}).$$
#'
#' - Moreover, the measurable random variable, $Y_n$, depends only on the state at that time:
#' $$f_{Y_n|X_{0:N},Y_{1:n-1}}(y_n|x_{0:n},y_{1:n-1})=f_{Y_n|X_{n}}(y_n|x_n),$$
#' for all $n=1,\dots,N$.
#'
#' <br>
#'
#' -----------------
#'
#' ----------------
#'
#' ### Algorithms for POMP models
#'
#' To think algorithmically, we define some function calls:
#'
#' * `rprocess( )`: a draw from $f_{X_n|X_{n-1}}(x_n| x_{n-1};\theta)$
#'
#' * `dprocess( )`: evaluation of $f_{X_n|X_{n-1}}(x_n| x_{n-1};\theta)$
#'
#' * `rmeasure( )`: a draw from $f_{Y_n|X_n}(y_n| x_n;\theta)$
#'
#' * `dmeasure( )`: evaluation of $f_{Y_n|X_n}(y_n| x_n;\theta)$
#'
#' * `initializer( )`: a draw from $f_{X_0}(x_0;\theta)$
#'
#' <br>
#'
#' -------------
#'
#' -------------
#'
#' ### What does it mean for methodology to be __simulation-based__?
#'
#' * Simulating random processes is often much easier than evaluating their transition probabilities.
#'
#' * In other words, we may be able to write `rprocess()` but not `dprocess()`.
#'
#' * __Simulation-based__ methods require the user to specify `rprocess()` but not `dprocess()`.
#'
#' * __Plug-and-play__, __likelihood-free__ and __equation-free__ are alternative terms for "simulation-based" methods.
#'
#' * Much development of simulation-based statistical methodology has occurred in the past decade.
#'
#' <br>
#'
#' ------------
#'
#' ------------
#'
#' ## The **pomp** package for POMP models
#'
#' * **pomp** is an **R** package for data analysis using partially observed Markov process (POMP) models.
#'
#' * Note the distinction: lower case **pomp** is a software package;
#' upper case POMP is a class of models.
#'
#' * **pomp** builds methodology for POMP models in terms of arbitrary user-specified `rprocess()`, `dprocess()`, `rmeasure()`, and `dmeasure()` functions.
#'
#' * Following modern practice, most methodology in **pomp** is simulation-based, so does not require specification of `dprocess()`.
#'
#' * **pomp** has facilities to help construct `rprocess()`, `rmeasure()`, and `dmeasure()` functions for model classes of epidemiological interest.
#'
#' * **pomp** provides a forum for development, modification and sharing of models, methodology and data analysis workflows.
#'
#' <br>
#'
#' ---------
#'
#' ---------
#'
#' ### Example
#'
#' #### The deterministic Ricker map
#'
#' - The Ricker map describes the deterministic dynamics of a simple population,
#' $$N_{t+1} = r\,N_{t}\,\exp\big(-c\,N_{t}\big).$$
#' + $N_t$ is the population density at time $t$.
#' + $r$ is a fixed value (a parameter) describing the population's intrinsic capacity to increase in one unit of time.
#' + The parameter $c$ scales the density-dependent population regulation.
#' + The equilibrium population is $N_t=\log(r)/c$.
#'
#' - $N$ is a *state variable*, $r$ and $c$ are *parameters*. $r$ is dimensionless and $c$ has units of inverse density.
#'
#' - For simplicity, we will fix $c=1$ for the remainder of this document.
#'
#' - If we know $r$ and the *initial condition* $N_0$, the deterministic Ricker equation predicts the future population density at all times $t=1,2,\dots$.
#'
#' - We can view the initial condition, $N_0$ as a special kind of parameter, an *initial-value parameter*.
#'
#' <br>
#'
#' ---------
#'
#' --------
#'
#' #### Process noise
#'
#' - We can model process noise in this system by making the growth rate into a random variable with mean $r$.
#'
#' - For example, if we assume that the intrinsic growth rate is log-normally distributed, $N$ becomes a stochastic process governed by
#' $$N_{t+1} = r\,N_{t}\,\exp(-c\,N_{t}+\varepsilon_{t}), \qquad \varepsilon_{t}\;\sim\;\dist{Normal}{0,\sigma},$$
#' where the new parameter $\sigma$ is the standard deviation of the noise process $\varepsilon$.
#'
#' <br>
#'
#' -----------
#'
#' ----------
#'
#' #### Measurement error
#'
#' - Let's suppose that the Ricker model is our model for the dynamics of a real population.
#'
#' - However, we cannot know the exact population density at any time, but only estimate it through sampling.
#'
#' - Let's model measurement error by assuming the observed measurement, $y_t$, is modeled as a realization of a random variable $Y_t$ that is Poisson with mean $\phi\,N_t$:
#' $$Y_{t}\;\sim\;\dist{Poisson}{\phi\,N_{t}}$$
#'
#' - In this equation,
#'
#' 1. $N_t$ models the true population density at time $t$,
#' 2. $Y_t$ models the number of individuals sampled at time $t$,
#' 3. the parameter $\phi$ is proportional to our sampling effort.
#' 4. $Y_t$ is dimensionless, so $\phi N_t$ must also be dimensionless.
#'
#' <br>
#'
#' ------
#'
#' -----
#'
#' ### Working with the Ricker model in **pomp**.
#'
#' - The **R** package **pomp** provides facilities for modeling POMPs, a toolbox of statistical inference methods for analyzing data using POMPs, and a development platform for implementing new POMP inference methods.
#'
#' - The basic data-structure provided by **pomp** is the *object of class* `pomp`, alternatively known as a "pomp object".
#'
#' - It is a container that holds real or simulated data and a POMP model, possibly together with other information such as model parameters, that may be needed to do things with the model and data.
#'
#' Let's see what can be done with a pomp object.
#' First, we'll load some packages, including **pomp**.
#'
## ----prelims,cache=F-----------------------------------------------------
# Load the packages used throughout this document:
#   ggplot2  - plotting simulated vs. observed series
#   reshape2 - data reshaping helpers
#   pomp     - the POMP modeling and inference toolbox itself
library(ggplot2)
library(reshape2)
library(pomp)
# Guard against stale installations: the examples below rely on the
# pomp >= 1.12 API (e.g. `pompExample()`, `include.data` in `simulate()`).
stopifnot(packageVersion("pomp")>="1.12")
#'
#' A pre-built pomp object encoding the Ricker model comes included with the package. Load it by
#'
## ----load-ricker,cache=FALSE,results="hide"------------------------------
# Creates a pomp object named `ricker` in the global workspace; it bundles
# the example data together with the stochastic Ricker model definition.
pompExample(ricker)
#'
#' This has the effect of creating a pomp object named `ricker` in your workspace.
#' We can plot the data by doing
#'
## ----plot-ricker---------------------------------------------------------
# The plot method for pomp objects shows each observed variable vs. time.
plot(ricker)
#'
#' We can simulate by doing
#'
## ----sim-ricker1---------------------------------------------------------
# Draw one realization from the full POMP (process noise + measurement
# model), using the parameters stored inside the `ricker` object.
x <- simulate(ricker)
#'
#' What kind of object have we created?
#'
## ------------------------------------------------------------------------
# `simulate()` returns another pomp object, so the same generics apply.
class(x)
# Plotting a simulated pomp object shows the latent states (N, e) in
# addition to the simulated observations y.
plot(x)
#'
#' Why do we see more time series in the simulated pomp object than in the original?
#'
#' We can turn a pomp object into a data frame:
#'
## ------------------------------------------------------------------------
# Coerce the data held in the pomp object into an ordinary data frame.
y <- as.data.frame(ricker)
head(y)
# `simulate()` can return a data frame directly via `as.data.frame=TRUE`;
# this includes the latent state columns alongside the observations.
head(simulate(ricker,as.data.frame=TRUE))
#'
#' We can also run multiple simulations simultaneously:
#'
## ------------------------------------------------------------------------
# With nsim > 1 and the default return type, `simulate()` gives a list of
# pomp objects, one per realization.
x <- simulate(ricker,nsim=10)
class(x)
sapply(x,class)
# With `as.data.frame=TRUE`, the realizations are instead stacked into a
# single data frame, distinguished by a `sim` column.
x <- simulate(ricker,nsim=10,as.data.frame=TRUE)
head(x)
str(x)
#'
#' It's often useful to plot several simulations from a model against the actual data.
#' One way to accomplish this is as follows.
#'
## ----fig.height=8--------------------------------------------------------
# Overlay nine simulated trajectories with the real data.
# `include.data=TRUE` appends the observed series as an extra realization
# whose `sim` level is the string "data"; coloring on `sim=="data"` then
# visually distinguishes the data panel from the simulated panels.
x <- simulate(ricker,nsim=9,as.data.frame=TRUE,include.data=TRUE)
ggplot(data=x,aes(x=time,y=y,group=sim,color=(sim=="data")))+
  # "none" suppresses the color legend; the logical form `FALSE` is
  # deprecated in ggplot2 >= 3.3.4, while "none" works in old and new.
  geom_line()+guides(color="none")+
  facet_wrap(~sim,ncol=2)
#'
#' We refer to the deterministic map as the "skeleton" of the stochastic map.
#'
#' We can compute a trajectory of the deterministic skeleton using `trajectory`:
#'
## ----traj-ricker---------------------------------------------------------
# `trajectory()` iterates the deterministic skeleton (no process noise,
# no measurement error). The result is an array indexed as
# [state variable, parameter set, time].
y <- trajectory(ricker)
dim(y) #2 1 51
dimnames(y) #variable 'N' and 'e'
# Plot the deterministic population density N over the observation times.
plot(time(ricker),y["N",1,],type="l")
#'
#'
#' We can extract or set the parameters in the pomp object using `coef`:
#'
## ----coef----------------------------------------------------------------
# `coef()` is both a getter and (via `coef<-`) a setter for the parameter
# vector stored inside a pomp object.
coef(ricker)
# Extract a single named parameter.
coef(ricker,"phi")
# Replace the full parameter vector; names, not positions, are matched.
coef(ricker) <- c(phi=20,c=1,r=44,sigma=0.3,N.0=10,e.0=0)
coef(ricker)
# Set a subset of parameters in place; the rest are left untouched.
coef(ricker,c("phi","c")) <- c(10,2)
coef(ricker)
#'
#' Note that the order in which the parameters appear is irrelevant.
#'
#' More information on manipulating and extracting information from pomp objects can be viewed in the help pages (`methods?pomp`).
#'
#' There are a number of other examples included with the package.
#' Do `pompExample()` to see a list of these.
#'
#' More examples can be found in the **pompExamples** package.
#'
#'
#' <br>
#'
#' -------
#'
#' -------
#'
#' ### Inference algorithms in **pomp**
#'
#' **pomp** provides a wide range of inference algorithms.
#' We'll learn about these in detail soon, but for now, let's just look at some of their general features.
#'
#' The `pfilter` function runs a simple particle filter.
#' It can be used to evaluate the likelihood at a particular set of parameters.
#' One uses the `Np` argument to specify the number of particles to use:
#'
## ----reset-ricker,include=FALSE------------------------------------------
# Reload the example to discard the parameter changes made above, so the
# particle-filter demonstrations below start from the default parameters.
pompExample(ricker)
#'
#'
## ----pfilter1------------------------------------------------------------
# Run a basic particle filter with 1000 particles at the parameters
# stored in the pomp object; this yields a Monte Carlo estimate of the
# log likelihood.
pf <- pfilter(ricker,Np=1000)
# The result is a 'pfilterd.pomp': a pomp object augmented with
# filtering output (effective sample sizes, conditional log likelihoods).
class(pf)
plot(pf)
logLik(pf)
#'
#' Note that `pfilter` returns an object of class `pfilterd.pomp`.
#' This is the general rule: inference algorithms return objects that are pomp objects with additional information.
#'
#' The package provides tools to extract this information.
#' We can run the particle filter again by doing
#'
## ----pfilter2------------------------------------------------------------
# Re-running `pfilter` on its own output repeats the computation with the
# same settings; being Monte Carlo, the log-likelihood estimate varies.
pf <- pfilter(pf)
logLik(pf)
#'
#' This is the result of running the same computation again.
#' Note that, because the particle filter is a Monte Carlo algorithm, we get a slightly different estimate of the log likelihood.
#'
#' Note that, by default, running `pfilter` on a `pfilterd.pomp` object causes the computation to be re-run with the same parameters as before.
#' Any additional arguments we add override these defaults.
#'
#' This is the general rule in **pomp**.
#' For example,
#'
## ----pfilter3------------------------------------------------------------
# Arguments supplied here override the stored defaults: this run uses
# only 100 particles, so the estimate is noisier than the ones above.
pf <- pfilter(pf,Np=100)
logLik(pf)
#'
#' Here, the particle filtering has been performed with only `r unique(pf@Np)` particles.
#'
#' <br>
#'
#' -------
#'
#' ------
#'
#' ### Building a custom pomp object
#'
#' The usefulness of **pomp** in scientific research hinges on its facilities for implementing the full range of POMP models.
#' To get started building custom `pomp` models, see this [introductory tutorial](./ricker.html).
#'
#' <br>
#'
#' ------
#'
#' ------
#'
#' #### A note on terminology
#'
#' If we know the state, $x(t_0)$, of the system at time $t_0$, it makes sense to speak about the entire trajectory of the system for all $t>t_0$.
#' This is true whether we are thinking of the system as deterministic or stochastic.
#' Of course, in the former case, the trajectory is uniquely determined by $x(t_0)$, while in the stochastic case, only the probability distribution of $x(t)$, $t>t_0$ is determined.
#' To avoid confusion, we use the term "trajectory" exclusively to refer to *trajectories of a deterministic process*.
#' Thus, the `trajectory` command iterates or integrates the deterministic skeleton forward in time, returning the unique trajectory determined by the specified parameters.
#' When we want to speak about sample paths of a stochastic process, we use the term *simulation*.
#' Accordingly, the `simulate` command always returns individual sample paths from the POMP.
#' In particular, we avoid "simulating a set of differential equations", preferring instead to speak of "integrating" the equations, or "computing trajectories".
#'
#' ------------------------------
#'
#' ## [Back to course homepage](../index.html)
#' ## [**R** codes for this document](http://raw.githubusercontent.com/kingaa/sbied/master/intro/intro.R)
#'
#' ----------------------
#'
#' ## References