content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Unit tests for resDS(): reformatting of DS (differential state) analysis
# results. NOTE(review): the seed is time-based, so the sampled DE genes
# differ between runs -- confirm this non-reproducibility is intended.
context("DS analysis results reformatting")
# load packages
suppressMessages({
    library(dplyr)
    library(purrr)
    library(SingleCellExperiment)
})
# generate toy dataset
seed <- as.numeric(format(Sys.time(), "%s"))
set.seed(seed)
x <- .toySCE()  # package-internal helper; builds a toy SingleCellExperiment
# nk / ns / ng = number of cluster / sample / group levels
nk <- length(kids <- levels(x$cluster_id))
ns <- length(sids <- levels(x$sample_id))
ng <- length(gids <- levels(x$group_id))
# sample 'n_de' genes & multiply counts by 10 for 'g2/3'-cells
g23 <- x$group_id != "g1"
de_gs <- sample(rownames(x), (n_de <- 5))
assay(x[de_gs, g23]) <- assay(x[de_gs, g23]) * 10
# aggregate & run pseudobulk DS analysis
nc <- length(cs <- c(2, 3))  # test two model coefficients
y <- aggregateData(x, assay = "counts", fun = "sum")
y <- pbDS(y, coef = cs, verbose = FALSE)
test_that("resDS()", {
    # expected dimensions: 'col'-binding yields one row per gene-cluster
    # pair; 'row'-binding additionally stacks results per coefficient
    v <- list(col = list(nr = nrow(x)*nk, ng = nk, nk = nrow(x)))
    v$row <- lapply(v$col, "*", nc)
    v$col$char_cols <- c("gene", "cluster_id")
    v$row$char_cols <- c(v$col$char_cols, "coef")
    for (bind in c("row", "col")) {
        z <- resDS(x, y, bind, frq = FALSE, cpm = FALSE)
        expect_is(z, "data.frame")  # NOTE(review): expect_is() is deprecated in testthat 3e
        expect_identical(nrow(z), v[[bind]]$nr)
        expect_true(all(table(z$gene) == v[[bind]]$ng))
        expect_true(all(table(z$cluster_id) == v[[bind]]$nk))
        # identifier columns are character, everything else numeric
        is_char <- colnames(z) %in% v[[bind]]$char_cols
        expect_true(all(apply(z[, !is_char], 2, class) == "numeric"))
        expect_true(all(apply(z[, is_char], 2, class) == "character"))
    }
})
test_that("resDS() - 'frq = TRUE'", {
    # expression-frequency output: one column per sample plus one per
    # group, with all values in [0, 1] or NA
    z <- resDS(x, y, frq = TRUE)
    u <- z[, grep("frq", colnames(z))]
    expect_true(ncol(u) == ns + ng)
    expect_true(all(u <= 1 & u >= 0 | is.na(u)))
    # remove single cluster-sample instance
    s <- sample(sids, 1); k <- sample(kids, 1)
    x_ <- x[, !(x$sample_id == s & x$cluster_id == k)]
    y_ <- aggregateData(x_, assay = "counts", fun = "sum")
    y_ <- pbDS(y_, coef = cs, verbose = FALSE)
    z <- resDS(x_, y_, frq = TRUE)
    u <- z[, grep("frq", colnames(z))]
    expect_true(ncol(u) == ns + ng)
    expect_true(all(u <= 1 & u >= 0 | is.na(u)))
    # the removed cluster-sample combination must report frequency 0
    expect_true(all(z[z$cluster_id == k, paste0(s, ".frq")] == 0))
})
test_that("resDS() - 'cpm = TRUE'", {
    # CPM output: one column per sample; NOTE(review): asserting all values
    # are even multiples -- presumably follows from the toy data counts;
    # confirm against .toySCE()
    z <- resDS(x, y, cpm = TRUE)
    u <- z[, grep("cpm", colnames(z))]
    expect_true(ncol(u) == ns)
    expect_true(all(u %% 2 == 0 | is.na(u)))
})
|
/tests/testthat/test-resDS.R
|
no_license
|
jsadick/muscat
|
R
| false
| false
| 2,320
|
r
|
# Unit tests for resDS(): reformatting of DS (differential state) analysis
# results. NOTE(review): the seed is time-based, so the sampled DE genes
# differ between runs -- confirm this non-reproducibility is intended.
context("DS analysis results reformatting")
# load packages
suppressMessages({
    library(dplyr)
    library(purrr)
    library(SingleCellExperiment)
})
# generate toy dataset
seed <- as.numeric(format(Sys.time(), "%s"))
set.seed(seed)
x <- .toySCE()  # package-internal helper; builds a toy SingleCellExperiment
# nk / ns / ng = number of cluster / sample / group levels
nk <- length(kids <- levels(x$cluster_id))
ns <- length(sids <- levels(x$sample_id))
ng <- length(gids <- levels(x$group_id))
# sample 'n_de' genes & multiply counts by 10 for 'g2/3'-cells
g23 <- x$group_id != "g1"
de_gs <- sample(rownames(x), (n_de <- 5))
assay(x[de_gs, g23]) <- assay(x[de_gs, g23]) * 10
# aggregate & run pseudobulk DS analysis
nc <- length(cs <- c(2, 3))  # test two model coefficients
y <- aggregateData(x, assay = "counts", fun = "sum")
y <- pbDS(y, coef = cs, verbose = FALSE)
test_that("resDS()", {
    # expected dimensions: 'col'-binding yields one row per gene-cluster
    # pair; 'row'-binding additionally stacks results per coefficient
    v <- list(col = list(nr = nrow(x)*nk, ng = nk, nk = nrow(x)))
    v$row <- lapply(v$col, "*", nc)
    v$col$char_cols <- c("gene", "cluster_id")
    v$row$char_cols <- c(v$col$char_cols, "coef")
    for (bind in c("row", "col")) {
        z <- resDS(x, y, bind, frq = FALSE, cpm = FALSE)
        expect_is(z, "data.frame")  # NOTE(review): expect_is() is deprecated in testthat 3e
        expect_identical(nrow(z), v[[bind]]$nr)
        expect_true(all(table(z$gene) == v[[bind]]$ng))
        expect_true(all(table(z$cluster_id) == v[[bind]]$nk))
        # identifier columns are character, everything else numeric
        is_char <- colnames(z) %in% v[[bind]]$char_cols
        expect_true(all(apply(z[, !is_char], 2, class) == "numeric"))
        expect_true(all(apply(z[, is_char], 2, class) == "character"))
    }
})
test_that("resDS() - 'frq = TRUE'", {
    # expression-frequency output: one column per sample plus one per
    # group, with all values in [0, 1] or NA
    z <- resDS(x, y, frq = TRUE)
    u <- z[, grep("frq", colnames(z))]
    expect_true(ncol(u) == ns + ng)
    expect_true(all(u <= 1 & u >= 0 | is.na(u)))
    # remove single cluster-sample instance
    s <- sample(sids, 1); k <- sample(kids, 1)
    x_ <- x[, !(x$sample_id == s & x$cluster_id == k)]
    y_ <- aggregateData(x_, assay = "counts", fun = "sum")
    y_ <- pbDS(y_, coef = cs, verbose = FALSE)
    z <- resDS(x_, y_, frq = TRUE)
    u <- z[, grep("frq", colnames(z))]
    expect_true(ncol(u) == ns + ng)
    expect_true(all(u <= 1 & u >= 0 | is.na(u)))
    # the removed cluster-sample combination must report frequency 0
    expect_true(all(z[z$cluster_id == k, paste0(s, ".frq")] == 0))
})
test_that("resDS() - 'cpm = TRUE'", {
    # CPM output: one column per sample; NOTE(review): asserting all values
    # are even multiples -- presumably follows from the toy data counts;
    # confirm against .toySCE()
    z <- resDS(x, y, cpm = TRUE)
    u <- z[, grep("cpm", colnames(z))]
    expect_true(ncol(u) == ns)
    expect_true(all(u %% 2 == 0 | is.na(u)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/india.R
\name{get_india_regional_cases}
\alias{get_india_regional_cases}
\title{Indian Regional Daily COVID-19 Count Data - State}
\usage{
get_india_regional_cases()
}
\value{
A dataframe of daily India data to be further processed by \code{\link[=get_regional_data]{get_regional_data()}}.
}
\description{
Extracts daily COVID-19 data for India, stratified by State.
Data available at \url{https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv}.
It is loaded and then sanitised.
}
|
/man/get_india_regional_cases.Rd
|
permissive
|
mariabnd/covidregionaldata
|
R
| false
| true
| 581
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/india.R
\name{get_india_regional_cases}
\alias{get_india_regional_cases}
\title{Indian Regional Daily COVID-19 Count Data - State}
\usage{
get_india_regional_cases()
}
\value{
A dataframe of daily India data to be further processed by \code{\link[=get_regional_data]{get_regional_data()}}.
}
\description{
Extracts daily COVID-19 data for India, stratified by State.
Data available at \url{https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv}.
It is loaded and then sanitised.
}
|
#' @title Full copy number detection for targeted NGS panel data for
#' multiple samples
#' @description This function performs first quality control and runs
#' panelcn.mops for CNV detection on all test samples.
#' @param XandCB GRanges object of combined read counts of test samples and
#' control samples as returned by countBamListInGRanges
#' @param testiv vector of indices of test samples in XandCB. Default = c(1)
#' @param countWindows data.frame with contents of a BED file as returned by
#' getWindows
#' @param selectedGenes vector of names of genes of interest or NULL if all
#' genes are of interest. Default = NULL
#' @param I vector of positive real values containing the expected fold change
#' of the copy number classes. Length of this vector must be equal to the
#' length of the "classes" parameter vector. For targeted NGS panel data
#' the default is c(0.025,0.57,1,1.46,2)
#' @param normType type of the normalization technique. Each samples'
#' read counts are scaled such that the total number of reads are comparable
#' across samples. Options are "mean","median","poisson", "quant", and "mode"
#' Default = "quant"
#' @param sizeFactor parameter for calculating the size factors for
#' normalization. Options are "mean","median", "quant", and "mode".
#' Default = "quant"
#' @param qu Quantile of the normType if normType is set to "quant".
#' Real value between 0 and 1. Default = 0.25
#' @param quSizeFactor Quantile of the sizeFactor if sizeFactor is set to
#' "quant". 0.75 corresponds to "upper quartile normalization".
#' Real value between 0 and 1. Default = 0.75
#' @param norm the normalization strategy to be used. If set to 0 the read
#' counts are not normalized and cn.mops does not model different coverages.
#' If set to 1 the read counts are normalized. If set to 2 the read counts are
#' not normalized and cn.mops models different coverages. Default = 1.
#' @param priorImpact positive real value that reflects how strong the prior
#' assumption affects the result. The higher the value the more samples will be
#' assumed to have copy number 2. Default = 1
#' @param minMedianRC segments with median read counts over
#' all samples < minMedianRC are excluded from the analysis
#' @param maxControls integer reflecting the maximal numbers of controls to
#' use. If set to 0 all highly correlated controls are used. Default = 25
#' @param corrThresh threshold for selecting highly correlated controls.
#' Default = 0.99
#' @param sex either "mixed", "male", or "female" reflecting the sex of
#' all samples (test and control). Any other value falls back to "mixed".
#' @return list of instances of "CNVDetectionResult"
#' @import S4Vectors
#' @importClassesFrom cn.mops CNVDetectionResult
#' @examples
#' data(panelcn.mops)
#' XandCB <- test
#' elementMetadata(XandCB) <- cbind(elementMetadata(XandCB),
#' elementMetadata(control))
#' resultlist <- runPanelcnMops(XandCB, countWindows = countWindows)
#' @export
runPanelcnMops <- function(XandCB, testiv = c(1), countWindows,
                            selectedGenes = NULL,
                            I = c(0.025, 0.57, 1, 1.46, 2),
                            normType = "quant", sizeFactor = "quant",
                            qu = 0.25, quSizeFactor = 0.75,
                            norm = 1, priorImpact = 1, minMedianRC = 30,
                            maxControls = 25, corrThresh = 0.99,
                            sex = "mixed") {
    if (missing(countWindows)) {
        stop("\"countWindows\" need to be specified.")
    }
    if(!(sex %in% c("mixed", "male", "female"))) {
        message(paste0("Setting sex=", sex, " not possible - ",
                        "using sex=\"mixed\""))
        # BUG FIX: actually fall back to "mixed"; previously only the
        # message was printed and the invalid value was kept, so the
        # X-chromosome handling below behaved as for a valid single-sex
        # cohort.
        sex <- "mixed"
    }
    if (is.null(selectedGenes)) {
        message("All genes selected.")
        selectedGenes <- c()
    }
    # reorder read-count columns so that all test samples come first
    XandCB@elementMetadata <- XandCB@elementMetadata[,c(testiv,
        seq_len(ncol(XandCB@elementMetadata))[-testiv])]
    testiv <- seq_along(testiv)
    sampleNames <- colnames(XandCB@elementMetadata)
    message(paste0("Analyzing sample(s) ", sampleNames[testiv], "\n"))
    XandCBMatrix <- as.matrix(XandCB@elementMetadata)
    ## quality control
    maxRC <- apply(XandCBMatrix, 1, max)
    medianRC <- apply(XandCBMatrix, 1, median)
    sampleMedian <- apply(XandCBMatrix, 2, median)
    # samples whose median RC falls below 55% of the median control sample
    # are flagged as poor quality
    sampleThresh <- median(sampleMedian[-testiv])*0.55
    # sampleThresh <- mean(sampleMedian[-testiv]) - 2*sd(sampleMedian[-testiv])
    message(paste("new sampleThresh", sampleThresh))
    poorQual <- which(medianRC < minMedianRC)
    highRC <- which(maxRC >= 5000 & maxRC < 25000)
    veryHighRC <- which(maxRC >= 25000)
    poorSamples <- which(sampleMedian < sampleThresh)
    # scale down windows with extreme read counts (by 10 resp. 100) across
    # all samples
    for (h in highRC) {
        for (s in seq_len(ncol(XandCBMatrix))) {
            XandCB@elementMetadata[h,s] <- XandCBMatrix[h,s]/10
        }
    }
    for (h in veryHighRC) {
        for (s in seq_len(ncol(XandCBMatrix))) {
            XandCB@elementMetadata[h,s] <- XandCBMatrix[h,s]/100
        }
    }
    colnames(XandCB@elementMetadata) <- sampleNames
    if (length(highRC) > 0){
        message(paste0("Had to reduce read counts for exon ",
                        countWindows[highRC,]$name,"\n"))
    }
    if (length(veryHighRC) > 0){
        message(paste0("Had to reduce read counts for exon ",
                        countWindows[veryHighRC,]$name,"\n"))
    }
    if (length(poorQual) > 0) {
        message(paste("Cannot use exon", countWindows[poorQual,]$name, "\n"))
    }
    # sex chromosomes: X is only usable for single-sex cohorts, Y is
    # always ignored
    XChr <- c(which(countWindows$chromosome=="chrX" |
                        countWindows$chromosome=="X"))
    if (length(XChr) > 0) {
        if (sex=="mixed") {
            message(paste0("Ignoring X-chromosomal exons ",
                            "(sex is mixed/unknown).\n"))
        } else {
            message(paste0("All females or all males selected. ",
                            "Chromosome X treated like autosomes."))
            XChr <- c()
        }
        if (sex=="male") {
            message("Male: Note that CN2 is actually CN1 for chromosome X.")
        }
    }
    YChr <- c(which(countWindows$chromosome=="chrY" |
                        countWindows$chromosome=="Y"))
    if (length(YChr) > 0) {
        message(paste0("Ignoring Y-chromosomal exons."))
    }
    # NOTE: removed the unused 'usedExons' variable; its negative-empty
    # indexing also returned the wrong result when no exon was ignored
    ignoreExons <- unique(c(poorQual, XChr, YChr))
    subsetIdx <- rep(TRUE, nrow(countWindows))
    subsetIdx[ignoreExons] <- FALSE
    if (length(ignoreExons) > 0) {
        countWindows <- countWindows[-ignoreExons,]
    }
    # sort windows by (numeric) chromosome, then by start position
    countWindows <- countWindows[order(suppressWarnings(
        as.numeric(countWindows[,1])), countWindows[,2]),]
    if (length(selectedGenes) > 0) {
        geneInd <- c()
        for (g in selectedGenes) {
            geneIndTemp <- which(countWindows$gene==g)
            if (length(geneIndTemp) == 0) {
                message(paste0("Gene ", g, " not in \"countWindows\""))
            }
            geneInd <- c(geneInd, geneIndTemp)
        }
        if (length(geneInd) == 0) {
            stop(paste0("At least one of the \"selectedGenes\" needs to be ",
                        "in \"countWindows\"."))
        }
    } else {
        geneInd <- NULL
    }
    # poor-quality controls are dropped; poor-quality test samples are only
    # reported (they still need to be analyzed)
    poorDBSamples <- poorSamples[!(poorSamples %in% testiv)]
    poorTestSamples <- poorSamples[poorSamples %in% testiv]
    if (length(poorDBSamples) > 0) {
        # BUG FIX: condition previously tested length(poorSamples), which
        # printed an empty "Ignoring bad control sample" message when only
        # test samples were poor
        message(paste("Ignoring bad control sample", sampleNames[poorDBSamples],
                    "\n"))
    }
    if (length(poorTestSamples) > 0) {
        message(paste("Bad test sample", sampleNames[poorTestSamples], "\n"))
    }
    poorSamples <- poorDBSamples
    if (length(poorSamples) > 0) {
        XandCB <- XandCB[,-poorSamples]
        sampleNames <- sampleNames[-poorSamples]
        colnames(XandCB@elementMetadata) <- sampleNames
    }
    # run panelcn.mops once per test sample against the remaining controls
    ii <- 1
    resultlist <- list()
    for (t in testiv) {
        message(paste0("\nAnalyzing sample ", sampleNames[t], "\n"))
        controli <- seq_len(ncol(XandCB@elementMetadata))[-testiv]
        # a control matching the test sample's name is assumed to be the
        # test sample itself and is excluded from the control set
        dup <- grep(sampleNames[t], sampleNames[-testiv])
        if (length(dup) > 0) {
            message("Removing test sample from control samples\n")
            controli <- controli[-dup]
        }
        result <- panelcn.mops(subset(XandCB[,c(t,controli)], subsetIdx),
                            testi = 1, geneInd = geneInd, I = I,
                            priorImpact = priorImpact,
                            normType = normType, sizeFactor = sizeFactor,
                            qu = qu, quSizeFactor = quSizeFactor, norm = norm,
                            maxControls = maxControls, corrThresh = corrThresh)
        resultlist[[ii]] <- result
        ii <- ii + 1
    }
    return(resultlist)
}
# ---- roxygen documentation stubs for package data objects ----
# (NULL is the conventional placeholder object for data documentation)
#' Test data included in panelcn.mops
#' @name test
#' @docType data
#' @title GRanges object of countWindows with read counts for a test sample as
#' elementMetadata.
#' @description The object was created using the function
#' countBamListInGRanges with the enclosed countWindows object, a subset of a
#' BAM file provided by the 1000 Genomes Project and the read.width parameter
#' set to 150.
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' test
#' @author Gundula Povysil
NULL
#' Control data included in panelcn.mops
#' @name control
#' @docType data
#' @title GRanges object of countWindows with read counts for control samples
#' as elementMetadata.
#' @description The object was created using the function
#' countBamListInGRanges with the enclosed countWindows object, a subset of
#' BAM files provided by the 1000 Genomes Project and the read.width parameter
#' set to 150.
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' control
#' @author Gundula Povysil
NULL
#' Data included in panelcn.mops
#' @name countWindows
#' @docType data
#' @title result object of getWindows - a data.frame with the contents of
#' the provided BED file with an additional gene name and exon name column
#' @examples
#' data(panelcn.mops)
#' countWindows
#' @keywords data
#' @author Gundula Povysil
NULL
#' Result data included in panelcn.mops
#' @name resultlist
#' @docType data
#' @title result object of runPanelcnMops - a list of instances of
#' "CNVDetectionResult"
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' resultlist
#' @author Gundula Povysil
NULL
#' Data included in panelcn.mops
#' @name read.width
#' @docType data
#' @title read width used for calculating RCs of test and control
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' read.width
#' @author Gundula Povysil
NULL
|
/R/runPanelcnMops.R
|
no_license
|
bioinf-jku/panelcn.mops
|
R
| false
| false
| 10,685
|
r
|
#' @title Full copy number detection for targeted NGS panel data for
#' multiple samples
#' @description This function performs first quality control and runs
#' panelcn.mops for CNV detection on all test samples.
#' @param XandCB GRanges object of combined read counts of test samples and
#' control samples as returned by countBamListInGRanges
#' @param testiv vector of indices of test samples in XandCB. Default = c(1)
#' @param countWindows data.frame with contents of a BED file as returned by
#' getWindows
#' @param selectedGenes vector of names of genes of interest or NULL if all
#' genes are of interest. Default = NULL
#' @param I vector of positive real values containing the expected fold change
#' of the copy number classes. Length of this vector must be equal to the
#' length of the "classes" parameter vector. For targeted NGS panel data
#' the default is c(0.025,0.57,1,1.46,2)
#' @param normType type of the normalization technique. Each samples'
#' read counts are scaled such that the total number of reads are comparable
#' across samples. Options are "mean","median","poisson", "quant", and "mode"
#' Default = "quant"
#' @param sizeFactor parameter for calculating the size factors for
#' normalization. Options are "mean","median", "quant", and "mode".
#' Default = "quant"
#' @param qu Quantile of the normType if normType is set to "quant".
#' Real value between 0 and 1. Default = 0.25
#' @param quSizeFactor Quantile of the sizeFactor if sizeFactor is set to
#' "quant". 0.75 corresponds to "upper quartile normalization".
#' Real value between 0 and 1. Default = 0.75
#' @param norm the normalization strategy to be used. If set to 0 the read
#' counts are not normalized and cn.mops does not model different coverages.
#' If set to 1 the read counts are normalized. If set to 2 the read counts are
#' not normalized and cn.mops models different coverages. Default = 1.
#' @param priorImpact positive real value that reflects how strong the prior
#' assumption affects the result. The higher the value the more samples will be
#' assumed to have copy number 2. Default = 1
#' @param minMedianRC segments with median read counts over
#' all samples < minMedianRC are excluded from the analysis
#' @param maxControls integer reflecting the maximal numbers of controls to
#' use. If set to 0 all highly correlated controls are used. Default = 25
#' @param corrThresh threshold for selecting highly correlated controls.
#' Default = 0.99
#' @param sex either "mixed", "male", or "female" reflecting the sex of
#' all samples (test and control). Any other value falls back to "mixed".
#' @return list of instances of "CNVDetectionResult"
#' @import S4Vectors
#' @importClassesFrom cn.mops CNVDetectionResult
#' @examples
#' data(panelcn.mops)
#' XandCB <- test
#' elementMetadata(XandCB) <- cbind(elementMetadata(XandCB),
#' elementMetadata(control))
#' resultlist <- runPanelcnMops(XandCB, countWindows = countWindows)
#' @export
runPanelcnMops <- function(XandCB, testiv = c(1), countWindows,
                            selectedGenes = NULL,
                            I = c(0.025, 0.57, 1, 1.46, 2),
                            normType = "quant", sizeFactor = "quant",
                            qu = 0.25, quSizeFactor = 0.75,
                            norm = 1, priorImpact = 1, minMedianRC = 30,
                            maxControls = 25, corrThresh = 0.99,
                            sex = "mixed") {
    if (missing(countWindows)) {
        stop("\"countWindows\" need to be specified.")
    }
    if(!(sex %in% c("mixed", "male", "female"))) {
        message(paste0("Setting sex=", sex, " not possible - ",
                        "using sex=\"mixed\""))
        # BUG FIX: actually fall back to "mixed"; previously only the
        # message was printed and the invalid value was kept, so the
        # X-chromosome handling below behaved as for a valid single-sex
        # cohort.
        sex <- "mixed"
    }
    if (is.null(selectedGenes)) {
        message("All genes selected.")
        selectedGenes <- c()
    }
    # reorder read-count columns so that all test samples come first
    XandCB@elementMetadata <- XandCB@elementMetadata[,c(testiv,
        seq_len(ncol(XandCB@elementMetadata))[-testiv])]
    testiv <- seq_along(testiv)
    sampleNames <- colnames(XandCB@elementMetadata)
    message(paste0("Analyzing sample(s) ", sampleNames[testiv], "\n"))
    XandCBMatrix <- as.matrix(XandCB@elementMetadata)
    ## quality control
    maxRC <- apply(XandCBMatrix, 1, max)
    medianRC <- apply(XandCBMatrix, 1, median)
    sampleMedian <- apply(XandCBMatrix, 2, median)
    # samples whose median RC falls below 55% of the median control sample
    # are flagged as poor quality
    sampleThresh <- median(sampleMedian[-testiv])*0.55
    # sampleThresh <- mean(sampleMedian[-testiv]) - 2*sd(sampleMedian[-testiv])
    message(paste("new sampleThresh", sampleThresh))
    poorQual <- which(medianRC < minMedianRC)
    highRC <- which(maxRC >= 5000 & maxRC < 25000)
    veryHighRC <- which(maxRC >= 25000)
    poorSamples <- which(sampleMedian < sampleThresh)
    # scale down windows with extreme read counts (by 10 resp. 100) across
    # all samples
    for (h in highRC) {
        for (s in seq_len(ncol(XandCBMatrix))) {
            XandCB@elementMetadata[h,s] <- XandCBMatrix[h,s]/10
        }
    }
    for (h in veryHighRC) {
        for (s in seq_len(ncol(XandCBMatrix))) {
            XandCB@elementMetadata[h,s] <- XandCBMatrix[h,s]/100
        }
    }
    colnames(XandCB@elementMetadata) <- sampleNames
    if (length(highRC) > 0){
        message(paste0("Had to reduce read counts for exon ",
                        countWindows[highRC,]$name,"\n"))
    }
    if (length(veryHighRC) > 0){
        message(paste0("Had to reduce read counts for exon ",
                        countWindows[veryHighRC,]$name,"\n"))
    }
    if (length(poorQual) > 0) {
        message(paste("Cannot use exon", countWindows[poorQual,]$name, "\n"))
    }
    # sex chromosomes: X is only usable for single-sex cohorts, Y is
    # always ignored
    XChr <- c(which(countWindows$chromosome=="chrX" |
                        countWindows$chromosome=="X"))
    if (length(XChr) > 0) {
        if (sex=="mixed") {
            message(paste0("Ignoring X-chromosomal exons ",
                            "(sex is mixed/unknown).\n"))
        } else {
            message(paste0("All females or all males selected. ",
                            "Chromosome X treated like autosomes."))
            XChr <- c()
        }
        if (sex=="male") {
            message("Male: Note that CN2 is actually CN1 for chromosome X.")
        }
    }
    YChr <- c(which(countWindows$chromosome=="chrY" |
                        countWindows$chromosome=="Y"))
    if (length(YChr) > 0) {
        message(paste0("Ignoring Y-chromosomal exons."))
    }
    # NOTE: removed the unused 'usedExons' variable; its negative-empty
    # indexing also returned the wrong result when no exon was ignored
    ignoreExons <- unique(c(poorQual, XChr, YChr))
    subsetIdx <- rep(TRUE, nrow(countWindows))
    subsetIdx[ignoreExons] <- FALSE
    if (length(ignoreExons) > 0) {
        countWindows <- countWindows[-ignoreExons,]
    }
    # sort windows by (numeric) chromosome, then by start position
    countWindows <- countWindows[order(suppressWarnings(
        as.numeric(countWindows[,1])), countWindows[,2]),]
    if (length(selectedGenes) > 0) {
        geneInd <- c()
        for (g in selectedGenes) {
            geneIndTemp <- which(countWindows$gene==g)
            if (length(geneIndTemp) == 0) {
                message(paste0("Gene ", g, " not in \"countWindows\""))
            }
            geneInd <- c(geneInd, geneIndTemp)
        }
        if (length(geneInd) == 0) {
            stop(paste0("At least one of the \"selectedGenes\" needs to be ",
                        "in \"countWindows\"."))
        }
    } else {
        geneInd <- NULL
    }
    # poor-quality controls are dropped; poor-quality test samples are only
    # reported (they still need to be analyzed)
    poorDBSamples <- poorSamples[!(poorSamples %in% testiv)]
    poorTestSamples <- poorSamples[poorSamples %in% testiv]
    if (length(poorDBSamples) > 0) {
        # BUG FIX: condition previously tested length(poorSamples), which
        # printed an empty "Ignoring bad control sample" message when only
        # test samples were poor
        message(paste("Ignoring bad control sample", sampleNames[poorDBSamples],
                    "\n"))
    }
    if (length(poorTestSamples) > 0) {
        message(paste("Bad test sample", sampleNames[poorTestSamples], "\n"))
    }
    poorSamples <- poorDBSamples
    if (length(poorSamples) > 0) {
        XandCB <- XandCB[,-poorSamples]
        sampleNames <- sampleNames[-poorSamples]
        colnames(XandCB@elementMetadata) <- sampleNames
    }
    # run panelcn.mops once per test sample against the remaining controls
    ii <- 1
    resultlist <- list()
    for (t in testiv) {
        message(paste0("\nAnalyzing sample ", sampleNames[t], "\n"))
        controli <- seq_len(ncol(XandCB@elementMetadata))[-testiv]
        # a control matching the test sample's name is assumed to be the
        # test sample itself and is excluded from the control set
        dup <- grep(sampleNames[t], sampleNames[-testiv])
        if (length(dup) > 0) {
            message("Removing test sample from control samples\n")
            controli <- controli[-dup]
        }
        result <- panelcn.mops(subset(XandCB[,c(t,controli)], subsetIdx),
                            testi = 1, geneInd = geneInd, I = I,
                            priorImpact = priorImpact,
                            normType = normType, sizeFactor = sizeFactor,
                            qu = qu, quSizeFactor = quSizeFactor, norm = norm,
                            maxControls = maxControls, corrThresh = corrThresh)
        resultlist[[ii]] <- result
        ii <- ii + 1
    }
    return(resultlist)
}
# ---- roxygen documentation stubs for package data objects ----
# (NULL is the conventional placeholder object for data documentation)
#' Test data included in panelcn.mops
#' @name test
#' @docType data
#' @title GRanges object of countWindows with read counts for a test sample as
#' elementMetadata.
#' @description The object was created using the function
#' countBamListInGRanges with the enclosed countWindows object, a subset of a
#' BAM file provided by the 1000 Genomes Project and the read.width parameter
#' set to 150.
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' test
#' @author Gundula Povysil
NULL
#' Control data included in panelcn.mops
#' @name control
#' @docType data
#' @title GRanges object of countWindows with read counts for control samples
#' as elementMetadata.
#' @description The object was created using the function
#' countBamListInGRanges with the enclosed countWindows object, a subset of
#' BAM files provided by the 1000 Genomes Project and the read.width parameter
#' set to 150.
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' control
#' @author Gundula Povysil
NULL
#' Data included in panelcn.mops
#' @name countWindows
#' @docType data
#' @title result object of getWindows - a data.frame with the contents of
#' the provided BED file with an additional gene name and exon name column
#' @examples
#' data(panelcn.mops)
#' countWindows
#' @keywords data
#' @author Gundula Povysil
NULL
#' Result data included in panelcn.mops
#' @name resultlist
#' @docType data
#' @title result object of runPanelcnMops - a list of instances of
#' "CNVDetectionResult"
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' resultlist
#' @author Gundula Povysil
NULL
#' Data included in panelcn.mops
#' @name read.width
#' @docType data
#' @title read width used for calculating RCs of test and control
#' @keywords data
#' @examples
#' data(panelcn.mops)
#' read.width
#' @author Gundula Povysil
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCMC_utils.R
\name{decide}
\alias{decide}
\title{Makes the Metropolis-Hastings acceptance decision, based upon the input (log) Metropolis-Hastings ratio}
\usage{
decide(logMetropolisRatio)
}
\arguments{
\item{logMetropolisRatio}{The log of the Metropolis-Hastings ratio, which is calculated from model probabilities and forward/reverse transition probabilities. Calculated as the ratio of the model probability under the proposal to that under the current values multiplied by the ratio of the reverse transition probability to the forward transition probability.}
}
\description{
This function returns a logical TRUE/FALSE value, indicating whether the proposed transition should be accepted (TRUE) or rejected (FALSE).
}
\details{
The Metropolis-Hastings accept/reject decision is made as follows. If \code{logMetropolisRatio} is greater than 0, accept (return \code{TRUE}). Otherwise draw a uniform random number between 0 and 1 and accept if it is less than \code{exp(logMetropolisRatio)}; otherwise the proposed transition is rejected (return \code{FALSE}). If \code{logMetropolisRatio} is NA, NaN, or -Inf, a reject (\code{FALSE}) decision will be returned.
}
\author{
Daniel Turek
}
|
/packages/nimble/man/decide.Rd
|
permissive
|
DRJP/nimble
|
R
| false
| true
| 1,267
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCMC_utils.R
\name{decide}
\alias{decide}
\title{Makes the Metropolis-Hastings acceptance decision, based upon the input (log) Metropolis-Hastings ratio}
\usage{
decide(logMetropolisRatio)
}
\arguments{
\item{logMetropolisRatio}{The log of the Metropolis-Hastings ratio, which is calculated from model probabilities and forward/reverse transition probabilities. Calculated as the ratio of the model probability under the proposal to that under the current values multiplied by the ratio of the reverse transition probability to the forward transition probability.}
}
\description{
This function returns a logical TRUE/FALSE value, indicating whether the proposed transition should be accepted (TRUE) or rejected (FALSE).
}
\details{
The Metropolis-Hastings accept/reject decision is made as follows. If \code{logMetropolisRatio} is greater than 0, accept (return \code{TRUE}). Otherwise draw a uniform random number between 0 and 1 and accept if it is less than \code{exp(logMetropolisRatio)}; otherwise the proposed transition is rejected (return \code{FALSE}). If \code{logMetropolisRatio} is NA, NaN, or -Inf, a reject (\code{FALSE}) decision will be returned.
}
\author{
Daniel Turek
}
|
#' Scrape the web for Monty Python scripts
#'
#' Go get Monty Python scripts. This gets scripts
#' where the script is the multi-media version, not
#' the "working" version.
#'
#' @param offline Use an offline copy instead of fetching data
#' @param verbose Lots of printing
#' @return data.frame containing script info and script text
#' @export
#'
#' @examples
#' getScriptData(offline=TRUE)
getScriptData <- function(offline = FALSE, verbose = FALSE) {
  if (offline) return(scriptData)
  # Fetch every script page and row-bind the raw results.
  raw_data <- getScriptURLs() %>%
    purrr::by_row(getScript) %>%
    dplyr::bind_rows()
  if (verbose) message("Got script raw data")
  # Keep only multi-media scripts and split "<name> Part <n>" into columns.
  filtered_data <- raw_data %>%
    dplyr::filter(stringr::str_detect(name, "Script")) %>%
    dplyr::filter(!stringr::str_detect(name, "Scripts")) %>%
    dplyr::filter(!stringr::str_detect(name, "Working")) %>%
    tidyr::separate(name, into = c("Script", "Part"),
                    sep = stringr::fixed(" Part "), extra = "merge", fill = "right")
  if (verbose) message("Filtered raw data")
  # Strip the "Multi-media Script" suffix and keep one row per URL.
  deduped_data <- filtered_data %>%
    dplyr::mutate(Script = stringr::str_replace(Script, stringr::fixed(" Multi-media Script"), "")) %>%
    dplyr::mutate(Script = stringr::str_replace(Script, stringr::fixed(" Multi-Media Script"), "")) %>%
    dplyr::distinct() %>%
    dplyr::group_by(URL) %>%
    dplyr::filter(dplyr::row_number(Script) == 1) %>%
    dplyr::ungroup()
  if (verbose) message("Deduped data")
  # Assign one id per distinct show name.
  script_ids <- deduped_data %>%
    dplyr::group_by(Script) %>%
    dplyr::count() %>%
    dplyr::mutate(showid = dplyr::row_number(Script)) %>%
    dplyr::select(-n)
  # Attach ids, expand the per-page text column and tidy the text.
  output_data <- deduped_data %>%
    dplyr::inner_join(script_ids, by = "Script") %>%
    dplyr::mutate(scriptid = dplyr::row_number(Script)) %>%
    dplyr::filter(!is.na(.out)) %>%
    tidyr::unnest(.out) %>%
    dplyr::select(dplyr::ends_with("id"), dplyr::everything(), ScriptText = .out) %>%
    dplyr::mutate(ScriptText = stringr::str_replace_all(ScriptText, stringr::fixed("\t"), "")) %>%
    dplyr::mutate(ScriptText = stringr::str_trim(ScriptText))
  if (verbose) message("Produced final format")
  return(output_data)
}
|
/R/getScriptData.R
|
no_license
|
kashenfelter/TextAnalysis
|
R
| false
| false
| 2,100
|
r
|
#' Scrape the web for Monty Python scripts
#'
#' Go get Monty Python scripts. This gets scripts
#' where the script is the multi-media version, not
#' the "working" version.
#'
#' @param offline Use an offline copy instead of fetching data
#' @param verbose Lots of printing
#' @return data.frame containing script info and script text
#' @export
#'
#' @examples
#' getScriptData(offline=TRUE)
getScriptData <- function(offline = FALSE, verbose = FALSE) {
  if (offline) return(scriptData)
  # Fetch every script page and row-bind the raw results.
  raw_data <- getScriptURLs() %>%
    purrr::by_row(getScript) %>%
    dplyr::bind_rows()
  if (verbose) message("Got script raw data")
  # Keep only multi-media scripts and split "<name> Part <n>" into columns.
  filtered_data <- raw_data %>%
    dplyr::filter(stringr::str_detect(name, "Script")) %>%
    dplyr::filter(!stringr::str_detect(name, "Scripts")) %>%
    dplyr::filter(!stringr::str_detect(name, "Working")) %>%
    tidyr::separate(name, into = c("Script", "Part"),
                    sep = stringr::fixed(" Part "), extra = "merge", fill = "right")
  if (verbose) message("Filtered raw data")
  # Strip the "Multi-media Script" suffix and keep one row per URL.
  deduped_data <- filtered_data %>%
    dplyr::mutate(Script = stringr::str_replace(Script, stringr::fixed(" Multi-media Script"), "")) %>%
    dplyr::mutate(Script = stringr::str_replace(Script, stringr::fixed(" Multi-Media Script"), "")) %>%
    dplyr::distinct() %>%
    dplyr::group_by(URL) %>%
    dplyr::filter(dplyr::row_number(Script) == 1) %>%
    dplyr::ungroup()
  if (verbose) message("Deduped data")
  # Assign one id per distinct show name.
  script_ids <- deduped_data %>%
    dplyr::group_by(Script) %>%
    dplyr::count() %>%
    dplyr::mutate(showid = dplyr::row_number(Script)) %>%
    dplyr::select(-n)
  # Attach ids, expand the per-page text column and tidy the text.
  output_data <- deduped_data %>%
    dplyr::inner_join(script_ids, by = "Script") %>%
    dplyr::mutate(scriptid = dplyr::row_number(Script)) %>%
    dplyr::filter(!is.na(.out)) %>%
    tidyr::unnest(.out) %>%
    dplyr::select(dplyr::ends_with("id"), dplyr::everything(), ScriptText = .out) %>%
    dplyr::mutate(ScriptText = stringr::str_replace_all(ScriptText, stringr::fixed("\t"), "")) %>%
    dplyr::mutate(ScriptText = stringr::str_trim(ScriptText))
  if (verbose) message("Produced final format")
  return(output_data)
}
|
#Packages to be installed
# NOTE(review): unconditional install.packages() calls re-install on every
# run; consider guarding with requireNamespace() checks.
install.packages('dplyr')
install.packages('ggplot2')
install.packages("devtools")
devtools::install_github("phil8192/lazy-iris")
install.packages('caTools')
install.packages('rpart')
install.packages('rpart.plot')
#Libraries to be loaded
library(dplyr)
library(ggplot2)
require(lazyIris)  # require() only warns (not errors) if the package is missing
library(readr)
library(devtools)
library(rpart)
library(rpart.plot)
library(caTools)
#To clear Environment
rm(list =ls())  # NOTE(review): wipes the global environment; avoid in scripts
#Read the dataset from online (UCI machine learning repository)
iris_data <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
                    header = FALSE)
#Assigning column names to the dataset (first four numeric, fifth is the class)
colnames(iris_data)=c('sepal.length','sepal.width','petal.length','petal.width','Class')
checkData <- function(iris_data) {
  # Validate and clean the iris data frame.
  #
  # Args:
  #   iris_data: data.frame with four numeric measurement columns (1:4)
  #     and a Class column.
  # Returns:
  #   The cleaned data frame, invisibly (so interactive calls don't print
  #   the whole table). The original version computed the cleaned data but
  #   never returned it, so every removal was silently discarded.
  # Warnings are raised describing each cleaning step performed.

  # Clean missing values (could also interpolate).
  if (any(is.na(iris_data))) {
    iris_data <- iris_data[complete.cases(iris_data), ]
    warning("removed rows with missing values.")
  }
  # Remove duplicates (could also check for conflicting species).
  if (anyDuplicated(iris_data)) {
    iris_data <- unique(iris_data)
    warning("removed duplicated rows.")
  }
  # Remove strange measurements. Only compare the 4 numeric columns:
  # the original apply() over the whole data frame coerced every row to
  # character (because of the Class column), turning "<= 0" into a
  # string comparison.
  if (any(iris_data[, 1:4] <= 0)) {
    bad <- rowSums(iris_data[, 1:4] <= 0) > 0
    iris_data <- iris_data[!bad, ]
    warning("removed instances with width/length <= 0.")
  }
  # Check for anything odd. (could also check for outliers etc.)
  if (any(iris_data[, 1:4] > 100)) {
    warning("dataset contains gigantic iris plants.")
  }
  invisible(iris_data)
}
# Run the data checks. NOTE(review): the result of checkData() is not
# assigned, so any rows it removes internally do not affect iris_data here.
checkData(iris_data)
# Prompt the user for the 4 measurements of the query flower
query_from_user <- list(
  sepal.length = as.numeric(readline('Please input Sepal length')),
  sepal.width = as.numeric(readline('Please input Sepal width')),
  petal.length = as.numeric(readline('Please input petal length')),
  petal.width = as.numeric(readline('Please input petal width')))
# Obtain the 10 nearest neighbours using euclidean distance
# (knn presumably from lazyIris -- verify, not base R)
top.10 <- knn(query_from_user, iris_data, 10)
print(top.10, row.names=FALSE)
# Rename the 5th column (assumes the class labels sit in column 5)
colnames(top.10)[5]<-'Species'
# Visualisation of the query against its neighbours
visualise(iris_data, class.name="Class", query=query_from_user, neighbours=top.10,
          main="Iris data neighbours", plot.hist=FALSE, plot.cor=FALSE)
# To make the results reproducible
set.seed(6)
# Split into training (70%) and test data; sample.split (caTools)
# returns a logical mask over the rows
total_data <- sample.split(seq_len(nrow(iris_data)), 0.7)
train_data <- iris_data[total_data, ]
test_data <- iris_data[!total_data, ]
# Findings using decision trees
set.seed(2387)
# Build the decision tree model (Class predicted from all other columns)
dt_model <- rpart(Class ~ ., train_data) # training
# Visualise the decision tree (prp from rpart.plot)
prp(dt_model)
|
/iris_data_retrieval.R
|
no_license
|
shivaramselvaraj/shivaram_selvaraj
|
R
| false
| false
| 2,752
|
r
|
# Packages to be installed (one-time setup; safe to skip if already installed)
install.packages('dplyr')
install.packages('ggplot2')
install.packages("devtools")
devtools::install_github("phil8192/lazy-iris")
install.packages('caTools')
install.packages('rpart')
install.packages('rpart.plot')
# Libraries to be loaded
library(dplyr)
library(ggplot2)
# NOTE(review): prefer library() over require() here -- require() returns
# FALSE on failure instead of stopping the script.
require(lazyIris)
library(readr)
library(devtools)
library(rpart)
library(rpart.plot)
library(caTools)
# To clear the environment
rm(list =ls())
# Read the iris dataset online from the UCI Machine Learning Repository
# (the file has no header row)
iris_data <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
                      header = FALSE)
# Assign column names to the dataset
colnames(iris_data)=c('sepal.length','sepal.width','petal.length','petal.width','Class')
checkData <- function(iris_data) {
  # Validate and clean the iris data frame.
  #
  # Args:
  #   iris_data: data.frame with four numeric measurement columns (1:4)
  #     and a Class column.
  # Returns:
  #   The cleaned data frame, invisibly (so interactive calls don't print
  #   the whole table). The original version computed the cleaned data but
  #   never returned it, so every removal was silently discarded.
  # Warnings are raised describing each cleaning step performed.

  # Clean missing values (could also interpolate).
  if (any(is.na(iris_data))) {
    iris_data <- iris_data[complete.cases(iris_data), ]
    warning("removed rows with missing values.")
  }
  # Remove duplicates (could also check for conflicting species).
  if (anyDuplicated(iris_data)) {
    iris_data <- unique(iris_data)
    warning("removed duplicated rows.")
  }
  # Remove strange measurements. Only compare the 4 numeric columns:
  # the original apply() over the whole data frame coerced every row to
  # character (because of the Class column), turning "<= 0" into a
  # string comparison.
  if (any(iris_data[, 1:4] <= 0)) {
    bad <- rowSums(iris_data[, 1:4] <= 0) > 0
    iris_data <- iris_data[!bad, ]
    warning("removed instances with width/length <= 0.")
  }
  # Check for anything odd. (could also check for outliers etc.)
  if (any(iris_data[, 1:4] > 100)) {
    warning("dataset contains gigantic iris plants.")
  }
  invisible(iris_data)
}
# Run the data checks. NOTE(review): the result of checkData() is not
# assigned, so any rows it removes internally do not affect iris_data here.
checkData(iris_data)
# Prompt the user for the 4 measurements of the query flower
query_from_user <- list(
  sepal.length = as.numeric(readline('Please input Sepal length')),
  sepal.width = as.numeric(readline('Please input Sepal width')),
  petal.length = as.numeric(readline('Please input petal length')),
  petal.width = as.numeric(readline('Please input petal width')))
# Obtain the 10 nearest neighbours using euclidean distance
# (knn presumably from lazyIris -- verify, not base R)
top.10 <- knn(query_from_user, iris_data, 10)
print(top.10, row.names=FALSE)
# Rename the 5th column (assumes the class labels sit in column 5)
colnames(top.10)[5]<-'Species'
# Visualisation of the query against its neighbours
visualise(iris_data, class.name="Class", query=query_from_user, neighbours=top.10,
          main="Iris data neighbours", plot.hist=FALSE, plot.cor=FALSE)
# To make the results reproducible
set.seed(6)
# Split into training (70%) and test data; sample.split (caTools)
# returns a logical mask over the rows
total_data <- sample.split(seq_len(nrow(iris_data)), 0.7)
train_data <- iris_data[total_data, ]
test_data <- iris_data[!total_data, ]
# Findings using decision trees
set.seed(2387)
# Build the decision tree model (Class predicted from all other columns)
dt_model <- rpart(Class ~ ., train_data) # training
# Visualise the decision tree (prp from rpart.plot)
prp(dt_model)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/try_GET_content.R
\name{try_GET_content}
\alias{try_GET_content}
\title{Try httr::GET and httr::content up to 5 times; if all attempts fail, save the error message.}
\usage{
try_GET_content(url, times = 5)
}
\arguments{
\item{url}{The URL to read.}
\item{times}{Number of retry attempts (default 5).}
}
\description{
Try httr::GET and httr::content up to 5 times; if all attempts fail, save the error message.
}
|
/man/try_GET_content.Rd
|
permissive
|
lawine90/datagokR
|
R
| false
| true
| 468
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/try_GET_content.R
\name{try_GET_content}
\alias{try_GET_content}
\title{Try httr::GET and httr::content up to 5 times; if all attempts fail, save the error message.}
\usage{
try_GET_content(url, times = 5)
}
\arguments{
\item{url}{The URL to read.}
\item{times}{Number of retry attempts (default 5).}
}
\description{
Try httr::GET and httr::content up to 5 times; if all attempts fail, save the error message.
}
|
# Script for final project, November 20 2017
# Inbar Maayan
rm(list = ls())
# Load libraries (they are called again where needed, so that it's clear which library was used for which code)
library(truncnorm)
library(lme4)
library(rstanarm)
library(shinystan)
library(dplyr)
library(ggplot2)
library(rvertnet)
library(maps)
library(mapdata)
####################
# Simulate fake data
set.seed(95)
I <- 100 # number of islands in simulated dataset
spp <- round(rtruncnorm(n = I, a = 0, b = 15, mean = 6, sd = 3)) # how many species are on each island.
N <- sum(spp) #total number of species in the simulated dataset
hist(spp) #looks good
ID <- seq(1:N) # naming each species, here by number (species 1 through species N)
island <- as.numeric() # which island number (1-I) the species is on, for each species ID
for(i in 1:I){
  island = c(island, rep(i, spp[i]))
}
sp_num <- as.numeric() #number of species that occur on the island that a given species is on
for(i in 1:I){
  sp_num = c(sp_num, rep(spp[i], spp[i]))
}
###############
# Delete some species from the data. must re-run script from top each time you delete in order to make full dataframe to delete rows from
N #find out how many I have to begin with
fake_anolis <- data.frame(ID, island, sp_num)
# NOTE(review): runif() can draw repeated indices, so fewer than 200 distinct rows may be deleted.
delrows <- round(runif(200, 1, N))
obs_fake_anolis <- fake_anolis[-delrows,]
N <- length(obs_fake_anolis$ID) ###### a new N!
island <- obs_fake_anolis$island ##### a new island!
sp_num <- obs_fake_anolis$sp_num ##### a new sp_num!
###############
# Hyperparameters for alpha (intercept by island)
mu_a <- 1.3
sigma_a <- 0.04
b <- -0.004 # the relationship between number of congeners and sexual dimorphism (SD)
sigma_y <- 0.14 # the error not explained by the predictors in the model
# simulate intercepts (int) for each island
int_island <- rep(0,I)
for(i in 1:I){
  int_island[i] <- rnorm(1, mu_a, sigma_a)
}
# Visualize
hist(int_island)
hist(rnorm(1000, 0, 0.15 ))
# My MODEL for making sexual dimorphism for each species, which is sd ~ a(island) + b*number of species on each island + error
# NOTE(review): 'sd' shadows base::sd() from here on.
sd <- rep(0, N)
for(n in 1:N){
  sd[n] <- rnorm (1, int_island[island[n]] + b*sp_num[n], sigma_y)
}
# Center SD data
sd_c <- scale(sd, center= TRUE, scale = FALSE)
# Visualize
plot(sd_c~sd)
hist(sd_c)
library(lme4)
fit <- lmer(sd_c ~ sp_num + (1|island))
fit
#A note on the rstanarm default prior: "The default priors used in the various rstanarm modeling functions are intended to be
# weakly informative in that they provide moderate regularlization and help stabilize computation."
# Model in rstanarm
library(rstanarm)
library(shinystan)
fit <- stan_lmer(sd_c ~ sp_num + (1|island))
summary(fit, digits = 3)
launch_shinystan(fit)
################
## The real data
setwd("C:/Users/Inbar/Desktop/HARVARD/G2/Fall2017/OEB201_Modeling/Project")
library(dplyr)
library(ggplot2)
# Some housekeeping
dat <- read.csv ("BaseData.csv", header = TRUE)
options(stringsAsFactors = FALSE)
summary(dat)
names(dat)
dat$Digitizer <- as.factor(dat$Digitizer)
dat$Sex <- as.factor(dat$Sex)
dat$Species <- as.factor(dat$Species)
dat$Island <- as.factor(dat$Island)
dat$Ecomorph <- as.factor(dat$Ecomorph)
dat <- tbl_df(dat)
dat <- rename(dat, CommSize = friends)
# I have A. wattsi from two islands, but one of them is the subspecies anolis wattsi pogus. I remove it for simplicity.
dat <- dat[- grep("wattsi pogus", dat$ID),]
# Too little data for Anolis equestris, Anolis alutaceus, Anolis brunneus. Inelegantly remove species from data
dat2 <- tbl_df(filter(dat, !Species == "equestris"))
droplevels(dat2$Species)
dat2$Species <- factor(dat2$Species)
levels(dat2$Species)
dat3 <- tbl_df(filter(dat2, !Species == "alutaceus"))
droplevels(dat3$Species)
dat3$Species <- factor(dat3$Species)
levels(dat3$Species)
dat4 <- tbl_df(filter(dat3, !Species == "brunneus"))
droplevels(dat4$Species)
dat4$Species <- factor(dat4$Species)
levels(dat4$Species)
dat <- dat4
# Dataframe indicating which island each species comes from
d <- select(dat, Species, Island)
dd <- unique(d)
ddd <- arrange(dd, Species)
length(ddd$Species) #check that I have the correct number of species
# Calculate SD, number of congeners -- SD based on male & female mean values
females <- subset(dat, dat$Sex == "F")
fem <- aggregate(females$SVLruler, list(females$Species), mean, na.rm=TRUE)
fem <- rename(fem, species = Group.1, f_svl = x)
males <- subset(dat, dat$Sex == "M")
mal <- aggregate(males$SVLruler, list(males$Species), mean, na.rm=TRUE)
mal <- rename(mal, species = Group.1, m_svl = x)
spp_friends <- aggregate(dat$CommSize, list(dat$Species), mean)
# friends are the total number of species in the community in question
# these numbers are estimated from the "Mapping distributions" part (see below) and from some prior knowledge of Anolis distributions
anolis <- data.frame(fem$species, fem$f_svl, mal$m_svl, spp_friends$x, ddd$Island)
anolis <- rename(anolis, species = fem.species, f_svl = fem.f_svl, m_svl = mal.m_svl, friends = spp_friends.x, island = ddd.Island)
# Computing sexual dimorphism (SD = male SVL / female SVL), the response variable.
anolis$SD <- anolis$m_svl / anolis$f_svl
p <- ggplot() + geom_jitter(data = anolis, aes(x=friends, y = SD), size = 2)
p + theme_minimal()
library(lme4)
fit <- lmer(SD ~ friends + (1|island), data=anolis)
fit
fit <- stan_lmer(SD ~ friends + (1|island), data=anolis)
summary(fit, digits=3)
launch_shinystan(fit)
# Calculate SD, number of congeners -- SD based on male & female max values
females <- subset(dat, dat$Sex == "F")
fem <- aggregate(females$SVLruler, list(females$Species), max, na.rm=TRUE)
fem <- rename(fem, species = Group.1, f_svl = x)
males <- subset(dat, dat$Sex == "M")
mal <- aggregate(males$SVLruler, list(males$Species), max, na.rm=TRUE)
mal <- rename(mal, species = Group.1, m_svl = x)
spp_friends <- aggregate(dat$CommSize, list(dat$Species), mean)
anolis <- data.frame(fem$species, fem$f_svl, mal$m_svl, spp_friends$x, ddd$Island)
anolis <- rename(anolis, species = fem.species, f_svl = fem.f_svl, m_svl = mal.m_svl, friends = spp_friends.x, island = ddd.Island)
anolis$SD <- anolis$m_svl / anolis$f_svl
p <- ggplot() + geom_jitter(data = anolis, aes(x=friends, y = SD), size = 2)
p + theme_minimal()
library(lme4)
fit <- lmer(SD ~ friends + (1|island), data=anolis)
fit
fit <- stan_lmer(SD ~ friends + (1|island), data=anolis)
summary(fit, digits=3)
launch_shinystan(fit)
###############################
## Mapping lizard distributions, in order to get an idea of how many species are in each community
# Scrape museum data from VertNet repository (http://www.vertnet.org)
install.packages("rvertnet")
library(rvertnet)
# vector of species names to search Vertnet
# NOTE(review): 'liz' is not defined anywhere in this script -- it must already exist in the workspace.
spp <- c(names(liz[,1:60]))
name <- spp[60]
# Search vertnet
bigsearch(specificepithet = name, genus = "Anolis", mappable = TRUE,
          rfile = "anoles", email = "your@email") # to use, input your email
# After combining and cleaning the VertNet files in Excel (apologies for this), mapping species distributions
library(maps)
library(mapdata)
library(dplyr)
library(ggplot2)
# Read in lizard locality data
dist <- read.csv("C:/Users/Inbar/Desktop/anoles/csv/AllPoints.csv")
names(dist)
dist <- tbl_df(dist)
# Map the Caribbean
carib <- map("worldHires", col="gray95", fill=TRUE, ylim=c(9.7,29), xlim=c(-86,-60))
# Subsets of the Caribbean, to look at species distributions
dist$country <- as.factor(dist$country)
dist$col <- as.character(dist$col)
levels(dist$country)
# Northern Lesser Antilles
lessAnt <- map("worldHires", col="gray95", fill=TRUE, ylim=c(16.5,18.5), xlim=c(-65.5,-60))
title("Northern Lesser Antilles")
Nlessers <- c("USVI", "Leeward")
LessAnt <- filter(dist, country %in% Nlessers)
points(LessAnt$decimallongitude, LessAnt$decimallatitude, pch=8, col=LessAnt$col, cex=1)
legnames <- unique(LessAnt$specificepithet)
legcol <- unique(LessAnt$col)
legend("bottomright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Southern Lesser Antilles
lessAnt <- map("worldHires", col="gray95", fill=TRUE, ylim=c(11.4,16), xlim=c(-64,-58))
title("Southern Lesser Antilles")
Slessers <- c("Dominica", "Martinique", "St. Lucia", "Grenada", "Saint Vincent and the Grenadines")
LessAnt <- filter(dist, country %in% Slessers)
points(LessAnt$decimallongitude, LessAnt$decimallatitude, pch=8, col=LessAnt$col, cex=1)
legnames <- unique(LessAnt$specificepithet)
legcol <- unique(LessAnt$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Navassa: can't draw Navassa because it's too small and uninhabited to be on the map, but it only has A. longiceps on it.
# Cayman Islands
cayman <- map("worldHires", col="gray95", fill=TRUE, ylim=c(19.2,19.8), xlim=c(-81.5,-79.8))
title("Cayman Islands")
Caymans <- filter(dist, dist$country == "Cayman Islands")
points(Caymans$decimallongitude, Caymans$decimallatitude, pch=8, col=Caymans$col, cex=1)
legnames <- unique(Caymans$specificepithet)
legcol <- unique(Caymans$col)
legend("bottomright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# The Bahamas
bahamas <- map("worldHires", col="gray95", fill=TRUE, ylim=c(20.95,27.5), xlim=c(-78.9,-71.5))
title("The Bahamas")
Bahamas <- filter(dist, dist$country == "Bahamas")
points(Bahamas$decimallongitude, Bahamas$decimallatitude, pch=8, col=Bahamas$col, cex=1)
legnames <- unique(Bahamas$specificepithet)
legcol <- unique(Bahamas$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Cuba
cuba <- map("worldHires", col="gray95", fill=TRUE, ylim=c(19,23.3), xlim=c(-85,-74.2))
title("Cuba")
Cuba <- filter(dist, dist$country == "Cuba")
points(Cuba$decimallongitude, Cuba$decimallatitude, pch=8, col=Cuba$col, cex=1)
legnames <- unique(Cuba$specificepithet)
legcol <- unique(Cuba$col)
legend("bottomleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Hispaniola
hispanola <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.1,20.3), xlim=c(-74.5,-67.25))
title("Hispaniola \n (Haiti & The Dominican Republic)")
Hispanola <- filter(dist, dist$country == "Hispanola")
points(Hispanola$decimallongitude, Hispanola$decimallatitude, pch=8, col=Hispanola$col, cex=1)
legnames <- unique(Hispanola$specificepithet)
legcol <- unique(Hispanola$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Puerto Rico
puerto <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.9,18.7), xlim=c(-68,-65.2))
title("Puerto Rico")
Puerto <- filter(dist, dist$country == "Puerto Rico")
points(Puerto$decimallongitude, Puerto$decimallatitude, pch=8, col=Puerto$col, cex=1)
legnames <- unique(Puerto$specificepithet)
legcol <- unique(Puerto$col)
legend("topleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Jamaica (probably has too sparse sampling in my records to say anything about distributions)
jamaica <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.5,18.6), xlim=c(-79,-76))
title("Jamaica")
Jamaica <- filter(dist, dist$country == "Jamaica")
points(Jamaica$decimallongitude, Jamaica$decimallatitude, pch=8, col=Jamaica$col, cex=1)
legnames <- unique(Jamaica$specificepithet)
legcol <- unique(Jamaica$col)
legend("topleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
|
/finalprojects/Inbar/Maayan_script.R
|
no_license
|
lizzieinclass/oeb201
|
R
| false
| false
| 11,506
|
r
|
# Script for final project, November 20 2017
# Inbar Maayan
rm(list = ls())
# Load libraries (they are called again where needed, so that it's clear which library was used for which code)
library(truncnorm)
library(lme4)
library(rstanarm)
library(shinystan)
library(dplyr)
library(ggplot2)
library(rvertnet)
library(maps)
library(mapdata)
####################
# Simulate fake data
set.seed(95)
I <- 100 # number of islands in simulated dataset
spp <- round(rtruncnorm(n = I, a = 0, b = 15, mean = 6, sd = 3)) # how many species are on each island.
N <- sum(spp) #total number of species in the simulated dataset
hist(spp) #looks good
ID <- seq(1:N) # naming each species, here by number (species 1 through species N)
island <- as.numeric() # which island number (1-I) the species is on, for each species ID
for(i in 1:I){
  island = c(island, rep(i, spp[i]))
}
sp_num <- as.numeric() #number of species that occur on the island that a given species is on
for(i in 1:I){
  sp_num = c(sp_num, rep(spp[i], spp[i]))
}
###############
# Delete some species from the data. must re-run script from top each time you delete in order to make full dataframe to delete rows from
N #find out how many I have to begin with
fake_anolis <- data.frame(ID, island, sp_num)
# NOTE(review): runif() can draw repeated indices, so fewer than 200 distinct rows may be deleted.
delrows <- round(runif(200, 1, N))
obs_fake_anolis <- fake_anolis[-delrows,]
N <- length(obs_fake_anolis$ID) ###### a new N!
island <- obs_fake_anolis$island ##### a new island!
sp_num <- obs_fake_anolis$sp_num ##### a new sp_num!
###############
# Hyperparameters for alpha (intercept by island)
mu_a <- 1.3
sigma_a <- 0.04
b <- -0.004 # the relationship between number of congeners and sexual dimorphism (SD)
sigma_y <- 0.14 # the error not explained by the predictors in the model
# simulate intercepts (int) for each island
int_island <- rep(0,I)
for(i in 1:I){
  int_island[i] <- rnorm(1, mu_a, sigma_a)
}
# Visualize
hist(int_island)
hist(rnorm(1000, 0, 0.15 ))
# My MODEL for making sexual dimorphism for each species, which is sd ~ a(island) + b*number of species on each island + error
# NOTE(review): 'sd' shadows base::sd() from here on.
sd <- rep(0, N)
for(n in 1:N){
  sd[n] <- rnorm (1, int_island[island[n]] + b*sp_num[n], sigma_y)
}
# Center SD data
sd_c <- scale(sd, center= TRUE, scale = FALSE)
# Visualize
plot(sd_c~sd)
hist(sd_c)
library(lme4)
fit <- lmer(sd_c ~ sp_num + (1|island))
fit
#A note on the rstanarm default prior: "The default priors used in the various rstanarm modeling functions are intended to be
# weakly informative in that they provide moderate regularlization and help stabilize computation."
# Model in rstanarm
library(rstanarm)
library(shinystan)
fit <- stan_lmer(sd_c ~ sp_num + (1|island))
summary(fit, digits = 3)
launch_shinystan(fit)
################
## The real data
setwd("C:/Users/Inbar/Desktop/HARVARD/G2/Fall2017/OEB201_Modeling/Project")
library(dplyr)
library(ggplot2)
# Some housekeeping
dat <- read.csv ("BaseData.csv", header = TRUE)
options(stringsAsFactors = FALSE)
summary(dat)
names(dat)
dat$Digitizer <- as.factor(dat$Digitizer)
dat$Sex <- as.factor(dat$Sex)
dat$Species <- as.factor(dat$Species)
dat$Island <- as.factor(dat$Island)
dat$Ecomorph <- as.factor(dat$Ecomorph)
dat <- tbl_df(dat)
dat <- rename(dat, CommSize = friends)
# I have A. wattsi from two islands, but one of them is the subspecies anolis wattsi pogus. I remove it for simplicity.
dat <- dat[- grep("wattsi pogus", dat$ID),]
# Too little data for Anolis equestris, Anolis alutaceus, Anolis brunneus. Inelegantly remove species from data
dat2 <- tbl_df(filter(dat, !Species == "equestris"))
droplevels(dat2$Species)
dat2$Species <- factor(dat2$Species)
levels(dat2$Species)
dat3 <- tbl_df(filter(dat2, !Species == "alutaceus"))
droplevels(dat3$Species)
dat3$Species <- factor(dat3$Species)
levels(dat3$Species)
dat4 <- tbl_df(filter(dat3, !Species == "brunneus"))
droplevels(dat4$Species)
dat4$Species <- factor(dat4$Species)
levels(dat4$Species)
dat <- dat4
# Dataframe indicating which island each species comes from
d <- select(dat, Species, Island)
dd <- unique(d)
ddd <- arrange(dd, Species)
length(ddd$Species) #check that I have the correct number of species
# Calculate SD, number of congeners -- SD based on male & female mean values
females <- subset(dat, dat$Sex == "F")
fem <- aggregate(females$SVLruler, list(females$Species), mean, na.rm=TRUE)
fem <- rename(fem, species = Group.1, f_svl = x)
males <- subset(dat, dat$Sex == "M")
mal <- aggregate(males$SVLruler, list(males$Species), mean, na.rm=TRUE)
mal <- rename(mal, species = Group.1, m_svl = x)
spp_friends <- aggregate(dat$CommSize, list(dat$Species), mean)
# friends are the total number of species in the community in question
# these numbers are estimated from the "Mapping distributions" part (see below) and from some prior knowledge of Anolis distributions
anolis <- data.frame(fem$species, fem$f_svl, mal$m_svl, spp_friends$x, ddd$Island)
anolis <- rename(anolis, species = fem.species, f_svl = fem.f_svl, m_svl = mal.m_svl, friends = spp_friends.x, island = ddd.Island)
# Computing sexual dimorphism (SD = male SVL / female SVL), the response variable.
anolis$SD <- anolis$m_svl / anolis$f_svl
p <- ggplot() + geom_jitter(data = anolis, aes(x=friends, y = SD), size = 2)
p + theme_minimal()
library(lme4)
fit <- lmer(SD ~ friends + (1|island), data=anolis)
fit
fit <- stan_lmer(SD ~ friends + (1|island), data=anolis)
summary(fit, digits=3)
launch_shinystan(fit)
# Calculate SD, number of congeners -- SD based on male & female max values
females <- subset(dat, dat$Sex == "F")
fem <- aggregate(females$SVLruler, list(females$Species), max, na.rm=TRUE)
fem <- rename(fem, species = Group.1, f_svl = x)
males <- subset(dat, dat$Sex == "M")
mal <- aggregate(males$SVLruler, list(males$Species), max, na.rm=TRUE)
mal <- rename(mal, species = Group.1, m_svl = x)
spp_friends <- aggregate(dat$CommSize, list(dat$Species), mean)
anolis <- data.frame(fem$species, fem$f_svl, mal$m_svl, spp_friends$x, ddd$Island)
anolis <- rename(anolis, species = fem.species, f_svl = fem.f_svl, m_svl = mal.m_svl, friends = spp_friends.x, island = ddd.Island)
anolis$SD <- anolis$m_svl / anolis$f_svl
p <- ggplot() + geom_jitter(data = anolis, aes(x=friends, y = SD), size = 2)
p + theme_minimal()
library(lme4)
fit <- lmer(SD ~ friends + (1|island), data=anolis)
fit
fit <- stan_lmer(SD ~ friends + (1|island), data=anolis)
summary(fit, digits=3)
launch_shinystan(fit)
###############################
## Mapping lizard distributions, in order to get an idea of how many species are in each community
# Scrape museum data from VertNet repository (http://www.vertnet.org)
install.packages("rvertnet")
library(rvertnet)
# vector of species names to search Vertnet
# NOTE(review): 'liz' is not defined anywhere in this script -- it must already exist in the workspace.
spp <- c(names(liz[,1:60]))
name <- spp[60]
# Search vertnet
bigsearch(specificepithet = name, genus = "Anolis", mappable = TRUE,
          rfile = "anoles", email = "your@email") # to use, input your email
# After combining and cleaning the VertNet files in Excel (apologies for this), mapping species distributions
library(maps)
library(mapdata)
library(dplyr)
library(ggplot2)
# Read in lizard locality data
dist <- read.csv("C:/Users/Inbar/Desktop/anoles/csv/AllPoints.csv")
names(dist)
dist <- tbl_df(dist)
# Map the Caribbean
carib <- map("worldHires", col="gray95", fill=TRUE, ylim=c(9.7,29), xlim=c(-86,-60))
# Subsets of the Caribbean, to look at species distributions
dist$country <- as.factor(dist$country)
dist$col <- as.character(dist$col)
levels(dist$country)
# Northern Lesser Antilles
lessAnt <- map("worldHires", col="gray95", fill=TRUE, ylim=c(16.5,18.5), xlim=c(-65.5,-60))
title("Northern Lesser Antilles")
Nlessers <- c("USVI", "Leeward")
LessAnt <- filter(dist, country %in% Nlessers)
points(LessAnt$decimallongitude, LessAnt$decimallatitude, pch=8, col=LessAnt$col, cex=1)
legnames <- unique(LessAnt$specificepithet)
legcol <- unique(LessAnt$col)
legend("bottomright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Southern Lesser Antilles
lessAnt <- map("worldHires", col="gray95", fill=TRUE, ylim=c(11.4,16), xlim=c(-64,-58))
title("Southern Lesser Antilles")
Slessers <- c("Dominica", "Martinique", "St. Lucia", "Grenada", "Saint Vincent and the Grenadines")
LessAnt <- filter(dist, country %in% Slessers)
points(LessAnt$decimallongitude, LessAnt$decimallatitude, pch=8, col=LessAnt$col, cex=1)
legnames <- unique(LessAnt$specificepithet)
legcol <- unique(LessAnt$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Navassa: can't draw Navassa because it's too small and uninhabited to be on the map, but it only has A. longiceps on it.
# Cayman Islands
cayman <- map("worldHires", col="gray95", fill=TRUE, ylim=c(19.2,19.8), xlim=c(-81.5,-79.8))
title("Cayman Islands")
Caymans <- filter(dist, dist$country == "Cayman Islands")
points(Caymans$decimallongitude, Caymans$decimallatitude, pch=8, col=Caymans$col, cex=1)
legnames <- unique(Caymans$specificepithet)
legcol <- unique(Caymans$col)
legend("bottomright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# The Bahamas
bahamas <- map("worldHires", col="gray95", fill=TRUE, ylim=c(20.95,27.5), xlim=c(-78.9,-71.5))
title("The Bahamas")
Bahamas <- filter(dist, dist$country == "Bahamas")
points(Bahamas$decimallongitude, Bahamas$decimallatitude, pch=8, col=Bahamas$col, cex=1)
legnames <- unique(Bahamas$specificepithet)
legcol <- unique(Bahamas$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Cuba
cuba <- map("worldHires", col="gray95", fill=TRUE, ylim=c(19,23.3), xlim=c(-85,-74.2))
title("Cuba")
Cuba <- filter(dist, dist$country == "Cuba")
points(Cuba$decimallongitude, Cuba$decimallatitude, pch=8, col=Cuba$col, cex=1)
legnames <- unique(Cuba$specificepithet)
legcol <- unique(Cuba$col)
legend("bottomleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Hispaniola
hispanola <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.1,20.3), xlim=c(-74.5,-67.25))
title("Hispaniola \n (Haiti & The Dominican Republic)")
Hispanola <- filter(dist, dist$country == "Hispanola")
points(Hispanola$decimallongitude, Hispanola$decimallatitude, pch=8, col=Hispanola$col, cex=1)
legnames <- unique(Hispanola$specificepithet)
legcol <- unique(Hispanola$col)
legend("topright", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Puerto Rico
puerto <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.9,18.7), xlim=c(-68,-65.2))
title("Puerto Rico")
Puerto <- filter(dist, dist$country == "Puerto Rico")
points(Puerto$decimallongitude, Puerto$decimallatitude, pch=8, col=Puerto$col, cex=1)
legnames <- unique(Puerto$specificepithet)
legcol <- unique(Puerto$col)
legend("topleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
# Jamaica (probably has too sparse sampling in my records to say anything about distributions)
jamaica <- map("worldHires", col="gray95", fill=TRUE, ylim=c(17.5,18.6), xlim=c(-79,-76))
title("Jamaica")
Jamaica <- filter(dist, dist$country == "Jamaica")
points(Jamaica$decimallongitude, Jamaica$decimallatitude, pch=8, col=Jamaica$col, cex=1)
legnames <- unique(Jamaica$specificepithet)
legcol <- unique(Jamaica$col)
legend("topleft", legend=legnames, col=legcol, pch = 8, bg = "gray95")
|
\name{sampleParam}
\alias{sampleParam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
sampleParam
}
\description{
Find the parameters for a given entry number.
}
\usage{
sampleParam(name,data)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{ number of the entry}
\item{data}{data with parameters}
}
\value{
\item{index of spectra}{position of the spectra}
\item{param}{parameters of the selected sample}
%% ...
}
\references{
mylims.org
}
\author{
Julien Wist, Jessica Medina
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also \code{\link{lims.getNmrs}}.
}
\examples{
data(coffee)
sampleParam("8571129",data)
}
\keyword{ entry }
\keyword{ param }% __ONLY ONE__ keyword per line
|
/man/sampleParam.Rd
|
no_license
|
jwist/rLims
|
R
| false
| false
| 783
|
rd
|
\name{sampleParam}
\alias{sampleParam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
sampleParam
}
\description{
Find the parameters for a given entry number.
}
\usage{
sampleParam(name,data)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{ number of the entry}
\item{data}{data with parameters}
}
\value{
\item{index of spectra}{position of the spectra}
\item{param}{parameters of the selected sample}
%% ...
}
\references{
mylims.org
}
\author{
Julien Wist, Jessica Medina
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also \code{\link{lims.getNmrs}}.
}
\examples{
data(coffee)
sampleParam("8571129",data)
}
\keyword{ entry }
\keyword{ param }% __ONLY ONE__ keyword per line
|
#' Read named file paths for the key datasets
#'
#' Reads a two-column csv file of named file paths and returns them as a
#' one-row data frame whose column names are the path names, so individual
#' paths can be looked up as \code{paths$name}.
#'
#' @param pathfile Path to a csv file of named paths: the first column holds
#'   the path names (used as row names), the second column the file paths.
#'   Default: '~/Z/ABRAID/prevalence modelling/under five mortality/paths_for_nick.csv'
#' @return A data.frame with one row and one character column per named path.
#' @examples
#' \dontrun{
#' if (interactive()) {
#'   paths <- getPaths()
#' }
#' }
#' @rdname getPaths
#' @export
getPaths <- function(pathfile = "~/Z/ABRAID/prevalence modelling/under five mortality/paths_for_nick.csv") {
  # Read the named paths; the first csv column becomes the row names.
  paths <- read.csv(pathfile,
    row.names = 1
  )
  # Transpose so each named path becomes a column, keeping the paths as
  # character strings rather than factors.
  data.frame(t(paths),
    stringsAsFactors = FALSE
  )
}
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/getPaths.R
|
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
R
| false
| false
| 709
|
r
|
#' Read named file paths for the key datasets
#'
#' Reads a two-column csv file of named file paths and returns them as a
#' one-row data frame whose column names are the path names, so individual
#' paths can be looked up as \code{paths$name}.
#'
#' @param pathfile Path to a csv file of named paths: the first column holds
#'   the path names (used as row names), the second column the file paths.
#'   Default: '~/Z/ABRAID/prevalence modelling/under five mortality/paths_for_nick.csv'
#' @return A data.frame with one row and one character column per named path.
#' @examples
#' \dontrun{
#' if (interactive()) {
#'   paths <- getPaths()
#' }
#' }
#' @rdname getPaths
#' @export
getPaths <- function(pathfile = "~/Z/ABRAID/prevalence modelling/under five mortality/paths_for_nick.csv") {
  # Read the named paths; the first csv column becomes the row names.
  paths <- read.csv(pathfile,
    row.names = 1
  )
  # Transpose so each named path becomes a column, keeping the paths as
  # character strings rather than factors.
  data.frame(t(paths),
    stringsAsFactors = FALSE
  )
}
|
## Lecture 7 - Other types of regression
install.packages("tidyverse")
install.packages("titanic")
install.packages("AER")
library(tidyverse)
library(broom)
library(titanic)
theme_set(theme_light())
# Create plot to show why linear regression is not good for binomial data
df_logit <-
  tibble( y = seq(.0001,.9999,.0001),
          x = psych::logit(y),
  )
df <-
  tibble( x = c(rnorm(500, -5, 3) , rnorm(500, 5, 3)),
          y = c(rep(0, 500), rep(1,500))
  )
ggplot(df) +
  aes(x = x, y = y) +
  geom_point(alpha = .2) +
  geom_point(data = df_logit, size = .1, color = "blue") +
  # geom_smooth(method = "lm", color = "red", linetype = "dashed", se = FALSE) +
  coord_cartesian(ylim = c(-.25, 1.25)) +
  labs(x = "Predictor", y = "Outcome")
# Use case for logistic regression ----------------------------------------
# We will use the titanic dataset
# Make the table printing neat, transform variable names to lowercase
titanic <-
  titanic_train %>%
  rename_all(str_to_lower) %>%
  as_tibble()
# Fit logistic binomial regression
surv_fit <- glm(survived ~ fare * sex + sibsp + parch, family = "binomial", data = titanic)
summary(surv_fit)
tidy(surv_fit)
glance(surv_fit)
# To get the odds ratio, use the exp() function on the coefficients
exp(surv_fit$coefficients)
# Calculate confidence intervals for the ORs
exp(confint(surv_fit))
# But instead of the previous, do yourself a favor and use tidy with the following parameters to get ORs and conf int.
tidy(surv_fit, conf.int = TRUE, exponentiate = TRUE)
# Let's plot the data. Please mind that you need to tweak the arguments for geom_smooth() to fit a binomial logistic function.
ggplot(titanic) +
  aes(y = survived, x = fare, group = sex, color = sex) +
  geom_point() +
  geom_smooth(method = "glm", method.args = list(family = "binomial")) +
  coord_cartesian(ylim = c(0, 1)) +
  scale_y_continuous(labels = scales::percent_format())
# Reporting logistic regression
library(sjPlot)
tab_model(surv_fit, show.aic = TRUE, show.loglik = TRUE, collapse.se = TRUE)
# To save it to html, do:
# Coefficients are automatically transformed to Odds Ratios
surv_fit_table_html <-
  tab_model(surv_fit, show.aic = TRUE, show.loglik = TRUE, collapse.se = TRUE)
# You can save the results using the write_lines() function
write_lines(surv_fit_table_html, "surv_fit_table.html")
## Poisson regression
# Use poisson regression to predict a count-type variable (integer values, strongly right-skewed)
# We are predicting the number of family members on board, by age
titanic <-
  titanic %>%
  mutate(family = sibsp + parch)
# Check the distribution of family variable
titanic %>%
  ggplot() +
  aes(x = family) +
  geom_histogram(bins = 10)
# Yep, definitely poisson distribution
# Fitting a poisson regression is not difficult, just use the family = "poisson" parameter
family_fit_pois <- glm(family ~ age, family = "poisson", data = titanic)
# Check the results. They look very much like the output of logistic regression, only the model summary statistics are different
summary(family_fit_pois)
tidy(family_fit_pois, exponentiate = TRUE, conf.int = TRUE)
glance(family_fit_pois)
# However the poisson regression is not appropriate for data that has a large dispersion
# Dispersion should not be significantly larger than 1
# We can test the dispersion like this:
AER::dispersiontest(family_fit_pois)
# We have to run a negative binomial regression, since dispersion is 1.9 (variance is more than 2x the mean). This parameter was calculated using quasipoisson family.
family_fit_nb <- MASS::glm.nb(family ~ age, data = titanic)
# Check the results
summary(family_fit_nb)
tidy(family_fit_nb, exponentiate = TRUE, conf.int = TRUE)
glance(family_fit_nb)
# You can create all the diagnostic values as for linear regression
augment(family_fit_nb)
# Let's plot this. Mind the geom_smooth() parameters!
titanic %>%
  ggplot() +
  aes(y = family, x = age) +
  geom_point() +
  geom_smooth(method = "glm", method.args = list(family = "poisson"))
# When reporting poisson/negative binomial regression, you have to report the same things as in logistic regression
tab_model(family_fit_nb)
# Cumulative Link Model for Ordinal data
install.packages("ordinal")
install.packages("janitor")
library(ordinal)
library(janitor)
# We will use a dataset about the ratings of NYC restaurants from A to C
restaurants <- read_csv("https://data.cityofnewyork.us/api/views/43nn-pn8j/rows.csv")
# We drop some irrelevant variables, filter a few values, tidy variable names
rest_clean <-
  restaurants %>%
  janitor::clean_names() %>%
  select(boro, cuisine_description, critical_flag, score, grade) %>%
  drop_na() %>%
  filter(grade %in% c("A", "B", "C")) %>%
  filter(cuisine_description %in% c("African", "American", "Asian", "Latin", "Middle Eastern")) %>%
  filter(boro %in% c ("Bronx", "Brooklyn", "Manhattan", "Queens", "Staten Island"))
view(rest_clean)
# The dependent variable needs to be a factor
rest_clean <-
  rest_clean %>%
  mutate(grade = as.factor(grade),
         cuisine_description = fct_relevel(cuisine_description, "American"))
# Building the cumulative link model
# Reference levels: American cuisine and the Bronx
clm1 <- clm(grade ~ cuisine_description + boro, data = rest_clean)
summary(clm1)
# Running post-hoc tests
emmeans::emmeans(clm1, "cuisine_description", "boro")
# Testing the model assumption (proportional odds),
# with either the nominal_test or scale_test function
nominal_test(clm1)
scale_test(clm1)
# Let's plot our data
ggplot(rest_clean, aes(x = cuisine_description, y = grade)) +
  geom_boxplot(size = .75) +
  geom_jitter(alpha = .5) +
  facet_wrap("boro") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
|
/Lecture 7 - Other types of regression.R
|
no_license
|
nthun/Data-analysis-in-R-2019-20-1
|
R
| false
| false
| 5,816
|
r
|
## Lecture 7 - Other types of regression
install.packages("tidyverse")
install.packages("titanic")
install.packages("AER")
library(tidyverse)
library(broom)
library(titanic)
theme_set(theme_light())
# Create plot to show why linear regression is not good for binomial data
df_logit <-
tibble( y = seq(.0001,.9999,.0001),
x = psych::logit(y),
)
df <-
tibble( x = c(rnorm(500, -5, 3) , rnorm(500, 5, 3)),
y = c(rep(0, 500), rep(1,500))
)
ggplot(df) +
aes(x = x, y = y) +
geom_point(alpha = .2) +
geom_point(data = df_logit, size = .1, color = "blue") +
# geom_smooth(method = "lm", color = "red", linetype = "dashed", se = FALSE) +
coord_cartesian(ylim = c(-.25, 1.25)) +
labs(x = "Predictor", y = "Outcome")
# Use case for logistic regression ----------------------------------------
# We will use the titanic dataset
# Make the table printing neat, transform variable names to lowercase
titanic <-
titanic_train %>%
rename_all(str_to_lower) %>%
as_tibble()
# Fit logistic binomial regression
surv_fit <- glm(survived ~ fare * sex + sibsp + parch, family = "binomial", data = titanic)
summary(surv_fit)
tidy(surv_fit)
glance(surv_fit)
# To get the odds ratio, use the exp() function on the coefficients
exp(surv_fit$coefficients)
# Calculate confidence intervals for the ORs
exp(confint(surv_fit))
# But instead of the previous, do yourself a favor and use tidy with the following parameters to get ORs and conf int.
tidy(surv_fit, conf.int = TRUE, exponentiate = TRUE)
# Let's plot the data. Please mind that you need to tweak the arguments for geom_smooth() to fit a binomial logistic function.
ggplot(titanic) +
aes(y = survived, x = fare, group = sex, color = sex) +
geom_point() +
geom_smooth(method = "glm", method.args = list(family = "binomial")) +
coord_cartesian(ylim = c(0, 1)) +
scale_y_continuous(labels = scales::percent_format())
# Reporting logistic regression
library(sjPlot)
tab_model(surv_fit, show.aic = TRUE, show.loglik = TRUE, collapse.se = TRUE)
# To save it to html, do:
# Coefficients are automatically transformed to Odds Ratios
surv_fit_table_html <-
tab_model(surv_fit, show.aic = TRUE, show.loglik = TRUE, collapse.se = TRUE)
# You can save the results using the write_lines() function
write_lines(surv_fit_table_html, "surv_fit_table.html")
## Poisson regression
# Use Poisson regression to predict a count-type variable (non-negative
# integers with a long right tail, i.e. right-skewed)
# We are predicting the number of family members on board, by age
titanic <-
titanic %>%
mutate(family = sibsp + parch)
# Check the distribution of family variable
titanic %>%
ggplot() +
aes(x = family) +
geom_histogram(bins = 10)
# Yep, definitely poisson distribution
# Fitting a poisson regression is not difficult, just use the family = "poisson" parameter
family_fit_pois <- glm(family ~ age, family = "poisson", data = titanic)
# Check the results. They look very much like the output of logistic regression, only the model summary statistics are different
summary(family_fit_pois)
tidy(family_fit_pois, exponentiate = TRUE, conf.int = TRUE)
glance(family_fit_pois)
# However the Poisson regression is not appropriate for data that has a large dispersion
# Dispersion should not be significantly larger than 1
# We can test the dispersion like this:
AER::dispersiontest(family_fit_pois)
# We have to run a negative binomial regression, since dispersion is 1.9 (variance is more than 2x the mean). This parameter was calculated using quasipoisson family.
family_fit_nb <- MASS::glm.nb(family ~ age, data = titanic)
# Check the results
summary(family_fit_nb)
tidy(family_fit_nb, exponentiate = TRUE, conf.int = TRUE)
glance(family_fit_nb)
# You can create all the diagnostic values as for linear regression
augment(family_fit_nb)
# Let's plot this. Mind the geom_smooth() parameters!
titanic %>%
ggplot() +
aes(y = family, x = age) +
geom_point() +
geom_smooth(method = "glm", method.args = list(family = "poisson"))
# When reporting poisson/negative binomial regression, you have to report the same things as in logistic regression
tab_model(family_fit_nb)
#Cumulative Link Model for Ordinal data
install.packages("ordinal")
install.packages("janitor")
library(ordinal)
library(janitor)
# We will use a dataset about the ratings of NYC restaurants from A to C
restaurants <- read_csv("https://data.cityofnewyork.us/api/views/43nn-pn8j/rows.csv")
# we drop some irrelevant variables, filter a few values, tidy variable names
rest_clean <-
restaurants %>%
janitor::clean_names() %>%
select(boro, cuisine_description, critical_flag, score, grade) %>%
drop_na() %>%
filter(grade %in% c("A", "B", "C")) %>%
filter(cuisine_description %in% c("African", "American", "Asian", "Latin", "Middle Eastern")) %>%
filter(boro %in% c ("Bronx", "Brooklyn", "Manhattan", "Queens", "Staten Island"))
view(rest_clean)
# dependent variable needs to be a factor
rest_clean <-
rest_clean %>%
mutate(grade = as.factor(grade),
cuisine_description = fct_relevel(cuisine_description, "American"))
# Building the cumulative link model.
# Reference levels are American cuisine and (alphabetically first) the Bronx.
clm1 <- clm(grade ~ cuisine_description + boro, data = rest_clean)
summary(clm1)
# Running post-hoc tests
emmeans::emmeans(clm1, "cuisine_description", "boro")
# Testing the model assumption (the proportional odds assumption)
# with either the nominal_test or scale_test function
nominal_test(clm1)
scale_test(clm1)
#let's plot our data
ggplot(rest_clean, aes(x = cuisine_description, y = grade)) +
geom_boxplot(size = .75) +
geom_jitter(alpha = .5) +
facet_wrap("boro") +
theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/platform-tools.R
\name{gx.mapGenesToOntologies}
\alias{gx.mapGenesToOntologies}
\title{Runs the workflow \emph{Mapping to ontologies (Gene table)}}
\usage{
gx.mapGenesToOntologies(inputTable, species = "Human (Homo sapiens)",
resultFolder, skipCompleted = T, wait = T, verbose = F)
}
\arguments{
\item{inputTable}{input table with gene ids}
\item{species}{species of the input track genome}
\item{resultFolder}{path of result folder}
\item{skipCompleted}{skip already completed steps}
\item{wait}{set true to wait for the analysis to complete}
\item{verbose}{set true for more progress info}
}
\value{
the job id of the submitted task. The job id can be used to retrieve information about the status of the analysis.
}
\description{
Runs the workflow \emph{Mapping to ontologies (Gene table)}
}
\keyword{classification,}
\keyword{function}
\keyword{gene,}
\keyword{ontology,}
\keyword{workflow,}
|
/man/gx.mapGenesToOntologies.Rd
|
permissive
|
genexplain/geneXplainR
|
R
| false
| true
| 981
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/platform-tools.R
\name{gx.mapGenesToOntologies}
\alias{gx.mapGenesToOntologies}
\title{Runs the workflow \emph{Mapping to ontologies (Gene table)}}
\usage{
gx.mapGenesToOntologies(inputTable, species = "Human (Homo sapiens)",
resultFolder, skipCompleted = T, wait = T, verbose = F)
}
\arguments{
\item{inputTable}{input table with gene ids}
\item{species}{species of the input track genome}
\item{resultFolder}{path of result folder}
\item{skipCompleted}{skip already completed steps}
\item{wait}{set true to wait for the analysis to complete}
\item{verbose}{set true for more progress info}
}
\value{
the job id of the submitted task. The job id can be used to retrieve information about the status of the analysis.
}
\description{
Runs the workflow \emph{Mapping to ontologies (Gene table)}
}
\keyword{classification,}
\keyword{function}
\keyword{gene,}
\keyword{ontology,}
\keyword{workflow,}
|
# Interactive Command Line Input
# scan() reads string, integer, double, and complex values
# From Console
# Return a vector
a <- scan("", what="")
b <- scan("", what=integer())
c <- scan("", what=double())  # NOTE(review): `c` shadows base::c in this script's environment
d <- scan("", what=complex())
# From File (2 columns: age & name)
# Return a list; age=0 and name="" act as per-column type templates
x <- scan("useScan.txt", what=list(age=0, name=""))
|
/useScan.R
|
no_license
|
nurur/R-Programming
|
R
| false
| false
| 340
|
r
|
# Interactive Command Line Input
# scan() reads string, interger, double, and complex
# From Console
# Return a vector
a <- scan("", what="")
b <- scan("", what=integer())
c <- scan("", what=double())
d <- scan("", what=complex())
# From File (2 columns: age & name)
# Return a list
x <- scan("useScan.txt", what=list(age=0, name=""))
|
# Density plot of letter counts; assumes `first.letter.counts` /
# `second.letter.counts` are data frames with the counts in column V1,
# created upstream -- TODO confirm against the calling script.
plot1 <- ggplot(first.letter.counts, aes(x = V1)) + geom_density()
ggsave(file.path('graphs', 'plot1.pdf'))
plot2 <- ggplot(second.letter.counts, aes(x = V1)) + geom_density()
ggsave(file.path('graphs', 'plot2.pdf'))
|
/src/generate_plots.R
|
no_license
|
amberlb/lesson
|
R
| false
| false
| 217
|
r
|
plot1 <- ggplot(first.letter.counts, aes(x = V1)) + geom_density()
ggsave(file.path('graphs', 'plot1.pdf'))
plot2 <- ggplot(second.letter.counts, aes(x = V1)) + geom_density()
ggsave(file.path('graphs', 'plot2.pdf'))
|
# Auto-generated (valgrind fuzz) regression input for multivariance:::match_rows:
# A is a 4x2 matrix of extreme double values, B a 1x1 zero matrix.
testlist <- list(A = structure(c(-8.55771479639722e-310, 1.8449077940702e-233, 2.46924759901144e-169, 1.5937832719625e-219, 1.37920627895459e-312, 4.02152936677188e-87, 9.12488123524439e+192, 0), .Dim = c(4L, 2L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
# Inspect the structure of whatever match_rows returns
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103589-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 320
|
r
|
testlist <- list(A = structure(c(-8.55771479639722e-310, 1.8449077940702e-233, 2.46924759901144e-169, 1.5937832719625e-219, 1.37920627895459e-312, 4.02152936677188e-87, 9.12488123524439e+192, 0), .Dim = c(4L, 2L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
# dvariable bound(const dvariable& x, const double& l, const double& h, const double& eps)
# {
# dvariable ret;
# if((x>=(l+eps))&&(x<=(h-eps))){
# ret=x;
# }else{
# if(x<(l+eps)){
# ret=eps*exp((x-(l+eps))/eps)+l;
# }else{
# if(x>(h-eps)){
# ret=h-eps*exp(((h-eps)-x)/eps);
# }
# }
# }
# return ret;
# }
# Smoothly constrain a scalar x to the interval [l, h].
#
# Inside the core [l + eps, h - eps] the value passes through unchanged;
# outside it the value is bent back toward the interval with an
# exponential penalty (R port of the ADMB `bound` routine shown above).
bound <- function(x, l, h, eps)
{
  lo <- l + eps
  hi <- h - eps
  if (x >= lo && x <= hi) {
    out <- x
  } else if (x < lo) {
    out <- eps * exp((x - lo) / eps) + l
  } else {
    out <- h - eps * exp((hi - x) / eps)
  }
  return(out)
}
# Plot bound() over the grid [x1, x2] (step 0.001), windowed tightly
# around the upper limit h so the exponential roll-off at the boundary
# is visible.  Side effect only: draws on the active graphics device.
tbound<-function(x1,x2,l=0,h=10)
{
x<-seq(x1,x2,0.001)
y<-vector(length=length(x))
for (i in 1:length(x))
y[i]=bound(x[i],l,h,1e-3)
prange<-c(0.998*h,1.002*h)
plot(x,y,type='l',pch='+',xlim=prange,ylim=prange)
}
# Baranov exploitation fraction: the share of the population removed by
# fishing mortality F when natural mortality is M.
gF <- function(F, M)
{
  z <- F + M
  (F / z) * (1 - exp(-z))
}
# Analytic derivative of gF() with respect to F, i.e.
#   d/dF [ F/(F+M) * (1 - exp(-(F+M))) ]
# (obtainable via D(expression(...), "F")).  Used as the Newton slope in NRF().
dgdf <- function(F, M)
{
  z <- F + M
  decay <- exp(-z)
  (1 / z - F / z^2) * (1 - decay) + (F / z) * decay
}
# Baranov catch equation: expected catch taken from population P under
# fishing mortality F and natural mortality M.
BCE <- function(F, M, P)
{
  gF(F, M) * P
}
# Solve the Baranov catch equation for F by Newton-Raphson: find F such
# that BCE(F, M, P) == C.  Starts from the harvest-rate guess F = C/P and
# stops when the step drops to eps, when F runs away past 5, or after 25
# iterations.
NRF <- function(M, P, C, eps = 1e-5)
{
  max_iter <- 25
  iter <- 0
  F <- C / P
  step <- 100 * eps
  while (step > eps && F < 5)
  {
    resid <- C / BCE(F, M, P) - 1.0
    slope <- dgdf(F, M) * P
    step <- resid / slope
    F <- F + step
    iter <- iter + 1
    if (iter > max_iter)
      break
  }
  return(F)
}
# Compare the analytic derivative dgdf()*P against a central finite
# difference of BCE() over a grid of F values, printing both and their
# discrepancy.  NOTE(review): the F argument is immediately shadowed by
# the loop variable, so its default is never actually used.
test_gdiff<-function(F=0.1,M=0.1, P=50,eps=1e-5)
{
tF0 <- seq(0,2,.1)
for (F in tF0)
{
C1<-BCE(F-eps,M,P)
C2<-BCE(F+eps,M,P)
ndcdf = (C2-C1)/(2.0*eps)
adcdf = dgdf(F,M)*P
print(paste(F,ndcdf,adcdf,(ndcdf-adcdf)))
}
}
# Round-trip check: generate a catch from a known F via BCE(), then
# recover F from that catch with NRF().  Returns the estimate, which
# should be close to the F argument.
testNRF1<-function(F=0.05, M=0.1, P=50)
{
C <- BCE(F, M, P)
print(paste(P,C, F))
estF0 <- NRF(M, P, C)
return(estF0)
}
# Exercise NRF() over catches 1..10 for fixed M and P, printing for each
# case the recovered F and the catch that F reproduces through BCE().
# eps is forwarded to NRF() as the convergence tolerance.
testNRF<-function(M=0.1,P=5.35,eps=1e-5)
{
Cseq<-seq(1,10,1)
for (C in Cseq)
{
print("")
print(paste("**",C,P,sep=" "))
estF <- NRF(M, P, C, eps)
print(paste(" ",C,BCE(estF,M,P),estF,sep=" "))
}
}
######################################################
# Two-fleet Baranov catch equation: catch attributed to fishing
# mortality F0 when a second fleet exerts F1 and natural mortality is M,
# taken from population P.
CE <- function(F1, F0, M, P)
{
  total_z <- F1 + F0 + M
  (F0 / total_z) * (1.0 - exp(-total_z)) * P
}
# Partial derivative of CE() with respect to F0 (Newton slope for
# NR_F0()), with z = F1 + F0 + M factored out.  Algebraically the same
# expression D() produces for the catch equation (see DD() below).
dce_df0 <- function(F1, F0, M, P)
{
  z <- F1 + F0 + M
  decay <- exp(-z)
  ((1 / z - F0 / z^2) * (1 - decay) + (F0 / z) * decay) * P
}
#R: D
# -(((1/(F1 + F0 + M) - F0/(F1 + F0 + M)^2) * (1 - exp(-F1 - F0 - M)) + F0/(F1 + F0 + M) * exp(-F1 - F0 - M)) * P)
# maxima:
#(%i1) diff(C-F/(F+M)*(1-exp(-(F+M)))*P,F);
# - M - F - M - F - M - F
# F %e P (1 - %e ) P F (1 - %e ) P
#(%o1) - ------------- - ----------------- + -------------------
# M + F M + F 2
# (M + F)
# Newton-Raphson solve of the two-fleet catch equation: find F0 such
# that CE(F1, F0, M, P) == C, starting from the supplied F0 guess.
# Prints each iteration; returns the final F0.
# NOTE(review): the `it` argument is overwritten with 0 before any use,
# so whatever the caller passes is ignored.
# NOTE(review): the loop tests dx > eps rather than abs(dx) > eps, so a
# single negative (overshooting) step ends the iteration -- presumably
# acceptable for well-behaved inputs; confirm against intended use.
NR_F0<-function(F1,F0,M,P,C,it)
{
maxit <- 25
eps <- 1e-5
dx <- 1.0
it <- 0
while (dx > eps)
{
g <- C-CE(F1,F0,M,P)
dg <- dce_df0(F1,F0,M,P)
dx <- g/dg
F0 <- F0 + dx
it <- it + 1
print(paste("iteration ", it, ", dx = ", dx,", F0 = ", F0,sep=""))
if (it > maxit)
break;
}
return(F0)
}
# Round-trip check for NR_F0(): generate a catch from a known F0 via
# CE(), then recover F0 from that catch.  Returns the estimate, which
# should be close to the F0 argument.
testNR <- function(F1 = 0.1, F0 = 0.05, M = 0.1, P = 50)
{
  C <- CE(F1, F0, M, P)
  print(paste(P, C, F0))
  # Pass an explicit 0 for the iteration-counter argument.  The original
  # passed the undefined global `it`, which only avoided an error because
  # NR_F0 reassigns `it` before ever forcing it (lazy evaluation).
  estF0 <- NR_F0(F1, 0.0, M, P, C, 0)
  return(estF0)
}
# Finite-difference check of dce_df0(): compare the analytic derivative
# with a central difference of CE() across a grid of F0 values, printing
# both values and their discrepancy.
test_diff<-function(F1=0.1,M=0.1, P=50,eps=1e-5)
{
tF0 <- seq(0,2,.1)
for (F0 in tF0)
{
C1<-CE(F1,F0-eps,M,P)
C2<-CE(F1,F0+eps,M,P)
ndcdf = (C2-C1)/(2.0*eps)
adcdf = dce_df0(F1,F0,M,P)
print(paste(F0,ndcdf,adcdf,(ndcdf-adcdf)))
}
}
# Symbolically differentiate the two-fleet catch residual with respect
# to F0 via stats::D (the source of the formula used in dce_df0) and
# print the resulting call.
DD <- function()
{
  residual <- expression(C - F0 / (F1 + F0 + M) * (1 - exp(-F1 - F0 - M)) * P)
  dF0 <- D(residual, "F0")
  print(dF0)
  invisible(dF0)
}
|
/25-alpha/scripts/baranov.R
|
permissive
|
johnrsibert/tagest
|
R
| false
| false
| 4,055
|
r
|
# dvariable bound(const dvariable& x, const double& l, const double& h, const double& eps)
# {
# dvariable ret;
# if((x>=(l+eps))&&(x<=(h-eps))){
# ret=x;
# }else{
# if(x<(l+eps)){
# ret=eps*exp((x-(l+eps))/eps)+l;
# }else{
# if(x>(h-eps)){
# ret=h-eps*exp(((h-eps)-x)/eps);
# }
# }
# }
# return ret;
# }
bound<-function(x, l, h, eps)
{
if((x>=(l+eps))&&(x<=(h-eps)))
{
ret=x
}
else
{
if(x<(l+eps))
{
ret=eps*exp((x-(l+eps))/eps)+l
}
else
{
if(x>(h-eps))
{
ret=h-eps*exp(((h-eps)-x)/eps)
}
}
}
return(ret)
}
tbound<-function(x1,x2,l=0,h=10)
{
x<-seq(x1,x2,0.001)
y<-vector(length=length(x))
for (i in 1:length(x))
y[i]=bound(x[i],l,h,1e-3)
prange<-c(0.998*h,1.002*h)
plot(x,y,type='l',pch='+',xlim=prange,ylim=prange)
}
gF<-function(F,M)
{
g<-(F/(F+M))*(1-exp(-F-M))
return(g)
}
# g<-expression((F/(F+M))*(1-exp(-F-M)))
# D(g,"F")
#(1/(F + M) - F/(F + M)^2) * (1 - exp(-F - M)) + (F/(F + M)) * exp(-F - M)
dgdf<-function(F,M)
{
d <- (1/(F + M) - F/(F + M)^2) * (1 - exp(-F - M)) + (F/(F + M)) * exp(-F - M)
return(d)
}
BCE<-function(F,M,P)
{
C <- gF(F,M)*P
return(C)
}
NRF<-function(M,P,C,eps=1e-5)
{
maxit <- 25
it <- 0
F <- C/P
df <- 100*eps
while ((df > eps) && (F < 5))
{
g <- C/BCE(F,M,P) - 1.0
dg <- dgdf(F,M)*P
df <- g/dg
F <- F + df
it <- it + 1
# print(paste("iteration ", it, ", df = ", df,", F = ", F,sep=""))
if (it > maxit)
break;
}
return(F)
}
test_gdiff<-function(F=0.1,M=0.1, P=50,eps=1e-5)
{
tF0 <- seq(0,2,.1)
for (F in tF0)
{
C1<-BCE(F-eps,M,P)
C2<-BCE(F+eps,M,P)
ndcdf = (C2-C1)/(2.0*eps)
adcdf = dgdf(F,M)*P
print(paste(F,ndcdf,adcdf,(ndcdf-adcdf)))
}
}
testNRF1<-function(F=0.05, M=0.1, P=50)
{
C <- BCE(F, M, P)
print(paste(P,C, F))
estF0 <- NRF(M, P, C)
return(estF0)
}
testNRF<-function(M=0.1,P=5.35,eps=1e-5)
{
Cseq<-seq(1,10,1)
for (C in Cseq)
{
print("")
print(paste("**",C,P,sep=" "))
estF <- NRF(M, P, C, eps)
print(paste(" ",C,BCE(estF,M,P),estF,sep=" "))
}
}
######################################################
CE<-function(F1,F0,M,P)
{
z <- M + F1 +F0
C<-F0/z*(1.0-exp(-z))*P
return(C)
}
dce_df0<-function(F1,F0,M,P)
{
z <- M + F1 +F0
#dCdF0 = -F0*exp(-z)*P/z - (1.0-exp(-z))*P/z + F0*(1-exp(-z))*P/(z*z)
#dCdF0 = (((1/(F1 + F0 + M) - F0/(F1 + F0 + M)^2) * (1 - exp(-F1 - F0 - M)) + F0/(F1 + F0 + M) * exp(-F1 - F0 - M)) * P)
dCdF0 = (((1/z - F0/z^2) * (1 - exp(-z)) + F0/z * exp(-z)) * P)
return(dCdF0)
}
#R: D
# -(((1/(F1 + F0 + M) - F0/(F1 + F0 + M)^2) * (1 - exp(-F1 - F0 - M)) + F0/(F1 + F0 + M) * exp(-F1 - F0 - M)) * P)
# maxima:
#(%i1) diff(C-F/(F+M)*(1-exp(-(F+M)))*P,F);
# - M - F - M - F - M - F
# F %e P (1 - %e ) P F (1 - %e ) P
#(%o1) - ------------- - ----------------- + -------------------
# M + F M + F 2
# (M + F)
NR_F0<-function(F1,F0,M,P,C,it)
{
maxit <- 25
eps <- 1e-5
dx <- 1.0
it <- 0
while (dx > eps)
{
g <- C-CE(F1,F0,M,P)
dg <- dce_df0(F1,F0,M,P)
dx <- g/dg
F0 <- F0 + dx
it <- it + 1
print(paste("iteration ", it, ", dx = ", dx,", F0 = ", F0,sep=""))
if (it > maxit)
break;
}
return(F0)
}
testNR<-function(F1=0.1, F0=0.05, M=0.1, P=50)
{
C <- CE(F1, F0, M, P)
print(paste(P,C, F0))
estF0 <- NR_F0(F1,0.0, M, P, C, it)
return(estF0)
}
test_diff<-function(F1=0.1,M=0.1, P=50,eps=1e-5)
{
tF0 <- seq(0,2,.1)
for (F0 in tF0)
{
C1<-CE(F1,F0-eps,M,P)
C2<-CE(F1,F0+eps,M,P)
ndcdf = (C2-C1)/(2.0*eps)
adcdf = dce_df0(F1,F0,M,P)
print(paste(F0,ndcdf,adcdf,(ndcdf-adcdf)))
}
}
DD<-function()
{
g <- expression(C-F0/(F1+F0+M)*(1-exp(-F1-F0-M))*P)
dg <- D(g,"F0")
print(dg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53_operations.R
\name{route53_list_tags_for_resource}
\alias{route53_list_tags_for_resource}
\title{Lists tags for one health check or hosted zone}
\usage{
route53_list_tags_for_resource(ResourceType, ResourceId)
}
\arguments{
\item{ResourceType}{[required] The type of the resource.
\itemize{
\item The resource type for health checks is \code{healthcheck}.
\item The resource type for hosted zones is \code{hostedzone}.
}}
\item{ResourceId}{[required] The ID of the resource for which you want to retrieve tags.}
}
\description{
Lists tags for one health check or hosted zone.
See \url{https://www.paws-r-sdk.com/docs/route53_list_tags_for_resource/} for full documentation.
}
\keyword{internal}
|
/cran/paws.networking/man/route53_list_tags_for_resource.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 783
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53_operations.R
\name{route53_list_tags_for_resource}
\alias{route53_list_tags_for_resource}
\title{Lists tags for one health check or hosted zone}
\usage{
route53_list_tags_for_resource(ResourceType, ResourceId)
}
\arguments{
\item{ResourceType}{[required] The type of the resource.
\itemize{
\item The resource type for health checks is \code{healthcheck}.
\item The resource type for hosted zones is \code{hostedzone}.
}}
\item{ResourceId}{[required] The ID of the resource for which you want to retrieve tags.}
}
\description{
Lists tags for one health check or hosted zone.
See \url{https://www.paws-r-sdk.com/docs/route53_list_tags_for_resource/} for full documentation.
}
\keyword{internal}
|
#' @importFrom Rcpp evalCpp
#' @useDynLib deepwalker
NULL
|
/R/pkg.R
|
permissive
|
jwijffels/deepwalker
|
R
| false
| false
| 58
|
r
|
#' @importFrom Rcpp evalCpp
#' @useDynLib deepwalker
NULL
|
library(shiny)
library(plyr)
library(leaflet)
library(reshape2)
library(tidyr)
library(rpivotTable)
library(dplyr)
library(jsonlite)
library(rgdal)
library(RJSONIO)
library(tibble)
library(stringr)
library(sp)
library(maps)
library(maptools)
library(geojsonio)
library(ggplot2)
library(shinydashboard)
library(rjson)
library(DT)
library(xlsx)
library(readxl)
#install.packages("rjson")
## This tool was created by Brian Avant
## The purpose of this tool is to screen GKM related datasets containing metals concentrations in the water column against water quality standards for specific areas.
## Read in screening critiera and sample data
setwd("C:/Users/bavant/Dropbox/WQScreen") #work /Git/WQScreen
# setwd("C:/Users/Brian/Dropbox/WQScreen") #laptop wd
WQCritSS <- read_excel("WQ Criteria and Sample Templates.xlsx", sheet = "WQCriteriaTot")
WQCritHardness <- read_excel("WQ Criteria and Sample Templates.xlsx", sheet = "WQCriteriawHardness")
## Reformat WQ Screening Criteria
WQCritSS_clean <- WQCritSS %>%
gather(variable, value, -c(Designated_Use,ScreenType,NAME,Spatial_Type,Sample_Type)) %>%
filter(complete.cases(.))
namevector <- c("maSlope","mbIntercept", "alphaBeta", "conversionFactor", "alpha", "beta")
WQCritSS_clean[,namevector] <- NA
WQCritAll <- bind_rows(WQCritSS_clean,WQCritHardness)
## Create Output data.frames
rows <- nrow(WQCritSS_clean)
Samplemarkerlayer <- data.frame(Date_Time = character(rows*10),
Sample_No = character(rows*10),
Designated_Use = character(rows*10),
Sp_Layer = character(rows*10),
ScreenType = character(rows*10),
Lat = numeric(rows*10),
Lon = numeric(rows*10),
NAME = character(rows*10),
Sample_Type = character(rows*10),
CritMetal = character(rows*10),
CalcValue = numeric(rows*10),
SampleValue = numeric(rows*10),
ObsMetal = character(rows*10),
stringsAsFactors=FALSE)
#################### Load GEOJSONs and Merge Criteria Data #####################
statesJSON <- readOGR(dsn="selected_states.geojson", layer = "selected_states", verbose = FALSE)
#statesJSON <- readOGR("selected_states.geojson", "OGRGeoJSON", verbose = FALSE) #selected_
states <- map(statesJSON, fill=TRUE, col="transparent", plot=FALSE)
StateIDs <- sapply(strsplit(states$names, ":"), function(x) x[1])
states_sp <- map2SpatialPolygons(states, IDs=StateIDs,
proj4string=CRS("+proj=longlat +datum=WGS84"))
tribesJSON <- readOGR(dsn="tribes.geojson", layer = "tribes", verbose = FALSE)
#tribesJSON <- readOGR("tribes.geojson", "OGRGeoJSON", verbose = FALSE)
tribesmap <- map(tribesJSON, fill=TRUE, col="transparent", plot=FALSE)
TribesIDs <- sapply(strsplit(tribesmap$names, ":"), function(x) x[1])
tribes_sp <- map2SpatialPolygons(tribesmap, IDs=TribesIDs,
proj4string=CRS("+proj=longlat +datum=WGS84"))
regionsJSON <- readOGR(dsn="EPA_regions.geojson", layer = "EPA_regions", verbose = FALSE)
#regionsJSON <- readOGR("EPA_regions.geojson", "OGRGeoJSON", verbose = FALSE)
regions <- map(regionsJSON, fill=TRUE, col="transparent", plot=FALSE)
RegionsIDs <- sapply(strsplit(regions$names, ":"), function(x) x[1])
regions_sp <- map2SpatialPolygons(regions, IDs=RegionsIDs,
proj4string=CRS("+proj=longlat +datum=WGS84"))
### latlong Conversion Function #######################################################
# Map lon/lat points to the state polygon containing each one.
# pointsDF: data.frame of (Lon, Lat) coordinates, WGS84 -- assumes that
# column order; TODO confirm against callers.  Reads the global
# `states_sp` SpatialPolygons built above.  Returns a character vector
# of polygon IDs, NA where a point falls in no state polygon.
latlong2state <- function(pointsDF) {
## Convert pointsDF to a SpatialPoints object
pointsSP <- SpatialPoints(pointsDF,
                          proj4string=CRS("+proj=longlat +datum=WGS84"))
## Use 'over' to get _indices_ of the Polygons object containing each point
states_indices <- over(pointsSP, states_sp)
## Return the state names of the Polygons object containing each point
stateNames <- sapply(states_sp@polygons, function(x) x@ID)
stateNames[states_indices]
}
# Same lookup as latlong2state(), but against the global `tribes_sp`
# polygons.  Returns tribal-area polygon IDs, NA where no match.
latlong2tribe <- function(pointsDF) {
## Convert pointsDF to a SpatialPoints object
pointsSP <- SpatialPoints(pointsDF,
                          proj4string=CRS("+proj=longlat +datum=WGS84"))
## Use 'over' to get _indices_ of the Polygons object containing each point
tribes_indices <- over(pointsSP, tribes_sp)
## Return the tribe names of the Polygons object containing each point
tribeNames <- sapply(tribes_sp@polygons, function(x) x@ID)
tribeNames[tribes_indices]
}
# Same lookup as latlong2state(), but against the global `regions_sp`
# (EPA region) polygons.  Returns region polygon IDs, NA where no match.
latlong2region <- function(pointsDF) {
## Convert pointsDF to a SpatialPoints object
pointsSP <- SpatialPoints(pointsDF,
                          proj4string=CRS("+proj=longlat +datum=WGS84"))
## Use 'over' to get _indices_ of the Polygons object containing each point
regions_indices <- over(pointsSP, regions_sp)
## Return the region names of the Polygons object containing each point
regionNames <- sapply(regions_sp@polygons, function(x) x@ID)
regionNames[regions_indices]
}
####################################################################################
m=0
n=0
g=0
#df <- read.table("C:/Users/bavant/Dropbox/WQScreen/ObservedData_CurrentNDsasZero_HistNDsasLim_latlon_partial2.txt", header = TRUE, sep = "\t",stringsAsFactors=FALSE)
df <- read.csv("C:/Users/bavant/Dropbox/WQScreen/GKM All Samples by Named Location.csv", header = TRUE, sep = ",",stringsAsFactors=FALSE)
#df <- read.csv("C:/Users/Brian/Dropbox/WQScreen/New 2017 data for screening.csv", header = TRUE, sep = ",",stringsAsFactors=FALSE) #laptop
if (input$Spatialdist == "LatLon") { #lat lon version
## Sample Sites
samplemarkers <- select(df, c(Lon,Lat,Samp_No))
#write.csv(df,"samplemarkers.csv", row.names=FALSE)
## Collect relevant spatial boundaries from sample lat lon
samplecoords <- select(df, c(Lon,Lat))
Spatial_Boundstate <- str_to_title(latlong2state(samplecoords))
Spatial_Boundregion <- str_to_title(latlong2region(samplecoords))
Spatial_Boundtribe <- str_to_title(latlong2tribe(samplecoords))
## add States column to sample data and remove NAs
ObsSpatial_BoundsStatena <- add_column(df, Spatial_Boundstate, .after = 1)
ObsSpatial_BoundsState <- complete.cases(ObsSpatial_BoundsStatena[,2])
ObsAllSpatial_BoundsState <- ObsSpatial_BoundsStatena[ObsSpatial_BoundsState, ]
States_Layer <- add_column(ObsAllSpatial_BoundsState, Sp_Layer = "States", .after = 2)
colnames(States_Layer)[2] <- "NAME"
## add EPA Region column to sample data and remove NAs
ObsSpatial_BoundsRegionna <- add_column(df, Spatial_Boundregion, .after = 1)
ObsSpatial_BoundsRegion <- complete.cases(ObsSpatial_BoundsRegionna[,2])
ObsAllSpatial_BoundsRegion <- ObsSpatial_BoundsRegionna[ObsSpatial_BoundsRegion, ]
Regions_Layer <- add_column(ObsAllSpatial_BoundsRegion, Sp_Layer = "Regions", .after = 2)
colnames(Regions_Layer)[2] <- "NAME"
## add Tribe column to sample data and remove NAs
ObsSpatial_BoundsTribena <- add_column(df, Spatial_Boundtribe, .after = 1)
ObsSpatial_BoundsTribe <- complete.cases(ObsSpatial_BoundsTribena[,2])
ObsAllSpatial_BoundsTribe <- ObsSpatial_BoundsTribena[ObsSpatial_BoundsTribe, ]
Tribes_Layer <- add_column(ObsAllSpatial_BoundsTribe, Sp_Layer = "Tribes", .after = 2)
colnames(Tribes_Layer)[2] <- "NAME"
## append all sample boundaries to one df
ObsAllSpatial_Bounds <- rbind(States_Layer,Regions_Layer,Tribes_Layer)
} else { # instead of lat lon user provides columns declaring spatial boundaries
df2 <- add_column(df, Sp_Layer = "State", Lat = NA, Lon = NA, .after = 2)
Tribes_col <- df2[complete.cases(df2$Tribe),]
if (nrow(Tribes_col) > 0) {Tribes_col$Sp_Layer <- "Tribes"}
Tribes_col2 <- df2[complete.cases(df2$Secondary_Tribe),]
if (nrow(Tribes_col2) > 0) {Tribes_col2$Sp_Layer <- "Tribes"}
Regions_col <- df2[complete.cases(df2$Region),]
if (nrow(Regions_col) > 0) {Regions_col$Sp_Layer <- "Regions"}
names(Tribes_col)[names(Tribes_col)=="Tribe"] <- "NAME"
names(Tribes_col2)[names(Tribes_col2)=="Secondary_Tribe"] <- "NAME"
names(Regions_col)[names(Regions_col)=="Regions"] <- "NAME"
names(df2)[names(df2)=="State"] <- "NAME"
ObsAllSpatial_Bounds <- rbind(df2[, -which(names(df2) %in% c("Tribe","Secondary_Tribe","Region"))],
Regions_col[, -which(names(Regions_col) %in% c("State","Tribe","Secondary_Tribe"))],
Tribes_col[, -which(names(Tribes_col) %in% c("State","Secondary_Tribe","Region"))],
Tribes_col2[, -which(names(Tribes_col2) %in% c("State","Tribe","Region"))])
ObsAllSpatial_Bounds <- filter(ObsAllSpatial_Bounds, NAME != "")
}
## Cap hardness values based on specific criteria
obsCapped <- within(ObsAllSpatial_Bounds, Hardness[Hardness>400] <- 400) #Maximum hardness of 400 mg/L for most criteria in the region
index <- 1 + which(colnames(obsCapped)=="Hardness" )
samples_long <- gather(obsCapped, "variable", "conc", index:ncol(obsCapped))
#if (input$Categories==TRUE) {
GroupCategories <- colnames(samples_long) [(which(colnames(samples_long)=="Lon")+1):(which(colnames(samples_long)=="Hardness")-1)]
ScreenCategories <- c(GroupCategories, "variable")
samples_long <- samples_long %>% mutate(Sample_Type = ifelse(NAME=="New Mexico" & Sample_Type=="Total" & variable=="Aluminum",
"Total Recoverable",
Sample_Type))
UniqueObs <- unique(samples_long[ScreenCategories])
OutputCategories <- c("Designated_Use","ScreenType",GroupCategories,"Metal","Times_Exceeded","Number_Screened")
output_screen <- data.frame(matrix(ncol = length(OutputCategories), nrow = rows),
stringsAsFactors=FALSE)
names(output_screen) <- OutputCategories
output_screen[,OutputCategories] <- lapply(output_screen[,OutputCategories],as.character)
output_screen$Times_Exceeded <- as.numeric(output_screen$Times_Exceeded)
output_screen$Number_Screened <- as.numeric(output_screen$Number_Screened)
output_screen$Times_Exceeded[is.na(output_screen$Times_Exceeded)] <- 0
output_screen$Number_Screened[is.na(output_screen$Number_Screened)] <- 0
output_screen[is.na(output_screen)] <- ""
# } else {
# for (i in 1:nrow(samples_long)) {
# samples_long$Sample_Type[i] = ifelse(samples_long$NAME[i]=="New Mexico" &
# samples_long$Sample_Type[i]=="Total" &
# samples_long$variable[i]=="Aluminum",
# "Total Recoverable",
# samples_long$Sample_Type[i])}
# UniqueObs <- unique(samples_long[c("NAME","Sample_Type", "variable")])
# output_screen <- data.frame(Designated_Use = character(rows),
# ScreenType = character(rows),
# NAME = character(rows),
# Sample_Type = character(rows),
# Metal = character(rows),
# Times_Exceeded = numeric(rows),
# Number_Screened = numeric(rows),
# stringsAsFactors=FALSE)
#}
## This is the main function of the tool. For each sample the applicable screening criteria are identified and used to
## determine the number of times a WQ criteria has been exceeded for a specific screen.
for (i in 1:nrow(UniqueObs)) { #loops through each sample by unique combinations of region, conc type(row), and metal
print(UniqueObs[i,])
screen <- filter(WQCritAll, NAME==UniqueObs$NAME[i], #iteratively queries WQ criteria based on sample data (sample & metal)
variable==UniqueObs$variable[i],
Sample_Type==UniqueObs$Sample_Type[i])
if (length(screen$value) > 0){
#if (input$Categories==TRUE) { # Converts designated columns into Categories
filtercolumns <- which((names(samples_long) %in% names(UniqueObs[i,]))==TRUE)
filt1 <- NULL
filt2 <- NULL
filtervar <- NULL
for (l in 1:length(filtercolumns)){ # generates variable with string to pass to filter_
filt1[l] <- names(samples_long[filtercolumns[l]])
filt2[l] <-UniqueObs[i,l]
filtervar[l] <-paste(filt1[l],"==","'",filt2[l],"'", sep="")
}
tempSamples <- samples_long %>% filter(UQ(rlang::sym(filt1[1]))==filt2[1]) %>%
filter(UQ(rlang::sym(filt1[2]))==filt2[2]) %>%
filter(UQ(rlang::sym(filt1[3]))==filt2[3]) %>%
filter(UQ(rlang::sym(filt1[4]))==filt2[4]) %>%
filter(UQ(rlang::sym(filt1[5]))==filt2[5])
# } else {
# tempSamples <- filter(samples_long, NAME==UniqueObs$NAME[i], Sample_Type==UniqueObs$Sample_Type[i], variable == UniqueObs$variable[i]) #subset observed data by unique combination
#}
if (UniqueObs$NAME[i]=="New Mexico" &
UniqueObs$Sample_Type[i]=="Total" &
UniqueObs$variable[i]=="Aluminum") { #New Mexico hardness limit for total Al = 220 mg/L
tempSamples <- tempSamples %>% within(Hardness[Hardness>220] <- 220) %>%
mutate(Sample_Type = "Total Recoverable",
conc = conc*0.31)
}
for (b in 1:length(screen$ScreenType)) { #loop through matching screens
if (!is.na(screen$maSlope[b]==TRUE)) { #find screens that need to be calculated based on hardness
aquatic_screen <- data.frame(Date_Time = character(nrow(tempSamples)),
Sample_No = character(nrow(tempSamples)),
Designated_Use = character(nrow(tempSamples)),
Sp_Layer = character(nrow(tempSamples)),
ScreenType = character(nrow(tempSamples)),
Lat = numeric(nrow(tempSamples)),
Lon = numeric(nrow(tempSamples)),
NAME = character(nrow(tempSamples)),
Sample_Type = character(nrow(tempSamples)),
CritMetal = character(nrow(tempSamples)),
CalcValue = numeric(nrow(tempSamples)),
SampleValue = numeric(nrow(tempSamples)),
ObsMetal = character(nrow(tempSamples)),
stringsAsFactors=FALSE)
g=1
if (screen$alphaBeta[b] == 0) { #calculator function 1
for (y in 1:nrow(tempSamples)) { #iterate through each sample
screen$value[b] <- as.numeric((exp((screen$maSlope[b]*log(tempSamples$Hardness[y]))+screen$mbIntercept[b])*screen$conversionFactor[b])/1000) #calculate criteria
aquatic_screen[g,] <- c(tempSamples$Date_Time[y],
tempSamples$Samp_No[y],
screen$Designated_Use[b],
tempSamples$Sp_Layer[y],
screen$ScreenType[b],
tempSamples$Lat[y],
tempSamples$Lon[y],
screen$NAME[b],
screen$Sample_Type[b],
screen$variable[b],
as.numeric(screen$value[b]),
as.numeric(tempSamples$conc[y]),
tempSamples$variable[y]) #collect criteria and sample value (for screen eval)
aquatic_screen[, c(11:12)] <- sapply(aquatic_screen[, c(11:12)], as.numeric)
g=g+1
}
} else if (screen$alphaBeta[b] == 1) { #calculator function 2
for (z in 1:nrow(tempSamples)) { #iterate through each sample
screen$value[b] <- as.numeric((exp((screen$maSlope[b]*log(tempSamples$Hardness[z])+screen$mbIntercept[b]))*(screen$alpha[b]-(log(tempSamples$Hardness[z])*screen$beta[b])))/1000) #calculate criteria
aquatic_screen[g,] <- c(tempSamples$Date_Time[z],
tempSamples$Samp_No[z],
screen$Designated_Use[b],
tempSamples$Sp_Layer[z],
screen$ScreenType[b],
tempSamples$Lat[z],
tempSamples$Lon[z],
screen$NAME[b],
screen$Sample_Type[b],
screen$variable[b],
as.numeric(screen$value[b]),
as.numeric(tempSamples$conc[z]),
tempSamples$variable[z]) #collect criteria and sample value (for screen eval)
aquatic_screen[, c(11:12)] <- sapply(aquatic_screen[, c(11:12)], as.numeric)
g=g+1
}
} else {
cat("Something went wrong with the hardness calculator.", "The error occured calculating the screening criteria for",screen$Sample_Type[b],
screen$variable[b], "using the",screen$ScreenType[b], "screen for",screen$NAME[b])
}
aquatic_screen_cleaned <- filter(aquatic_screen, CalcValue >= 0 & SampleValue >= 0 & CritMetal != "") #remove empty rows in data.frame
n_screened <- nrow(aquatic_screen_cleaned) #count the number of samples that are screened
n_screened[is.null(n_screened)] <- -50
if (n_screened > 0) {
metal_vector_exceedances <- aquatic_screen_cleaned[which(aquatic_screen_cleaned$SampleValue > aquatic_screen_cleaned$CalcValue),] #filter criteria with exceedances
metal_exceedance_count <- nrow(metal_vector_exceedances) #count exceedances
m=m+1
for (f in 1:nrow(aquatic_screen_cleaned)) {
n=n+1
Samplemarkerlayer[n,] <- aquatic_screen_cleaned[f,]
}
# if (input$Categories==TRUE) {
nCategories <- (UniqueObs[,c(-1,-ncol(UniqueObs))])
screenvars1 <- c(screen$Designated_Use[b],
screen$ScreenType[b],
screen$NAME[b])
screenvars2 <- NULL
for (x in 1:length(GroupCategories[-length(GroupCategories)])) {
screenvars2[x] <- if (length(GroupCategories[-length(GroupCategories)])>1) {nCategories[i,x]} else {nCategories[i]}
}
screenvars3 <- c(screen$variable[b],
metal_exceedance_count,
n_screened)
screenvarTot <- c(screenvars1,screenvars2,screenvars3)
output_screen[m,] <- screenvarTot
# } else {
# output_screen[m,] <- c(screen$Designated_Use[b],
# screen$ScreenType[b],
# screen$NAME[b],
# screen$Sample_Type[b],
# screen$variable[b],
# metal_exceedance_count,
# n_screened)
# }
}
} else {
if (!all(is.na(tempSamples$conc))) { #distinguishes between a non-detect sample and no sample
metal_vector_nonas <- tempSamples[!is.na(tempSamples$conc),]#remove NAs
num_metal_samples <- nrow(metal_vector_nonas) #count the number of samples that are screened
if(is.null(num_metal_samples)){num_metal_samples <- -50}
if (num_metal_samples > 0) {
metal_vector_exceedances <- metal_vector_nonas[which(metal_vector_nonas$conc>screen$value[b]),] #filter criteria with exceedances
metal_exceedance_count <- nrow(metal_vector_exceedances) #count exceedances
m=m+1
for (t in 1:num_metal_samples) {
n=n+1
Samplemarkerlayer[n,] <- c(metal_vector_nonas$Date_Time[t],
metal_vector_nonas$Samp_No[t],
screen$Designated_Use[b],
metal_vector_nonas$Sp_Layer[t],
screen$ScreenType[b],
metal_vector_nonas$Lat[t],
metal_vector_nonas$Lon[t],
metal_vector_nonas$NAME[t],
metal_vector_nonas$Sample_Type[t],
screen$variable[b],
as.numeric(screen$value[b]),
as.numeric(metal_vector_nonas$conc[t]),
unique(tempSamples$variable))
}
#if (input$Categories==TRUE) {
nCategories <- (UniqueObs[,c(-1,-ncol(UniqueObs))])
screenvars1 <- c(screen$Designated_Use[b],
screen$ScreenType[b],
screen$NAME[b])
screenvars2 <- NULL
for (x in 1:length(GroupCategories[-length(GroupCategories)])) {
screenvars2[x] <- if (length(GroupCategories[-length(GroupCategories)])>1) {nCategories[i,x]} else {nCategories[i]}
}
screenvars3 <- c(screen$variable[b],
metal_exceedance_count,
num_metal_samples)
screenvarTot <- c(screenvars1,screenvars2,screenvars3)
output_screen[m,] <- screenvarTot
# } else {
# output_screen[m,] <- c(screen$Designated_Use[b],
# screen$ScreenType[b],
# screen$NAME[b],
# screen$Sample_Type[b],
# screen$variable[b],
# metal_exceedance_count,
# num_metal_samples)
#}
}
}
}
}
} else {
cat(UniqueObs$Sample_Type[i],
UniqueObs$variable,
UniqueObs$NAME[i],
file="echoFile.txt", append=TRUE)
}
}
#start_time <- Sys.time()
#sleep_for_a_minute()
#end_time <- Sys.time()
#end_time - start_time
output_screen <- filter(output_screen, ScreenType!="")
output_screen$Times_Exceeded <- as.numeric(output_screen$Times_Exceeded)
output_screen_Exceeded <- filter(output_screen, Times_Exceeded > 0)
#write.csv(WQCritAll,file="WQCritAll.csv", row.names=FALSE)
write.csv(output_screen,file="Reload1_file_nonshinyapp.csv", row.names=FALSE)
if (exists("Samplemarkerlayer")){
samplemarkers_screen <- filter(Samplemarkerlayer, ScreenType!="") %>%
mutate(SampleValue = as.numeric(SampleValue),
CalcValue = as.numeric(CalcValue),
Difference = SampleValue/CalcValue,
Type = ifelse(Difference < 1,"NotExceeded","Exceeded"),
Lat = as.numeric(Lat),
Lon = as.numeric(Lon),
#Date_Time = as.POSIXct(Date_Time,format = '%m/%d/%Y %H:%M'), #,usetz = FALSE
Date_Time = format(as.POSIXct(Date_Time,format = '%m/%d/%Y %H:%M'),format='%Y-%m-%d')) # %H:%M
write.csv(samplemarkers_screen,file="Samplelatlondiferences.csv",row.names = FALSE)
}
|
/WQScreening Tool non-Shiny.R
|
permissive
|
quanted/wq_screen
|
R
| false
| false
| 27,177
|
r
|
library(shiny)
library(plyr)
library(leaflet)
library(reshape2)
library(tidyr)
library(rpivotTable)
library(dplyr)
library(jsonlite)
library(rgdal)
library(RJSONIO)
library(tibble)
library(stringr)
library(sp)
library(maps)
library(maptools)
library(geojsonio)
library(ggplot2)
library(shinydashboard)
library(rjson)
library(DT)
library(xlsx)
library(readxl)
#install.packages("rjson")
## This tool was created by Brian Avant
## The purpose of this tool is to screen GKM related datasets containing metals concentrations in the water column against water quality standards for specific areas.
## Read in screening critiera and sample data
## Working directory and input workbooks.
## NOTE(review): hard-coded setwd() makes the script machine-specific; confirm
## the path before running on another machine.
setwd("C:/Users/bavant/Dropbox/WQScreen") #work /Git/WQScreen
# setwd("C:/Users/Brian/Dropbox/WQScreen") #laptop wd
# Two criteria sheets: fixed-value criteria and hardness-dependent criteria.
WQCritSS <- read_excel("WQ Criteria and Sample Templates.xlsx", sheet = "WQCriteriaTot")
WQCritHardness <- read_excel("WQ Criteria and Sample Templates.xlsx", sheet = "WQCriteriawHardness")
## Reformat WQ Screening Criteria: wide -> long (one row per criterion/metal),
## keeping only complete rows.
WQCritSS_clean <- WQCritSS %>%
  gather(variable, value, -c(Designated_Use,ScreenType,NAME,Spatial_Type,Sample_Type)) %>%
  filter(complete.cases(.))
# Add the hardness-equation columns (NA for fixed criteria) so both criteria
# tables share a schema and can be row-bound into one lookup table.
namevector <- c("maSlope","mbIntercept", "alphaBeta", "conversionFactor", "alpha", "beta")
WQCritSS_clean[,namevector] <- NA
WQCritAll <- bind_rows(WQCritSS_clean,WQCritHardness)
## Create Output data.frames
rows <- nrow(WQCritSS_clean)
# Preallocated per-sample screening results. rows*10 is an assumed upper bound
# on the number of screened rows -- TODO confirm it is always large enough.
Samplemarkerlayer <- data.frame(Date_Time = character(rows*10),
                                Sample_No = character(rows*10),
                                Designated_Use = character(rows*10),
                                Sp_Layer = character(rows*10),
                                ScreenType = character(rows*10),
                                Lat = numeric(rows*10),
                                Lon = numeric(rows*10),
                                NAME = character(rows*10),
                                Sample_Type = character(rows*10),
                                CritMetal = character(rows*10),
                                CalcValue = numeric(rows*10),
                                SampleValue = numeric(rows*10),
                                ObsMetal = character(rows*10),
                                stringsAsFactors=FALSE)
#################### Load GEOJSONs and Merge Criteria Data #####################
# Each layer (states / tribes / EPA regions) is read from GeoJSON and converted
# to a SpatialPolygons object in WGS84 for point-in-polygon lookups below.
statesJSON <- readOGR(dsn="selected_states.geojson", layer = "selected_states", verbose = FALSE)
#statesJSON <- readOGR("selected_states.geojson", "OGRGeoJSON", verbose = FALSE) #selected_
states <- map(statesJSON, fill=TRUE, col="transparent", plot=FALSE)
StateIDs <- sapply(strsplit(states$names, ":"), function(x) x[1])
states_sp <- map2SpatialPolygons(states, IDs=StateIDs,
                                 proj4string=CRS("+proj=longlat +datum=WGS84"))
tribesJSON <- readOGR(dsn="tribes.geojson", layer = "tribes", verbose = FALSE)
#tribesJSON <- readOGR("tribes.geojson", "OGRGeoJSON", verbose = FALSE)
tribesmap <- map(tribesJSON, fill=TRUE, col="transparent", plot=FALSE)
TribesIDs <- sapply(strsplit(tribesmap$names, ":"), function(x) x[1])
tribes_sp <- map2SpatialPolygons(tribesmap, IDs=TribesIDs,
                                 proj4string=CRS("+proj=longlat +datum=WGS84"))
regionsJSON <- readOGR(dsn="EPA_regions.geojson", layer = "EPA_regions", verbose = FALSE)
#regionsJSON <- readOGR("EPA_regions.geojson", "OGRGeoJSON", verbose = FALSE)
regions <- map(regionsJSON, fill=TRUE, col="transparent", plot=FALSE)
RegionsIDs <- sapply(strsplit(regions$names, ":"), function(x) x[1])
regions_sp <- map2SpatialPolygons(regions, IDs=RegionsIDs,
                                  proj4string=CRS("+proj=longlat +datum=WGS84"))
### latlong Conversion Functions: map lon/lat points to boundary-layer names ###
latlong2state <- function(pointsDF) {
  ## Return the state name (polygon ID) containing each lon/lat point in
  ## pointsDF; NA for points falling outside every state polygon.
  pts <- SpatialPoints(pointsDF,
                       proj4string = CRS("+proj=longlat +datum=WGS84"))
  hit <- over(pts, states_sp)  # polygon index per point (NA if no match)
  ids <- sapply(states_sp@polygons, function(p) p@ID)
  ids[hit]
}
latlong2tribe <- function(pointsDF) {
  ## Return the tribal-area name (polygon ID) containing each lon/lat point
  ## in pointsDF; NA for points falling outside every tribal polygon.
  pts <- SpatialPoints(pointsDF,
                       proj4string = CRS("+proj=longlat +datum=WGS84"))
  hit <- over(pts, tribes_sp)  # polygon index per point (NA if no match)
  ids <- sapply(tribes_sp@polygons, function(p) p@ID)
  ids[hit]
}
latlong2region <- function(pointsDF) {
  ## Return the EPA region name (polygon ID) containing each lon/lat point
  ## in pointsDF; NA for points falling outside every region polygon.
  pts <- SpatialPoints(pointsDF,
                       proj4string = CRS("+proj=longlat +datum=WGS84"))
  hit <- over(pts, regions_sp)  # polygon index per point (NA if no match)
  ids <- sapply(regions_sp@polygons, function(p) p@ID)
  ids[hit]
}
####################################################################################
# Counters used by the main loop: m indexes rows written to output_screen,
# n rows written to Samplemarkerlayer, g rows within a per-screen work frame.
m=0
n=0
g=0
#df <- read.table("C:/Users/bavant/Dropbox/WQScreen/ObservedData_CurrentNDsasZero_HistNDsasLim_latlon_partial2.txt", header = TRUE, sep = "\t",stringsAsFactors=FALSE)
df <- read.csv("C:/Users/bavant/Dropbox/WQScreen/GKM All Samples by Named Location.csv", header = TRUE, sep = ",",stringsAsFactors=FALSE)
#df <- read.csv("C:/Users/Brian/Dropbox/WQScreen/New 2017 data for screening.csv", header = TRUE, sep = ",",stringsAsFactors=FALSE) #laptop
# NOTE(review): `input` is a Shiny reactive object and is NOT defined in this
# non-Shiny script; this branch will error unless `input` is supplied -- confirm.
if (input$Spatialdist == "LatLon") { #lat lon version
  ## Sample Sites
  samplemarkers <- select(df, c(Lon,Lat,Samp_No))
  #write.csv(df,"samplemarkers.csv", row.names=FALSE)
  ## Collect relevant spatial boundaries from sample lat lon
  samplecoords <- select(df, c(Lon,Lat))
  Spatial_Boundstate <- str_to_title(latlong2state(samplecoords))
  Spatial_Boundregion <- str_to_title(latlong2region(samplecoords))
  Spatial_Boundtribe <- str_to_title(latlong2tribe(samplecoords))
  ## add States column to sample data and remove NAs (samples outside all states)
  ObsSpatial_BoundsStatena <- add_column(df, Spatial_Boundstate, .after = 1)
  ObsSpatial_BoundsState <- complete.cases(ObsSpatial_BoundsStatena[,2])
  ObsAllSpatial_BoundsState <- ObsSpatial_BoundsStatena[ObsSpatial_BoundsState, ]
  States_Layer <- add_column(ObsAllSpatial_BoundsState, Sp_Layer = "States", .after = 2)
  colnames(States_Layer)[2] <- "NAME"
  ## add EPA Region column to sample data and remove NAs
  ObsSpatial_BoundsRegionna <- add_column(df, Spatial_Boundregion, .after = 1)
  ObsSpatial_BoundsRegion <- complete.cases(ObsSpatial_BoundsRegionna[,2])
  ObsAllSpatial_BoundsRegion <- ObsSpatial_BoundsRegionna[ObsSpatial_BoundsRegion, ]
  Regions_Layer <- add_column(ObsAllSpatial_BoundsRegion, Sp_Layer = "Regions", .after = 2)
  colnames(Regions_Layer)[2] <- "NAME"
  ## add Tribe column to sample data and remove NAs
  ObsSpatial_BoundsTribena <- add_column(df, Spatial_Boundtribe, .after = 1)
  ObsSpatial_BoundsTribe <- complete.cases(ObsSpatial_BoundsTribena[,2])
  ObsAllSpatial_BoundsTribe <- ObsSpatial_BoundsTribena[ObsSpatial_BoundsTribe, ]
  Tribes_Layer <- add_column(ObsAllSpatial_BoundsTribe, Sp_Layer = "Tribes", .after = 2)
  colnames(Tribes_Layer)[2] <- "NAME"
  ## append all sample boundaries to one df (one row per sample per layer)
  ObsAllSpatial_Bounds <- rbind(States_Layer,Regions_Layer,Tribes_Layer)
} else { # instead of lat lon user provides columns declaring spatial boundaries
  df2 <- add_column(df, Sp_Layer = "State", Lat = NA, Lon = NA, .after = 2)
  Tribes_col <- df2[complete.cases(df2$Tribe),]
  if (nrow(Tribes_col) > 0) {Tribes_col$Sp_Layer <- "Tribes"}
  Tribes_col2 <- df2[complete.cases(df2$Secondary_Tribe),]
  if (nrow(Tribes_col2) > 0) {Tribes_col2$Sp_Layer <- "Tribes"}
  Regions_col <- df2[complete.cases(df2$Region),]
  if (nrow(Regions_col) > 0) {Regions_col$Sp_Layer <- "Regions"}
  names(Tribes_col)[names(Tribes_col)=="Tribe"] <- "NAME"
  names(Tribes_col2)[names(Tribes_col2)=="Secondary_Tribe"] <- "NAME"
  # NOTE(review): renames column "Regions", but the data column above is
  # "Region" -- presumably a typo; confirm against the input file's headers.
  names(Regions_col)[names(Regions_col)=="Regions"] <- "NAME"
  names(df2)[names(df2)=="State"] <- "NAME"
  # Stack the four views, dropping the boundary columns already folded into NAME.
  ObsAllSpatial_Bounds <- rbind(df2[, -which(names(df2) %in% c("Tribe","Secondary_Tribe","Region"))],
                                Regions_col[, -which(names(Regions_col) %in% c("State","Tribe","Secondary_Tribe"))],
                                Tribes_col[, -which(names(Tribes_col) %in% c("State","Secondary_Tribe","Region"))],
                                Tribes_col2[, -which(names(Tribes_col2) %in% c("State","Tribe","Region"))])
  ObsAllSpatial_Bounds <- filter(ObsAllSpatial_Bounds, NAME != "")
}
## Cap hardness values based on specific criteria
obsCapped <- within(ObsAllSpatial_Bounds, Hardness[Hardness>400] <- 400) #Maximum hardness of 400 mg/L for most criteria in the region
# Melt every metal column (everything after Hardness) into variable/conc pairs.
index <- 1 + which(colnames(obsCapped)=="Hardness" )
samples_long <- gather(obsCapped, "variable", "conc", index:ncol(obsCapped))
#if (input$Categories==TRUE) {
# Grouping columns are positional: everything strictly between Lon and Hardness.
GroupCategories <- colnames(samples_long) [(which(colnames(samples_long)=="Lon")+1):(which(colnames(samples_long)=="Hardness")-1)]
ScreenCategories <- c(GroupCategories, "variable")
# New Mexico screens total aluminum as "Total Recoverable".
samples_long <- samples_long %>% mutate(Sample_Type = ifelse(NAME=="New Mexico" & Sample_Type=="Total" & variable=="Aluminum",
                                                             "Total Recoverable",
                                                             Sample_Type))
UniqueObs <- unique(samples_long[ScreenCategories])
# Result table: one row per (screen, grouping combination, metal).
OutputCategories <- c("Designated_Use","ScreenType",GroupCategories,"Metal","Times_Exceeded","Number_Screened")
output_screen <- data.frame(matrix(ncol = length(OutputCategories), nrow = rows),
                            stringsAsFactors=FALSE)
names(output_screen) <- OutputCategories
output_screen[,OutputCategories] <- lapply(output_screen[,OutputCategories],as.character)
output_screen$Times_Exceeded <- as.numeric(output_screen$Times_Exceeded)
output_screen$Number_Screened <- as.numeric(output_screen$Number_Screened)
output_screen$Times_Exceeded[is.na(output_screen$Times_Exceeded)] <- 0
output_screen$Number_Screened[is.na(output_screen$Number_Screened)] <- 0
output_screen[is.na(output_screen)] <- ""
# } else {
#   for (i in 1:nrow(samples_long)) {
#     samples_long$Sample_Type[i] = ifelse(samples_long$NAME[i]=="New Mexico" &
#                                          samples_long$Sample_Type[i]=="Total" &
#                                          samples_long$variable[i]=="Aluminum",
#                                          "Total Recoverable",
#                                          samples_long$Sample_Type[i])}
#   UniqueObs <- unique(samples_long[c("NAME","Sample_Type", "variable")])
#   output_screen <- data.frame(Designated_Use = character(rows),
#                               ScreenType = character(rows),
#                               NAME = character(rows),
#                               Sample_Type = character(rows),
#                               Metal = character(rows),
#                               Times_Exceeded = numeric(rows),
#                               Number_Screened = numeric(rows),
#                               stringsAsFactors=FALSE)
#}
## This is the main function of the tool. For each unique combination of
## spatial unit, grouping-category values, and metal, the applicable screening
## criteria are looked up and the number of samples exceeding each criterion
## is counted into output_screen; per-sample results go to Samplemarkerlayer.
for (i in seq_len(nrow(UniqueObs))) {
  print(UniqueObs[i,])
  # Criteria applicable to this spatial unit / metal / sample type.
  screen <- filter(WQCritAll, NAME==UniqueObs$NAME[i],
                   variable==UniqueObs$variable[i],
                   Sample_Type==UniqueObs$Sample_Type[i])
  if (length(screen$value) > 0){
    # Subset the long-format samples by EVERY screening-category column.
    # (Generalized: the original hard-coded exactly five filter() calls and
    # broke for any other number of grouping columns.)
    filtercolumns <- which(names(samples_long) %in% names(UniqueObs[i,]))
    filt1 <- NULL
    filt2 <- NULL
    for (l in seq_along(filtercolumns)) {
      filt1[l] <- names(samples_long[filtercolumns[l]])
      filt2[l] <- UniqueObs[i,l]
    }
    tempSamples <- samples_long
    for (l in seq_along(filtercolumns)) {
      tempSamples <- filter(tempSamples, UQ(rlang::sym(filt1[l])) == filt2[l])
    }
    if (UniqueObs$NAME[i]=="New Mexico" &
        UniqueObs$Sample_Type[i]=="Total" &
        UniqueObs$variable[i]=="Aluminum") { #New Mexico hardness limit for total Al = 220 mg/L
      # NM total aluminum: cap hardness at 220 and screen 31% of the total
      # concentration as "Total Recoverable".
      tempSamples <- tempSamples %>% within(Hardness[Hardness>220] <- 220) %>%
        mutate(Sample_Type = "Total Recoverable",
               conc = conc*0.31)
    }
    for (b in seq_along(screen$ScreenType)) { # loop through matching screens
      # FIX: was `!is.na(screen$maSlope[b]==TRUE)` -- equivalent but obfuscated.
      if (!is.na(screen$maSlope[b])) { # hardness-based criterion
        # Per-sample work frame for this screen (one row per sample).
        aquatic_screen <- data.frame(Date_Time = character(nrow(tempSamples)),
                                     Sample_No = character(nrow(tempSamples)),
                                     Designated_Use = character(nrow(tempSamples)),
                                     Sp_Layer = character(nrow(tempSamples)),
                                     ScreenType = character(nrow(tempSamples)),
                                     Lat = numeric(nrow(tempSamples)),
                                     Lon = numeric(nrow(tempSamples)),
                                     NAME = character(nrow(tempSamples)),
                                     Sample_Type = character(nrow(tempSamples)),
                                     CritMetal = character(nrow(tempSamples)),
                                     CalcValue = numeric(nrow(tempSamples)),
                                     SampleValue = numeric(nrow(tempSamples)),
                                     ObsMetal = character(nrow(tempSamples)),
                                     stringsAsFactors=FALSE)
        g <- 1
        if (screen$alphaBeta[b] == 0) { # calculator function 1
          for (y in seq_len(nrow(tempSamples))) {
            # Hardness-dependent criterion (ug/L -> mg/L via /1000).
            screen$value[b] <- as.numeric((exp((screen$maSlope[b]*log(tempSamples$Hardness[y]))+screen$mbIntercept[b])*screen$conversionFactor[b])/1000)
            aquatic_screen[g,] <- c(tempSamples$Date_Time[y],
                                    tempSamples$Samp_No[y],
                                    screen$Designated_Use[b],
                                    tempSamples$Sp_Layer[y],
                                    screen$ScreenType[b],
                                    tempSamples$Lat[y],
                                    tempSamples$Lon[y],
                                    screen$NAME[b],
                                    screen$Sample_Type[b],
                                    screen$variable[b],
                                    as.numeric(screen$value[b]),
                                    as.numeric(tempSamples$conc[y]),
                                    tempSamples$variable[y]) # criteria and sample value for screen eval
            g <- g + 1
          }
          # Row-wise assignment coerces these columns to character; restore
          # them to numeric ONCE after the loop. (FIX: the original ran this
          # conversion on every iteration -- accidental O(n^2).)
          aquatic_screen[, c(11:12)] <- sapply(aquatic_screen[, c(11:12)], as.numeric)
        } else if (screen$alphaBeta[b] == 1) { # calculator function 2
          for (z in seq_len(nrow(tempSamples))) {
            screen$value[b] <- as.numeric((exp((screen$maSlope[b]*log(tempSamples$Hardness[z])+screen$mbIntercept[b]))*(screen$alpha[b]-(log(tempSamples$Hardness[z])*screen$beta[b])))/1000)
            aquatic_screen[g,] <- c(tempSamples$Date_Time[z],
                                    tempSamples$Samp_No[z],
                                    screen$Designated_Use[b],
                                    tempSamples$Sp_Layer[z],
                                    screen$ScreenType[b],
                                    tempSamples$Lat[z],
                                    tempSamples$Lon[z],
                                    screen$NAME[b],
                                    screen$Sample_Type[b],
                                    screen$variable[b],
                                    as.numeric(screen$value[b]),
                                    as.numeric(tempSamples$conc[z]),
                                    tempSamples$variable[z]) # criteria and sample value for screen eval
            g <- g + 1
          }
          aquatic_screen[, c(11:12)] <- sapply(aquatic_screen[, c(11:12)], as.numeric)
        } else {
          cat("Something went wrong with the hardness calculator.", "The error occured calculating the screening criteria for",screen$Sample_Type[b],
              screen$variable[b], "using the",screen$ScreenType[b], "screen for",screen$NAME[b])
        }
        # Drop unused preallocated rows, then count screened samples.
        aquatic_screen_cleaned <- filter(aquatic_screen, CalcValue >= 0 & SampleValue >= 0 & CritMetal != "")
        n_screened <- nrow(aquatic_screen_cleaned)
        if (is.null(n_screened)) n_screened <- -50 # defensive; nrow() on a data.frame is never NULL
        if (n_screened > 0) {
          metal_vector_exceedances <- aquatic_screen_cleaned[which(aquatic_screen_cleaned$SampleValue > aquatic_screen_cleaned$CalcValue),]
          metal_exceedance_count <- nrow(metal_vector_exceedances)
          m <- m + 1
          for (f in seq_len(nrow(aquatic_screen_cleaned))) {
            n <- n + 1
            Samplemarkerlayer[n,] <- aquatic_screen_cleaned[f,]
          }
          # Assemble the summary row: fixed columns + grouping-category values.
          nCategories <- UniqueObs[,c(-1,-ncol(UniqueObs))]
          screenvars1 <- c(screen$Designated_Use[b],
                           screen$ScreenType[b],
                           screen$NAME[b])
          cat_cols <- GroupCategories[-length(GroupCategories)]
          screenvars2 <- NULL
          for (x in seq_along(cat_cols)) {
            screenvars2[x] <- if (length(cat_cols) > 1) {nCategories[i,x]} else {nCategories[i]}
          }
          screenvars3 <- c(screen$variable[b],
                           metal_exceedance_count,
                           n_screened)
          output_screen[m,] <- c(screenvars1, screenvars2, screenvars3)
        }
      } else {
        # Fixed-value criterion: compare concentrations to screen$value directly.
        if (!all(is.na(tempSamples$conc))) { # distinguishes a non-detect sample from no sample
          metal_vector_nonas <- tempSamples[!is.na(tempSamples$conc),] # remove NAs
          num_metal_samples <- nrow(metal_vector_nonas)
          if (is.null(num_metal_samples)) {num_metal_samples <- -50}
          if (num_metal_samples > 0) {
            metal_vector_exceedances <- metal_vector_nonas[which(metal_vector_nonas$conc>screen$value[b]),]
            metal_exceedance_count <- nrow(metal_vector_exceedances)
            m <- m + 1
            for (t in seq_len(num_metal_samples)) {
              n <- n + 1
              Samplemarkerlayer[n,] <- c(metal_vector_nonas$Date_Time[t],
                                         metal_vector_nonas$Samp_No[t],
                                         screen$Designated_Use[b],
                                         metal_vector_nonas$Sp_Layer[t],
                                         screen$ScreenType[b],
                                         metal_vector_nonas$Lat[t],
                                         metal_vector_nonas$Lon[t],
                                         metal_vector_nonas$NAME[t],
                                         metal_vector_nonas$Sample_Type[t],
                                         screen$variable[b],
                                         as.numeric(screen$value[b]),
                                         as.numeric(metal_vector_nonas$conc[t]),
                                         unique(tempSamples$variable))
            }
            nCategories <- UniqueObs[,c(-1,-ncol(UniqueObs))]
            screenvars1 <- c(screen$Designated_Use[b],
                             screen$ScreenType[b],
                             screen$NAME[b])
            cat_cols <- GroupCategories[-length(GroupCategories)]
            screenvars2 <- NULL
            for (x in seq_along(cat_cols)) {
              screenvars2[x] <- if (length(cat_cols) > 1) {nCategories[i,x]} else {nCategories[i]}
            }
            screenvars3 <- c(screen$variable[b],
                             metal_exceedance_count,
                             num_metal_samples)
            output_screen[m,] <- c(screenvars1, screenvars2, screenvars3)
          }
        }
      }
    }
  } else {
    # No criteria matched: log the unmatched combination for later review.
    # FIX: was `UniqueObs$variable` (whole column, not element i), and the
    # missing newline made echoFile.txt one unreadable run-on line.
    cat(UniqueObs$Sample_Type[i],
        UniqueObs$variable[i],
        UniqueObs$NAME[i], "\n",
        file="echoFile.txt", append=TRUE)
  }
}
#start_time <- Sys.time()
#sleep_for_a_minute()
#end_time <- Sys.time()
#end_time - start_time
# Drop unused preallocated rows, then split out the exceedances.
output_screen <- filter(output_screen, ScreenType!="")
output_screen$Times_Exceeded <- as.numeric(output_screen$Times_Exceeded)
output_screen_Exceeded <- filter(output_screen, Times_Exceeded > 0)
#write.csv(WQCritAll,file="WQCritAll.csv", row.names=FALSE)
write.csv(output_screen,file="Reload1_file_nonshinyapp.csv", row.names=FALSE)
# Per-sample marker output: sample/criterion ratio plus an exceedance flag.
# NOTE(review): exists("Samplemarkerlayer") is always TRUE here (created above).
if (exists("Samplemarkerlayer")){
  samplemarkers_screen <- filter(Samplemarkerlayer, ScreenType!="") %>%
    mutate(SampleValue = as.numeric(SampleValue),
           CalcValue = as.numeric(CalcValue),
           Difference = SampleValue/CalcValue,
           Type = ifelse(Difference < 1,"NotExceeded","Exceeded"),
           Lat = as.numeric(Lat),
           Lon = as.numeric(Lon),
           #Date_Time = as.POSIXct(Date_Time,format = '%m/%d/%Y %H:%M'), #,usetz = FALSE
           Date_Time = format(as.POSIXct(Date_Time,format = '%m/%d/%Y %H:%M'),format='%Y-%m-%d')) # %H:%M
  write.csv(samplemarkers_screen,file="Samplelatlondiferences.csv",row.names = FALSE)
}
|
# Test vectors
a <- c("R is free software and comes with ABSOLUTELY NO WARRANTY.","You are welcome to redistribute it under certain conditions.","Type 'license()' or 'licence()' for distribution details.","","R is a collaborative project with many contributors.","Type 'contributors()' for more information and","on how to cite R or R packages in publications.","","Type 'demo()' for some demos, 'help()' for on-line help, or","'help.start()' for an HTML browser interface to help.","Type 'q()' to quit R.")
b <- c("", a) # case with a leading blank line
# NOTE(review): this assignment shadows base::c(); calls to c() still resolve
# to the function, but consider a different name.
c <- c("", a, "") # case with a trailing blank line
# Function
# Collapse a "messy" vector of text lines into one element per paragraph.
# Blank lines ("") separate paragraphs; a leading blank line is ignored and
# the final paragraph is flushed at the end of the input.
#
# FIX: the original flushed at `i == length(...)` BEFORE concatenating, so a
# non-blank final line was silently dropped (e.g. "Type 'q()' to quit R." was
# missing from arruma_vetor_baguncado(a)). It also errored on consecutive
# blank lines via `vetor_arrumado[k] <- NULL`.
#
# @param vetor_baguncado character vector of lines, possibly containing ""
# @return character vector with one concatenated string per paragraph
arruma_vetor_baguncado <- function(vetor_baguncado) {
  vetor_arrumado <- NULL      # output vector, one element per paragraph
  texto_concatenado <- NULL   # accumulates the current paragraph's text
  for (i in seq_along(vetor_baguncado)) {
    linha <- vetor_baguncado[i]
    if (linha != "") {
      # Non-blank line: append to the paragraph being built (done BEFORE any
      # end-of-input flush so the last line is never lost).
      texto_concatenado <- paste0(texto_concatenado, linha)
    }
    if ((linha == "" || i == length(vetor_baguncado)) &&
        !is.null(texto_concatenado)) {
      # Blank line or end of input: emit the accumulated paragraph. The
      # !is.null() guard skips leading/consecutive blanks safely.
      vetor_arrumado <- c(vetor_arrumado, texto_concatenado)
      texto_concatenado <- NULL
    }
  }
  return(vetor_arrumado)
}
# (1): if the blank line is the first one, do nothing and skip to the next line.
# (2): on a blank line (or at the end of the messy vector), append the text
#      accumulated so far to the next position of the output vector and reset
#      the accumulating variable.
# (3): otherwise (not a blank line, nor the end of the messy input vector),
#      take the current line's text and append it to the accumulated text.
# Tests
a
b
c
arruma_vetor_baguncado(a)
arruma_vetor_baguncado(b)
arruma_vetor_baguncado(c)
|
/R-Brasil/arruma-vetor-texto.R
|
no_license
|
tiagombp/learning-rstats
|
R
| false
| false
| 1,942
|
r
|
# Test vectors
a <- c("R is free software and comes with ABSOLUTELY NO WARRANTY.","You are welcome to redistribute it under certain conditions.","Type 'license()' or 'licence()' for distribution details.","","R is a collaborative project with many contributors.","Type 'contributors()' for more information and","on how to cite R or R packages in publications.","","Type 'demo()' for some demos, 'help()' for on-line help, or","'help.start()' for an HTML browser interface to help.","Type 'q()' to quit R.")
b <- c("", a) # case with a leading blank line
# NOTE(review): this assignment shadows base::c(); calls to c() still resolve
# to the function, but consider a different name.
c <- c("", a, "") # case with a trailing blank line
# Function
# Collapse a "messy" vector of text lines into one element per paragraph.
# Blank lines ("") separate paragraphs; a leading blank line is ignored and
# the final paragraph is flushed at the end of the input.
#
# FIX: the original flushed at `i == length(...)` BEFORE concatenating, so a
# non-blank final line was silently dropped (e.g. "Type 'q()' to quit R." was
# missing from arruma_vetor_baguncado(a)). It also errored on consecutive
# blank lines via `vetor_arrumado[k] <- NULL`.
#
# @param vetor_baguncado character vector of lines, possibly containing ""
# @return character vector with one concatenated string per paragraph
arruma_vetor_baguncado <- function(vetor_baguncado) {
  vetor_arrumado <- NULL      # output vector, one element per paragraph
  texto_concatenado <- NULL   # accumulates the current paragraph's text
  for (i in seq_along(vetor_baguncado)) {
    linha <- vetor_baguncado[i]
    if (linha != "") {
      # Non-blank line: append to the paragraph being built (done BEFORE any
      # end-of-input flush so the last line is never lost).
      texto_concatenado <- paste0(texto_concatenado, linha)
    }
    if ((linha == "" || i == length(vetor_baguncado)) &&
        !is.null(texto_concatenado)) {
      # Blank line or end of input: emit the accumulated paragraph. The
      # !is.null() guard skips leading/consecutive blanks safely.
      vetor_arrumado <- c(vetor_arrumado, texto_concatenado)
      texto_concatenado <- NULL
    }
  }
  return(vetor_arrumado)
}
# (1): if the blank line is the first one, do nothing and skip to the next line.
# (2): on a blank line (or at the end of the messy vector), append the text
#      accumulated so far to the next position of the output vector and reset
#      the accumulating variable.
# (3): otherwise (not a blank line, nor the end of the messy input vector),
#      take the current line's text and append it to the accumulated text.
# Tests
a
b
c
arruma_vetor_baguncado(a)
arruma_vetor_baguncado(b)
arruma_vetor_baguncado(c)
|
# Date   : 2021/01/19
# Author : Kim Eun-pyo
# Topic  : Ch04 Control flow and functions - loops (textbook p115)

# Textbook p115 exercise - basic use of for()
i <- c(1:10)
for (n in i) {
  print(n * 10)
  print(n)
}

# Textbook p116 exercise - print only even values
i <- c(1:10)
for (n in i)
  if (n %% 2 == 0) print(n)

# Textbook p116 exercise - skip even values, print only odd values
i <- c(1:10)
for (n in i) {
  if (n %% 2 == 0) {
    next
  } else {
    print(n)
  }
}

# Textbook p116 exercise - print the column names of a variable
# NOTE(review): `exam` is not defined anywhere in this script -- presumably it
# was loaded in an earlier session; confirm before running stand-alone.
name <- c(names(exam))
for (n in name) {
  print(n)
}

# Textbook p117 exercise - using vector data
score <- c(85, 95, 98)
name <- c("홍길동", "이순신", "강감찬")
# FIX: was `i <- i + 1`, which added 1 to the leftover vector c(1:10) above,
# making i = 2:11 and misaligning name[i] with the scores (printing NAs).
# The counter must simply start at 1.
i <- 1
for (s in score) {
  cat(name[i], "->", s, "\n")
  i <- i + 1
}

# Textbook p117 exercise - using while()
i <- 0
while (i < 10) {
  i <- i + 1
  print(i)
}
|
/Ch04/4_3_Loop.R
|
no_license
|
kepchef/R
|
R
| false
| false
| 852
|
r
|
# Date   : 2021/01/19
# Author : Kim Eun-pyo
# Topic  : Ch04 Control flow and functions - loops (textbook p115)

# Textbook p115 exercise - basic use of for()
i <- c(1:10)
for (n in i) {
  print(n * 10)
  print(n)
}

# Textbook p116 exercise - print only even values
i <- c(1:10)
for (n in i)
  if (n %% 2 == 0) print(n)

# Textbook p116 exercise - skip even values, print only odd values
i <- c(1:10)
for (n in i) {
  if (n %% 2 == 0) {
    next
  } else {
    print(n)
  }
}

# Textbook p116 exercise - print the column names of a variable
# NOTE(review): `exam` is not defined anywhere in this script -- presumably it
# was loaded in an earlier session; confirm before running stand-alone.
name <- c(names(exam))
for (n in name) {
  print(n)
}

# Textbook p117 exercise - using vector data
score <- c(85, 95, 98)
name <- c("홍길동", "이순신", "강감찬")
# FIX: was `i <- i + 1`, which added 1 to the leftover vector c(1:10) above,
# making i = 2:11 and misaligning name[i] with the scores (printing NAs).
# The counter must simply start at 1.
i <- 1
for (s in score) {
  cat(name[i], "->", s, "\n")
  i <- i + 1
}

# Textbook p117 exercise - using while()
i <- 0
while (i < 10) {
  i <- i + 1
  print(i)
}
|
#' Parse the info component of a gt3x file
#'
#' Reads the key/value pairs from an ActiGraph info.txt connection and
#' returns them as a one-row data frame with numeric and date columns
#' coerced to appropriate types.
#'
#' @param info connection to the info.txt file
#' @param tz character. The timezone
#' @param verbose logical. Print updates to console?
#' @param ... further arguments/methods. Currently unused.
#'
#' @keywords internal
#'
parse_info_txt <- function(info, tz = "UTC", verbose, ...) {
  if (verbose) cat("\n Parsing info.txt")
  # Split each "Key: Value" line into a pair, then separate keys and values
  pairs <- strsplit(readLines(info), ": ")
  keys <- gsub(" ", "_", vapply(pairs, function(p) p[1], character(1)))
  vals <- vapply(pairs, function(p) p[2], character(1))
  # One-row data frame of character values, named by the (underscored) keys
  meta <- data.frame(t(vals), row.names = NULL, stringsAsFactors = FALSE)
  names(meta) <- keys
  # Coerce the numeric metadata fields
  num_vars <- c(
    "Battery_Voltage", "Sample_Rate", "Board_Revision",
    "Unexpected_Resets", "Acceleration_Scale",
    "Acceleration_Min", "Acceleration_Max"
  )
  stopifnot(all(num_vars %in% names(meta)))
  for (nm in num_vars) {
    meta[, nm] <- as.numeric(as.character(meta[, nm]))
  }
  # Convert tick-format timestamps to POSIX date-times in the requested tz
  tick_vars <- c(
    "Start_Date", "Stop_Date",
    "Last_Sample_Time", "Download_Date"
  )
  stopifnot(all(tick_vars %in% names(meta)))
  for (nm in tick_vars) {
    meta[, nm] <- tick_to_posix(meta[, nm], tz)
  }
  # Re-format the download date as mm/dd/yyyy text
  meta$Download_Date <- strftime(meta$Download_Date, "%m/%d/%Y")
  if (verbose) cat(" ............. COMPLETE")
  meta
}
|
/R/read_gt3x_parse_info_txt.R
|
permissive
|
muschellij2/AGread
|
R
| false
| false
| 1,436
|
r
|
#' Parse the info component of a gt3x file
#'
#' Reads the key/value pairs from an ActiGraph info.txt connection and
#' returns them as a one-row data frame with numeric and date columns
#' coerced to appropriate types.
#'
#' @param info connection to the info.txt file
#' @param tz character. The timezone
#' @param verbose logical. Print updates to console?
#' @param ... further arguments/methods. Currently unused.
#'
#' @keywords internal
#'
parse_info_txt <- function(info, tz = "UTC", verbose, ...) {
  if (verbose) cat("\n Parsing info.txt")
  # Split each "Key: Value" line into a pair, then separate keys and values
  pairs <- strsplit(readLines(info), ": ")
  keys <- gsub(" ", "_", vapply(pairs, function(p) p[1], character(1)))
  vals <- vapply(pairs, function(p) p[2], character(1))
  # One-row data frame of character values, named by the (underscored) keys
  meta <- data.frame(t(vals), row.names = NULL, stringsAsFactors = FALSE)
  names(meta) <- keys
  # Coerce the numeric metadata fields
  num_vars <- c(
    "Battery_Voltage", "Sample_Rate", "Board_Revision",
    "Unexpected_Resets", "Acceleration_Scale",
    "Acceleration_Min", "Acceleration_Max"
  )
  stopifnot(all(num_vars %in% names(meta)))
  for (nm in num_vars) {
    meta[, nm] <- as.numeric(as.character(meta[, nm]))
  }
  # Convert tick-format timestamps to POSIX date-times in the requested tz
  tick_vars <- c(
    "Start_Date", "Stop_Date",
    "Last_Sample_Time", "Download_Date"
  )
  stopifnot(all(tick_vars %in% names(meta)))
  for (nm in tick_vars) {
    meta[, nm] <- tick_to_posix(meta[, nm], tz)
  }
  # Re-format the download date as mm/dd/yyyy text
  meta$Download_Date <- strftime(meta$Download_Date, "%m/%d/%Y")
  if (verbose) cat(" ............. COMPLETE")
  meta
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summaryTable.R
\name{summaryTable}
\alias{summaryTable}
\title{summaryTable}
\usage{
summaryTable(data, dir = NULL)
}
\arguments{
\item{dir}{filename to save output}
\item{data}{Individual patient records}
}
\value{
dataframe
}
\description{
\code{summaryTable} gives basic statistics for patient groupings in IDEA data extract
}
|
/man/summaryTable.Rd
|
no_license
|
n8thangreen/IDEAdectree
|
R
| false
| true
| 410
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summaryTable.R
\name{summaryTable}
\alias{summaryTable}
\title{summaryTable}
\usage{
summaryTable(data, dir = NULL)
}
\arguments{
\item{dir}{filename to save output}
\item{data}{Individual patient records}
}
\value{
dataframe
}
\description{
\code{summaryTable} gives basic statistics for patient groupings in IDEA data extract
}
|
# Initial exploratory models: classify whether the 2017 assessed value is
# within +/-10% of the sale price, and model the sale/assessment ratio.
library(dplyr)
library(caret)
library(randomForest)
library(rpart)
library(plotly)  # FIX: plot_geo(), add_markers() and layout() below are plotly functions

data2017 <- readxl::read_xlsx("data/_flat_2017.xlsx")

# Map of parcels (marker size = pre-roll assessed value).
# NOTE(review): lon is mapped to *_latitude and lat to *_longitude — these
# look swapped; confirm against the actual column contents.
g <- list(
  scope = 'usa',
  projection = list(type = 'albers usa')
)
plot_geo(data2017, lon = ~WGS84_latitude, lat = ~WGS84_longitude) %>%
  add_markers(data2017, size = ~`total_AV_pre-roll`) %>%
  layout(geo = g)

# diff = sale price relative to 2017 total assessed value; inRange flags
# assessments within +/-10% of the sale price.
data2017 <- data2017 %>%
  mutate(diff = SalePrice / TotalAV_17,
         inRange = ifelse(diff > .9 & diff < 1.1, 1, 0),
         Twp = as.factor(Twp), Grade = as.factor(Grade),
         Cond = as.factor(Cond),
         Nbrhd = as.factor(Nbrhd))

# Modeling frame: sold parcels with complete records only.
salesOnly <- data2017 %>%
  filter(!is.na(SalePrice)) %>%
  # FIX: removed the duplicated Twp/Grade/Cond entries (select() silently
  # de-duplicates them anyway).
  select(diff, inRange, Twp, Acreage, Extensions, Improvements, Grade, Cond,
         YrBuilt, DwellFinishedArea, Floors, Dwellings,
         Bedrooms, Bathrooms, HeatType, KitchenSinks, TotalFixtures,
         Nbrhd) %>%
  na.omit() %>%
  mutate(inRange = as.factor(inRange))

# Predicting Range Correct #
correctRangeTree <- salesOnly %>%
  select(-diff) %>%
  party::ctree(inRange ~ ., data = .)
table(predict(correctRangeTree), salesOnly$inRange)
plot(correctRangeTree)

correctRangeForest <- salesOnly %>%
  select(-diff) %>%
  party::cforest(inRange ~ ., data = .)
table(predict(correctRangeForest), salesOnly$inRange)
# FIX: removed a premature varImpPlot(differenceForest) call that appeared
# here — differenceForest is only fit at the bottom of the script, so the
# call errored at this point.

svmTest <- salesOnly %>%
  select(-diff) %>%
  e1071::svm(inRange ~ ., data = .)
summary(svmTest)
pred <- fitted(svmTest)
table(fitted(svmTest), salesOnly$inRange)

# Predicting Proportion #
differenceTree <- rpart(diff ~ YrBuilt + DwellFinishedArea + Bedrooms + Bathrooms,
                        data = salesOnly, method = "anova")
differenceForest <- randomForest(diff ~ YrBuilt + DwellFinishedArea + Bedrooms + Bathrooms,
                                 data = salesOnly, mtry = 2)
varImpPlot(differenceForest)
|
/initialModels.R
|
no_license
|
saberry/sjcAV
|
R
| false
| false
| 1,862
|
r
|
# Initial exploratory models: classify whether the 2017 assessed value is
# within +/-10% of the sale price, and model the sale/assessment ratio.
library(dplyr)
library(caret)
library(randomForest)
library(rpart)
library(plotly)  # FIX: plot_geo(), add_markers() and layout() below are plotly functions

data2017 <- readxl::read_xlsx("data/_flat_2017.xlsx")

# Map of parcels (marker size = pre-roll assessed value).
# NOTE(review): lon is mapped to *_latitude and lat to *_longitude — these
# look swapped; confirm against the actual column contents.
g <- list(
  scope = 'usa',
  projection = list(type = 'albers usa')
)
plot_geo(data2017, lon = ~WGS84_latitude, lat = ~WGS84_longitude) %>%
  add_markers(data2017, size = ~`total_AV_pre-roll`) %>%
  layout(geo = g)

# diff = sale price relative to 2017 total assessed value; inRange flags
# assessments within +/-10% of the sale price.
data2017 <- data2017 %>%
  mutate(diff = SalePrice / TotalAV_17,
         inRange = ifelse(diff > .9 & diff < 1.1, 1, 0),
         Twp = as.factor(Twp), Grade = as.factor(Grade),
         Cond = as.factor(Cond),
         Nbrhd = as.factor(Nbrhd))

# Modeling frame: sold parcels with complete records only.
salesOnly <- data2017 %>%
  filter(!is.na(SalePrice)) %>%
  # FIX: removed the duplicated Twp/Grade/Cond entries (select() silently
  # de-duplicates them anyway).
  select(diff, inRange, Twp, Acreage, Extensions, Improvements, Grade, Cond,
         YrBuilt, DwellFinishedArea, Floors, Dwellings,
         Bedrooms, Bathrooms, HeatType, KitchenSinks, TotalFixtures,
         Nbrhd) %>%
  na.omit() %>%
  mutate(inRange = as.factor(inRange))

# Predicting Range Correct #
correctRangeTree <- salesOnly %>%
  select(-diff) %>%
  party::ctree(inRange ~ ., data = .)
table(predict(correctRangeTree), salesOnly$inRange)
plot(correctRangeTree)

correctRangeForest <- salesOnly %>%
  select(-diff) %>%
  party::cforest(inRange ~ ., data = .)
table(predict(correctRangeForest), salesOnly$inRange)
# FIX: removed a premature varImpPlot(differenceForest) call that appeared
# here — differenceForest is only fit at the bottom of the script, so the
# call errored at this point.

svmTest <- salesOnly %>%
  select(-diff) %>%
  e1071::svm(inRange ~ ., data = .)
summary(svmTest)
pred <- fitted(svmTest)
table(fitted(svmTest), salesOnly$inRange)

# Predicting Proportion #
differenceTree <- rpart(diff ~ YrBuilt + DwellFinishedArea + Bedrooms + Bathrooms,
                        data = salesOnly, method = "anova")
differenceForest <- randomForest(diff ~ YrBuilt + DwellFinishedArea + Bedrooms + Bathrooms,
                                 data = salesOnly, mtry = 2)
varImpPlot(differenceForest)
|
#' Populate the shiny app data folder
#'
#' Copies the settings file, every analysis result (censored to a minimum
#' cell count so it can be shared) plus its log, and any external
#' validation results from a result directory into the shiny app's
#' \code{data} folder.
#'
#' @param shinyDirectory directory of the shiny app; defaults to the
#'   PLPViewer app shipped with the SUhypoglycemia package
#' @param resultDirectory directory holding the prediction results (required)
#' @param minCellCount counts below this are censored before sharing
#' @param databaseName sharable name of the development database
#' @return the path to the populated data directory
#' @export
populateShinyApp <- function(shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  # check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "SUhypoglycemia")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  outputDirectory <- file.path(shinyDirectory,'data')
  # create the shiny data folder
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)  # FIX: TRUE instead of reassignable T
  }
  # copy the settings csv
  file <- utils::read.csv(file.path(resultDirectory,'settings.csv'))
  utils::write.csv(file, file.path(outputDirectory,'settings.csv'), row.names = FALSE)
  # copy each analysis as a rds file and copy the log
  files <- dir(resultDirectory, full.names = FALSE)
  files <- files[grep('Analysis', files)]
  for(file in files){
    if(!dir.exists(file.path(outputDirectory,file))){
      dir.create(file.path(outputDirectory,file))
    }
    if(dir.exists(file.path(resultDirectory,file, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory,file, 'plpResult'))
      # censor small counts so the result can be shared publicly
      res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                  save = FALSE, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory,file, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory,file, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory,file, 'plpLog.txt'),
                to = file.path(outputDirectory,file, 'plpLog.txt'))
    }
  }
  # copy any validation results
  if(dir.exists(file.path(resultDirectory,'Validation'))){
    valFolders <- dir(file.path(resultDirectory,'Validation'), full.names = FALSE)
    if(length(valFolders) > 0){
      # move each of the validation rds
      for(valFolder in valFolders){
        # get the analysisIds
        valSubfolders <- dir(file.path(resultDirectory,'Validation',valFolder), full.names = FALSE)
        if(length(valSubfolders) != 0){
          for(valSubfolder in valSubfolders){
            valOut <- file.path(valFolder,valSubfolder)
            if(!dir.exists(file.path(outputDirectory,'Validation',valOut))){
              dir.create(file.path(outputDirectory,'Validation',valOut), recursive = TRUE)
            }
            if(file.exists(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))){
              res <- readRDS(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))
              res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                          save = FALSE, dataName = databaseName)
              saveRDS(res, file.path(outputDirectory,'Validation',valOut, 'validationResult.rds'))
            }
          }
        }
      }
    }
  }
  return(outputDirectory)
}
#' View shiny app
#' @details
#' This function will open an interactive shiny app for viewing the results
#' @param package The name of the package as a string
#'
#' @examples
#' \dontrun{
#' viewShiny()
#' }
#' @export
viewShiny <- function(package = NULL){
  # Resolve the PLPViewer app directory from the requested package,
  # defaulting to the SUhypoglycemia package when none is supplied.
  pkg <- if (is.null(package)) "SUhypoglycemia" else package
  appDir <- system.file("shiny", "PLPViewer", package = pkg)
  shiny::shinyAppDir(appDir)
}
|
/2019SymposiumTutorial-PLP/SUhypoglycemia/R/populateShinyApp.R
|
permissive
|
ohdsi-korea/OhdsiKoreaTutorials
|
R
| false
| false
| 3,643
|
r
|
#' Populate the shiny app data folder
#'
#' Copies the settings file, every analysis result (censored to a minimum
#' cell count so it can be shared) plus its log, and any external
#' validation results from a result directory into the shiny app's
#' \code{data} folder.
#'
#' @param shinyDirectory directory of the shiny app; defaults to the
#'   PLPViewer app shipped with the SUhypoglycemia package
#' @param resultDirectory directory holding the prediction results (required)
#' @param minCellCount counts below this are censored before sharing
#' @param databaseName sharable name of the development database
#' @return the path to the populated data directory
#' @export
populateShinyApp <- function(shinyDirectory,
                             resultDirectory,
                             minCellCount = 10,
                             databaseName = 'sharable name of development data'){
  # check inputs
  if(missing(shinyDirectory)){
    shinyDirectory <- system.file("shiny", "PLPViewer", package = "SUhypoglycemia")
  }
  if(missing(resultDirectory)){
    stop('Need to enter the resultDirectory')
  }
  if(!dir.exists(resultDirectory)){
    stop('resultDirectory does not exist')
  }
  outputDirectory <- file.path(shinyDirectory,'data')
  # create the shiny data folder
  if(!dir.exists(outputDirectory)){
    dir.create(outputDirectory, recursive = TRUE)  # FIX: TRUE instead of reassignable T
  }
  # copy the settings csv
  file <- utils::read.csv(file.path(resultDirectory,'settings.csv'))
  utils::write.csv(file, file.path(outputDirectory,'settings.csv'), row.names = FALSE)
  # copy each analysis as a rds file and copy the log
  files <- dir(resultDirectory, full.names = FALSE)
  files <- files[grep('Analysis', files)]
  for(file in files){
    if(!dir.exists(file.path(outputDirectory,file))){
      dir.create(file.path(outputDirectory,file))
    }
    if(dir.exists(file.path(resultDirectory,file, 'plpResult'))){
      res <- PatientLevelPrediction::loadPlpResult(file.path(resultDirectory,file, 'plpResult'))
      # censor small counts so the result can be shared publicly
      res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                  save = FALSE, dataName = databaseName)
      saveRDS(res, file.path(outputDirectory,file, 'plpResult.rds'))
    }
    if(file.exists(file.path(resultDirectory,file, 'plpLog.txt'))){
      file.copy(from = file.path(resultDirectory,file, 'plpLog.txt'),
                to = file.path(outputDirectory,file, 'plpLog.txt'))
    }
  }
  # copy any validation results
  if(dir.exists(file.path(resultDirectory,'Validation'))){
    valFolders <- dir(file.path(resultDirectory,'Validation'), full.names = FALSE)
    if(length(valFolders) > 0){
      # move each of the validation rds
      for(valFolder in valFolders){
        # get the analysisIds
        valSubfolders <- dir(file.path(resultDirectory,'Validation',valFolder), full.names = FALSE)
        if(length(valSubfolders) != 0){
          for(valSubfolder in valSubfolders){
            valOut <- file.path(valFolder,valSubfolder)
            if(!dir.exists(file.path(outputDirectory,'Validation',valOut))){
              dir.create(file.path(outputDirectory,'Validation',valOut), recursive = TRUE)
            }
            if(file.exists(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))){
              res <- readRDS(file.path(resultDirectory,'Validation',valOut, 'validationResult.rds'))
              res <- PatientLevelPrediction::transportPlp(res, n = minCellCount,
                                                          save = FALSE, dataName = databaseName)
              saveRDS(res, file.path(outputDirectory,'Validation',valOut, 'validationResult.rds'))
            }
          }
        }
      }
    }
  }
  return(outputDirectory)
}
#' View shiny app
#' @details
#' This function will open an interactive shiny app for viewing the results
#' @param package The name of the package as a string
#'
#' @examples
#' \dontrun{
#' viewShiny()
#' }
#' @export
viewShiny <- function(package = NULL){
  # Resolve the PLPViewer app directory from the requested package,
  # defaulting to the SUhypoglycemia package when none is supplied.
  pkg <- if (is.null(package)) "SUhypoglycemia" else package
  appDir <- system.file("shiny", "PLPViewer", package = pkg)
  shiny::shinyAppDir(appDir)
}
|
#' calc.B2, Calculates standardized version of Levins (1968) B2 measure of niche breadth given a vector of suitabilities
#'
#' @param x A numeric vector
#'
#' @return B2 A calculation of Levins (1968) B2 metric
#'
#' @keywords niche breadth sdm enm
#'
#' @export calc.B2
#'
#' @examples
#' calc.B2(c(1, .001, .001))
calc.B2 <- function(x){
  # Drop missing values and rescale so the suitabilities sum to one.
  p <- x[!is.na(x)]
  p <- p / sum(p)
  # Levins' B2 = 1 / sum(p^2); standardize to [0, 1] between the minimum
  # (1: all weight on a single cell) and the maximum (length(p): uniform).
  b2 <- 1 / sum(p^2)
  b2.min <- 1
  b2.max <- 1 / (length(p) * (1 / length(p))^2)
  (b2 - b2.min) / (b2.max - b2.min)
}
|
/R/calc.B2.R
|
no_license
|
johnbaums/ENMTools
|
R
| false
| false
| 509
|
r
|
#' calc.B2, Calculates standardized version of Levins (1968) B2 measure of niche breadth given a vector of suitabilities
#'
#' @param x A numeric vector
#'
#' @return B2 A calculation of Levins (1968) B2 metric
#'
#' @keywords niche breadth sdm enm
#'
#' @export calc.B2
#'
#' @examples
#' calc.B2(c(1, .001, .001))
calc.B2 <- function(x){
  # Drop missing values and rescale so the suitabilities sum to one.
  p <- x[!is.na(x)]
  p <- p / sum(p)
  # Levins' B2 = 1 / sum(p^2); standardize to [0, 1] between the minimum
  # (1: all weight on a single cell) and the maximum (length(p): uniform).
  b2 <- 1 / sum(p^2)
  b2.min <- 1
  b2.max <- 1 / (length(p) * (1 / length(p))^2)
  (b2 - b2.min) / (b2.max - b2.min)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resample.R
\name{rspin}
\alias{rspin}
\title{Simulate spinning a spinner}
\usage{
rspin(n, probs, labels = 1:length(probs))
}
\arguments{
\item{n}{number of spins of spinner}
\item{probs}{a vector of probabilities. If the sum is not 1, the
probabilities will be rescaled.}
\item{labels}{a character vector of labels for the categories}
}
\description{
This is essentially \code{rmultinom} with a different interface.
}
\examples{
rspin(20, probs=c(1,2,3), labels=c("Red", "Blue", "Green"))
do(2) * rspin(20, probs=c(1,2,3), labels=c("Red", "Blue", "Green"))
}
|
/man/rspin.Rd
|
no_license
|
ProjectMOSAIC/mosaic
|
R
| false
| true
| 639
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resample.R
\name{rspin}
\alias{rspin}
\title{Simulate spinning a spinner}
\usage{
rspin(n, probs, labels = 1:length(probs))
}
\arguments{
\item{n}{number of spins of spinner}
\item{probs}{a vector of probabilities. If the sum is not 1, the
probabilities will be rescaled.}
\item{labels}{a character vector of labels for the categories}
}
\description{
This is essentially \code{rmultinom} with a different interface.
}
\examples{
rspin(20, probs=c(1,2,3), labels=c("Red", "Blue", "Green"))
do(2) * rspin(20, probs=c(1,2,3), labels=c("Red", "Blue", "Green"))
}
|
# Generalized Exponential Geometric Distribution
|
/Distributions/eachGraphs/_generalizedExponentialGeometricDistribution.R
|
no_license
|
praster1/Note_SurvivalAnalysis
|
R
| false
| false
| 48
|
r
|
# Generalized Exponential Geometric Distribution
|
# Clean archaeological-site records: derive one yes/no feature flag per site
# from the TY_TYPOLOGY string and export a simplified table for later merges.
# NOTE(review): uses dplyr verbs (%>%, select, rename) — assumes dplyr is
# attached elsewhere in the session; confirm.
arch_sites<-read.csv("C:Food web idea//Data by person//Kalina.data//arch_sites.csv", header=TRUE, sep=",")
head(arch_sites)
# Flag each feature type via substring matches against TY_TYPOLOGY.
arch_sites$midden_feature <- ifelse(grepl("Shell Midden", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$CMT <- ifelse(grepl("Culturally Modified Tree", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$clam_garden <- ifelse(grepl("Clam Garden", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$fish_feature <- ifelse(grepl("Fish Trap", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$canoe_skid <- ifelse(grepl("Canoe Skid", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$any_arch <- ifelse(grepl("PRECONTACT|TRADITIONAL USE|Shell Midden|HISTORIC", arch_sites$TY_TYPOLOGY), "yes", "no")
# Keep the site ID, UTM coordinates and the feature flags, with simpler names.
arch_sites_selected<- arch_sites %>% dplyr::select(BORDENNUMBER, MR_GISUTMEASTING, MR_GISUTMNORTHING,
midden_feature, fish_feature, CMT, clam_garden, canoe_skid, any_arch) %>%
rename(site_id=BORDENNUMBER , easting=MR_GISUTMEASTING , northing=MR_GISUTMNORTHING)
head(arch_sites_selected)
write.csv(arch_sites_selected, "C:Biodiversity idea//Output files//arch_sites_selected.csv", row.names=FALSE)
#### additional code for combining arch sites:
# arch sites paired with transects (within a 300 m radius)
arch_sites_distance_tran<-read.csv("Biodiversity idea//Output files//paired_arch_by_radius_300.csv")
head(arch_sites_distance_tran)
length(unique(arch_sites_distance_tran$unq_tran))
# 84 unique transects if using 300m radius
## adding in arch data from the output file produced above in this script
arch_data<-read.csv("C:Biodiversity idea//Output files//arch_sites_selected.csv")
head(arch_data)
arch_data_simple<-arch_data[ , c("site_id", "CMT", "clam_garden", "midden_feature", "fish_feature", "canoe_skid")]
head(arch_data_simple)
# The block below (merging arch features into fish-richness data and recoding
# yes/no flags to 0/1 for SEM) is intentionally left commented out.
# arch_merged<-merge(arch_sites_distance_tran, arch_data_simple, by="site_id", all=TRUE)
# head(arch_merged)
#
# fish_richness_merged_tran_arch<-merge(fish_richness_merged_tran, arch_sites_distance_tran, by="unq_tran", all.x=TRUE)
#
# #head(fish_richness_merged_tran_arch)
# length(unique(fish_richness_merged_tran_arch$unq_tran))
#
#
# fish_richness_merged_tran_arch<-merge(fish_richness_merged_tran_arch, arch_data_simple, by="site_id", all.x=TRUE)
# View(fish_richness_merged_tran_arch)
# #for sem:
# fish_richness_merged_tran_arch$midden_feature_sem<-as.character(fish_richness_merged_tran_arch$midden_feature)
# fish_richness_merged_tran_arch$midden_feature_sem<- dplyr::recode(fish_richness_merged_tran_arch$midden_feature_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$midden_feature_sem[is.na(fish_richness_merged_tran_arch$midden_feature_sem)] <- 0
# fish_richness_merged_tran_arch$midden_feature_sem<-as.numeric(fish_richness_merged_tran_arch$midden_feature_sem)
#
# fish_richness_merged_tran_arch$fish_feature_sem<-as.character(fish_richness_merged_tran_arch$fish_feature)
# fish_richness_merged_tran_arch$fish_feature_sem<-dplyr::recode(fish_richness_merged_tran_arch$fish_feature_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$fish_feature_sem[is.na(fish_richness_merged_tran_arch$fish_feature_sem)] <- 0
# fish_richness_merged_tran_arch$fish_feature_sem<-as.numeric(fish_richness_merged_tran_arch$fish_feature_sem)
#
# fish_richness_merged_tran_arch$canoe_skid_sem<-as.character(fish_richness_merged_tran_arch$canoe_skid)
# fish_richness_merged_tran_arch$canoe_skid_sem<-dplyr::recode(fish_richness_merged_tran_arch$canoe_skid_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$canoe_skid_sem[is.na(fish_richness_merged_tran_arch$canoe_skid_sem)] <- 0
# fish_richness_merged_tran_arch$canoe_skid_sem<-as.numeric(fish_richness_merged_tran_arch$canoe_skid_sem)
#
#
# fish_richness_merged_tran_arch$CMT<-as.factor(fish_richness_merged_tran_arch$CMT)
# fish_richness_merged_tran_arch$clam_garden<-as.factor(fish_richness_merged_tran_arch$clam_garden)
# fish_richness_merged_tran_arch$midden_feature<-factor(fish_richness_merged_tran_arch$midden_feature)
# fish_richness_merged_tran_arch$fish_feature<-as.factor(fish_richness_merged_tran_arch$fish_feature)
# fish_richness_merged_tran_arch$canoe_skid<-as.factor(fish_richness_merged_tran_arch$canoe_skid)
#
|
/Food web idea/R files/Current scripts/arch sites cleaning.R
|
no_license
|
nembrown/100-islands
|
R
| false
| false
| 4,210
|
r
|
# Clean archaeological-site records: derive one yes/no feature flag per site
# from the TY_TYPOLOGY string and export a simplified table for later merges.
# NOTE(review): uses dplyr verbs (%>%, select, rename) — assumes dplyr is
# attached elsewhere in the session; confirm.
arch_sites<-read.csv("C:Food web idea//Data by person//Kalina.data//arch_sites.csv", header=TRUE, sep=",")
head(arch_sites)
# Flag each feature type via substring matches against TY_TYPOLOGY.
arch_sites$midden_feature <- ifelse(grepl("Shell Midden", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$CMT <- ifelse(grepl("Culturally Modified Tree", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$clam_garden <- ifelse(grepl("Clam Garden", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$fish_feature <- ifelse(grepl("Fish Trap", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$canoe_skid <- ifelse(grepl("Canoe Skid", arch_sites$TY_TYPOLOGY), "yes", "no")
arch_sites$any_arch <- ifelse(grepl("PRECONTACT|TRADITIONAL USE|Shell Midden|HISTORIC", arch_sites$TY_TYPOLOGY), "yes", "no")
# Keep the site ID, UTM coordinates and the feature flags, with simpler names.
arch_sites_selected<- arch_sites %>% dplyr::select(BORDENNUMBER, MR_GISUTMEASTING, MR_GISUTMNORTHING,
midden_feature, fish_feature, CMT, clam_garden, canoe_skid, any_arch) %>%
rename(site_id=BORDENNUMBER , easting=MR_GISUTMEASTING , northing=MR_GISUTMNORTHING)
head(arch_sites_selected)
write.csv(arch_sites_selected, "C:Biodiversity idea//Output files//arch_sites_selected.csv", row.names=FALSE)
#### additional code for combining arch sites:
# arch sites paired with transects (within a 300 m radius)
arch_sites_distance_tran<-read.csv("Biodiversity idea//Output files//paired_arch_by_radius_300.csv")
head(arch_sites_distance_tran)
length(unique(arch_sites_distance_tran$unq_tran))
# 84 unique transects if using 300m radius
## adding in arch data from the output file produced above in this script
arch_data<-read.csv("C:Biodiversity idea//Output files//arch_sites_selected.csv")
head(arch_data)
arch_data_simple<-arch_data[ , c("site_id", "CMT", "clam_garden", "midden_feature", "fish_feature", "canoe_skid")]
head(arch_data_simple)
# The block below (merging arch features into fish-richness data and recoding
# yes/no flags to 0/1 for SEM) is intentionally left commented out.
# arch_merged<-merge(arch_sites_distance_tran, arch_data_simple, by="site_id", all=TRUE)
# head(arch_merged)
#
# fish_richness_merged_tran_arch<-merge(fish_richness_merged_tran, arch_sites_distance_tran, by="unq_tran", all.x=TRUE)
#
# #head(fish_richness_merged_tran_arch)
# length(unique(fish_richness_merged_tran_arch$unq_tran))
#
#
# fish_richness_merged_tran_arch<-merge(fish_richness_merged_tran_arch, arch_data_simple, by="site_id", all.x=TRUE)
# View(fish_richness_merged_tran_arch)
# #for sem:
# fish_richness_merged_tran_arch$midden_feature_sem<-as.character(fish_richness_merged_tran_arch$midden_feature)
# fish_richness_merged_tran_arch$midden_feature_sem<- dplyr::recode(fish_richness_merged_tran_arch$midden_feature_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$midden_feature_sem[is.na(fish_richness_merged_tran_arch$midden_feature_sem)] <- 0
# fish_richness_merged_tran_arch$midden_feature_sem<-as.numeric(fish_richness_merged_tran_arch$midden_feature_sem)
#
# fish_richness_merged_tran_arch$fish_feature_sem<-as.character(fish_richness_merged_tran_arch$fish_feature)
# fish_richness_merged_tran_arch$fish_feature_sem<-dplyr::recode(fish_richness_merged_tran_arch$fish_feature_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$fish_feature_sem[is.na(fish_richness_merged_tran_arch$fish_feature_sem)] <- 0
# fish_richness_merged_tran_arch$fish_feature_sem<-as.numeric(fish_richness_merged_tran_arch$fish_feature_sem)
#
# fish_richness_merged_tran_arch$canoe_skid_sem<-as.character(fish_richness_merged_tran_arch$canoe_skid)
# fish_richness_merged_tran_arch$canoe_skid_sem<-dplyr::recode(fish_richness_merged_tran_arch$canoe_skid_sem, yes = "1", no="0")
# fish_richness_merged_tran_arch$canoe_skid_sem[is.na(fish_richness_merged_tran_arch$canoe_skid_sem)] <- 0
# fish_richness_merged_tran_arch$canoe_skid_sem<-as.numeric(fish_richness_merged_tran_arch$canoe_skid_sem)
#
#
# fish_richness_merged_tran_arch$CMT<-as.factor(fish_richness_merged_tran_arch$CMT)
# fish_richness_merged_tran_arch$clam_garden<-as.factor(fish_richness_merged_tran_arch$clam_garden)
# fish_richness_merged_tran_arch$midden_feature<-factor(fish_richness_merged_tran_arch$midden_feature)
# fish_richness_merged_tran_arch$fish_feature<-as.factor(fish_richness_merged_tran_arch$fish_feature)
# fish_richness_merged_tran_arch$canoe_skid<-as.factor(fish_richness_merged_tran_arch$canoe_skid)
#
|
# Assignment 2 — classify a simulated two-class data set with a linear
# probability model, logistic regression, LDA, QDA and KNN, then compare
# models with 10-fold / leave-one-out cross-validation.
# (removed `rm(list = ls())` — wiping the user's workspace is a side effect
# a script should not have)
# FIX: lda()/qda() come from MASS and knn()/knn.cv() from class; load them
# explicitly so the script is self-contained.
library(MASS)
library(class)
library(boot)

# FIX: was `setwd = "..."`, which only created a character variable named
# `setwd` instead of changing the working directory; call the function.
setwd("G:/IIT_MADRAS_DD/Semesters/7th sem (UQ)/ECON2333 (Big Data and Machine learning in Finance and economics)/Assignment_2")
data1 <- read.csv('G:/IIT_MADRAS_DD/Semesters/7th sem (UQ)/ECON2333 (Big Data and Machine learning in Finance and economics)/Assignment_2/Assign2.csv')
View(data1)
attach(data1)  # NOTE(review): attach() is fragile — prefer data1$x1 etc.

# 1 — scatter plot of the two classes
plot(x1, x2, 'p', col = 'green')
points(x1[y == 1], x2[y == 1], col = 'black')

# 2 — random 80/20 train/test split
indexes <- sample(1:nrow(data1), size = 0.2 * nrow(data1))
test <- data1[indexes, ]
dim(test)   # (200,3)
train <- data1[-indexes, ]
dim(train)  # (800,3)
View(train)
View(test)

# Linear probability model
lm.fit <- lm(y ~ x1 + x2, data = train)
lm.fit
prediction <- predict(lm.fit, newdata = test, se.fit = FALSE, type = "response")
table(prediction > .51, test$y)
mean(prediction)

# 3 (a) Logistic Regression
glm.fit <- glm(y ~ x1 + x2, data = train, family = binomial)
glm.fit
prediction1 <- predict(glm.fit, newdata = test, se.fit = FALSE, type = 'response')
table(prediction1 > .51, test$y)

# (b) Linear Discriminant Analysis
lda.fit <- lda(y ~ x1 + x2, data = train)
lda.fit
prediction2 <- predict(lda.fit, newdata = test, se.fit = FALSE, type = 'response')
names(prediction2)
table(prediction2$class, test$y)

# (c) Quadratic Discriminant Analysis
qda.fit <- qda(y ~ x1 + x2, data = train)
qda.fit
prediction3 <- predict(qda.fit, newdata = test, se.fit = FALSE, type = 'response')
table(prediction3$class, test$y)

# 4 KNN — normalize the predictors to zero mean and unit variance
train.X <- scale(cbind(train$x1, train$x2))
test.X <- scale(cbind(test$x1, test$x2))
train.Y <- train$y
test.Y <- test$y
var(train.X[, 1])  # now the variance is 1

# Confusion tables for k = 1..20.
# REFACTOR: replaces twenty copy-pasted knn.predN/tableN blocks with one
# loop; each table is printed and kept in knn.tables[[k]].
knn.tables <- vector("list", 20)
for (k in seq_len(20)) {
  pred.k <- knn(train.X, test.X, train.Y, k = k)
  knn.tables[[k]] <- table(pred.k, test.Y)
  cat("k =", k, "\n")
  print(knn.tables[[k]])
}

# 5 — cross-validation
# Linear model, 10-fold CV
lm.fit1 <- glm(y ~ x1 + x2, data = train)
cv.error.lm <- cv.glm(train, lm.fit1, K = 10)
cv.error.lm$delta[1]
cv.error.lm$delta[2]
# Logistic model, 10-fold CV
glm.fit1 <- glm(y ~ x1 + x2, data = train, family = binomial)
cv.error.glm <- cv.glm(train, glm.fit1, K = 10)
cv.error.glm$delta[1]
cv.error.glm$delta[2]
# KNN leave-one-out CV.
# FIX: class::knn.cv() has signature knn.cv(train, cl, k, ...); the original
# call used nonexistent arguments (data=, label=, p=, method=) and shadowed
# the function name by assigning the result to `knn.cv`.
knn.loocv <- knn.cv(train.X, train.Y, k = 7)
table(knn.loocv, train.Y)
|
/Assignment_2/Assignment_2.R
|
no_license
|
sambittarai/Big-Data-and-Machine-Learning-in-Finance-and-Economics-ECON2333-
|
R
| false
| false
| 4,658
|
r
|
# Assignment 2 — classify a simulated two-class data set with a linear
# probability model, logistic regression, LDA, QDA and KNN, then compare
# models with 10-fold / leave-one-out cross-validation.
# (removed `rm(list = ls())` — wiping the user's workspace is a side effect
# a script should not have)
# FIX: lda()/qda() come from MASS and knn()/knn.cv() from class; load them
# explicitly so the script is self-contained.
library(MASS)
library(class)
library(boot)

# FIX: was `setwd = "..."`, which only created a character variable named
# `setwd` instead of changing the working directory; call the function.
setwd("G:/IIT_MADRAS_DD/Semesters/7th sem (UQ)/ECON2333 (Big Data and Machine learning in Finance and economics)/Assignment_2")
data1 <- read.csv('G:/IIT_MADRAS_DD/Semesters/7th sem (UQ)/ECON2333 (Big Data and Machine learning in Finance and economics)/Assignment_2/Assign2.csv')
View(data1)
attach(data1)  # NOTE(review): attach() is fragile — prefer data1$x1 etc.

# 1 — scatter plot of the two classes
plot(x1, x2, 'p', col = 'green')
points(x1[y == 1], x2[y == 1], col = 'black')

# 2 — random 80/20 train/test split
indexes <- sample(1:nrow(data1), size = 0.2 * nrow(data1))
test <- data1[indexes, ]
dim(test)   # (200,3)
train <- data1[-indexes, ]
dim(train)  # (800,3)
View(train)
View(test)

# Linear probability model
lm.fit <- lm(y ~ x1 + x2, data = train)
lm.fit
prediction <- predict(lm.fit, newdata = test, se.fit = FALSE, type = "response")
table(prediction > .51, test$y)
mean(prediction)

# 3 (a) Logistic Regression
glm.fit <- glm(y ~ x1 + x2, data = train, family = binomial)
glm.fit
prediction1 <- predict(glm.fit, newdata = test, se.fit = FALSE, type = 'response')
table(prediction1 > .51, test$y)

# (b) Linear Discriminant Analysis
lda.fit <- lda(y ~ x1 + x2, data = train)
lda.fit
prediction2 <- predict(lda.fit, newdata = test, se.fit = FALSE, type = 'response')
names(prediction2)
table(prediction2$class, test$y)

# (c) Quadratic Discriminant Analysis
qda.fit <- qda(y ~ x1 + x2, data = train)
qda.fit
prediction3 <- predict(qda.fit, newdata = test, se.fit = FALSE, type = 'response')
table(prediction3$class, test$y)

# 4 KNN — normalize the predictors to zero mean and unit variance
train.X <- scale(cbind(train$x1, train$x2))
test.X <- scale(cbind(test$x1, test$x2))
train.Y <- train$y
test.Y <- test$y
var(train.X[, 1])  # now the variance is 1

# Confusion tables for k = 1..20.
# REFACTOR: replaces twenty copy-pasted knn.predN/tableN blocks with one
# loop; each table is printed and kept in knn.tables[[k]].
knn.tables <- vector("list", 20)
for (k in seq_len(20)) {
  pred.k <- knn(train.X, test.X, train.Y, k = k)
  knn.tables[[k]] <- table(pred.k, test.Y)
  cat("k =", k, "\n")
  print(knn.tables[[k]])
}

# 5 — cross-validation
# Linear model, 10-fold CV
lm.fit1 <- glm(y ~ x1 + x2, data = train)
cv.error.lm <- cv.glm(train, lm.fit1, K = 10)
cv.error.lm$delta[1]
cv.error.lm$delta[2]
# Logistic model, 10-fold CV
glm.fit1 <- glm(y ~ x1 + x2, data = train, family = binomial)
cv.error.glm <- cv.glm(train, glm.fit1, K = 10)
cv.error.glm$delta[1]
cv.error.glm$delta[2]
# KNN leave-one-out CV.
# FIX: class::knn.cv() has signature knn.cv(train, cl, k, ...); the original
# call used nonexistent arguments (data=, label=, p=, method=) and shadowed
# the function name by assigning the result to `knn.cv`.
knn.loocv <- knn.cv(train.X, train.Y, k = 7)
table(knn.loocv, train.Y)
|
# Demo 1: a bare line-and-point plot of a numeric vector.
values1 <- c(8, 14, 26, 5, 43)
plot(values1, type = "o")

# Demo 2: the same kind of chart decorated with a title, axis labels
# and a line colour.
values2 <- c(12, 1, 25, 42, 56, 10, 20)
plot(values2, type = "o", xlab = "Month", ylab = "Rain Fall",
     col = "red", main = "Rain Fall Chart")

# Demo 3: three series on one chart -- plot() draws the first series,
# lines() overlays the remaining two in different colours.
series_a <- c(12, 15, 19, 29, 30, 45)
series_b <- c(14, 16, 18, 25, 34, 40)
series_c <- c(16, 17, 25, 29, 18, 22)
plot(series_a, type = "o", xlab = "Month", ylab = "Rain Fall",
     col = "red", main = "Rain Fall Chart")
lines(series_b, type = "o", col = "blue")
lines(series_c, type = "o", col = "green")
|
/Data_Visualization_R/Data visulization in R_Line plot.R
|
no_license
|
balaso4k/Data_Science_R
|
R
| false
| false
| 544
|
r
|
#Simple Line Plot
v <- c(8,14,26,5,43)
plot(v, type = "o")
# Line Plot with title, color & labels
v <- c(12,1,25,42,56,10,20)
plot(v, type = "o", xlab = "Month", ylab = "Rain Fall", col= "red", main = "Rain Fall Chart")
# Line Plot With Multiple Lines
v <- c(12,15,19,29,30,45)
t <- c(14,16,18,25,34,40)
f <- c(16,17,25,29,18,22)
plot(v , type = "o",xlab = "Month", ylab = "Rain Fall", col = "red", main = "Rain Fall Chart")
lines(t, type = "o", col = "blue")
lines(f, type = "o", col = "green")
|
# Intro to ggplot ----
# Teaching demo: builds three basic ggplot2 charts (line, bar, scatter) from
# Basketball Reference CSV exports. The commented-out layers under each chart
# are the "polished" versions revealed step by step during the lesson --
# they are intentionally left in place.
library(tidyverse)
james <- read.csv('lebronjames_career.csv')
line_graph <- james %>%
mutate(Season = as.numeric(substr(Season, 1, 4))) %>% # Converting season from factor to numeric
select(Season, PTS) %>%
drop_na() # Drop career row
# Line chart ----
ggplot(data = line_graph, aes(x = Season, y = PTS)) +
geom_line()
# geom_line(color = 'red', size = 2) +
# labs(title = 'Lebron James Scoring by Season',
# subtitle = 'Looking at Lebrons Points per 100 possessions over the course of his career',
# x = '',
# y = 'Points per 100 Possessions',
# caption = 'Source: Basketball Reference') +
# scale_x_continuous(breaks = seq(2003,2019,1)) +
# theme_minimal() +
# theme(plot.title = element_text(face = 'bold'),
# panel.grid.minor.x = element_blank(),
# axis.text.x = element_text(angle = 45))
# Bar Chart ----
# Keep only players with at least 1000 minutes played.
off_def <- read.csv('offense_defense.csv') %>%
filter(MP >= 1000)
head(off_def, 10)
# Mean Defensive Box Plus/Minus per listed position.
defense_by_position <- off_def %>%
group_by(Pos) %>%
summarise(DBPM = mean(DBPM))
head(defense_by_position, 5)
ggplot(data = defense_by_position, aes(x = Pos, DBPM)) +
# NOTE(review): ggplot2's documented stat name is lowercase 'identity';
# 'Identity' happens to resolve to the same StatIdentity object -- confirm
# before relying on it.
geom_bar(stat = 'Identity')
# ggplot(data = defense_by_position, aes(x = reorder(Pos, desc(DBPM)), y = DBPM)) +
# geom_bar(stat = 'Identity', fill = 'royalblue', color = 'navy') +
# labs(title = 'Defensive Performance by Position',
# x = 'Position',
# y = 'Defensive Box Plus/Minus',
# caption = 'Source: Basketball Reference') +
# theme_minimal() +
# theme(plot.title = element_text(size = 16, face = 'bold', hjust = .5),
# axis.title = element_text(face = 'bold'))
# Scatter plot ----
# Offensive Win Shares vs Offensive Box Plus/Minus for the same players.
ggplot(data = off_def, aes(x = OWS, y = OBPM)) +
geom_point()
# geom_point(aes(color = Pos, size = MP)) +
# geom_point(color = 'royalblue', size = 3, alpha = .60) +
# geom_smooth(method = 'lm', color = 'red') +
# labs(title = 'Comparing Offensive Value Metrics',
# subtitle = 'Looking at Offensive Win Shares against Offensive Box Plus/Minus',
# x = 'Offensive Win Shares',
# y = 'Offensive Box Plus/Minus',
# caption = 'Source: Basketball Reference') +
# theme_classic() +
# theme(plot.title = element_text(face = 'bold', hjust = .5),
# plot.subtitle = element_text(face = 'italic', hjust = .5),
# axis.title = element_text(face = 'bold'))
#
#
|
/Teaching R at Columbia and NYU/Intro to GGPlot/Intro to GGPlot.R
|
no_license
|
jasonwrosenfeld23/JasonR_project
|
R
| false
| false
| 2,413
|
r
|
# Intro to ggplot ----
library(tidyverse)
james <- read.csv('lebronjames_career.csv')
line_graph <- james %>%
mutate(Season = as.numeric(substr(Season, 1, 4))) %>% # Converting season from factor to numeric
select(Season, PTS) %>%
drop_na() # Drop career row
# Line chart ----
ggplot(data = line_graph, aes(x = Season, y = PTS)) +
geom_line()
# geom_line(color = 'red', size = 2) +
# labs(title = 'Lebron James Scoring by Season',
# subtitle = 'Looking at Lebrons Points per 100 possessions over the course of his career',
# x = '',
# y = 'Points per 100 Possessions',
# caption = 'Source: Basketball Reference') +
# scale_x_continuous(breaks = seq(2003,2019,1)) +
# theme_minimal() +
# theme(plot.title = element_text(face = 'bold'),
# panel.grid.minor.x = element_blank(),
# axis.text.x = element_text(angle = 45))
# Bar Chart ----
off_def <- read.csv('offense_defense.csv') %>%
filter(MP >= 1000)
head(off_def, 10)
defense_by_position <- off_def %>%
group_by(Pos) %>%
summarise(DBPM = mean(DBPM))
head(defense_by_position, 5)
ggplot(data = defense_by_position, aes(x = Pos, DBPM)) +
geom_bar(stat = 'Identity')
# ggplot(data = defense_by_position, aes(x = reorder(Pos, desc(DBPM)), y = DBPM)) +
# geom_bar(stat = 'Identity', fill = 'royalblue', color = 'navy') +
# labs(title = 'Defensive Performance by Position',
# x = 'Position',
# y = 'Defensive Box Plus/Minus',
# caption = 'Source: Basketball Reference') +
# theme_minimal() +
# theme(plot.title = element_text(size = 16, face = 'bold', hjust = .5),
# axis.title = element_text(face = 'bold'))
# Scatter plot ----
ggplot(data = off_def, aes(x = OWS, y = OBPM)) +
geom_point()
# geom_point(aes(color = Pos, size = MP)) +
# geom_point(color = 'royalblue', size = 3, alpha = .60) +
# geom_smooth(method = 'lm', color = 'red') +
# labs(title = 'Comparing Offensive Value Metrics',
# subtitle = 'Looking at Offensive Win Shares against Offensive Box Plus/Minus',
# x = 'Offensive Win Shares',
# y = 'Offensive Box Plus/Minus',
# caption = 'Source: Basketball Reference') +
# theme_classic() +
# theme(plot.title = element_text(face = 'bold', hjust = .5),
# plot.subtitle = element_text(face = 'italic', hjust = .5),
# axis.title = element_text(face = 'bold'))
#
#
|
#!/usr/bin/Rscript
# test_laney_ests.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 01.25.2018
## In order to evaluate ARL properties of the laney chart with known parameters,
## we need to understand how beta-binomial quantities translate to the population
## quantities mentioned in Laney's paper, namely sigma_z and sigma_p.
## This script verifies analytical derivations of these quantities through sims.
# laney_chart(), est_params() and bb_mm() come from the sourced files below.
source('./charts/laney_chart.R')
source('lib.R')
## Double check that under a model with no random effects, we eventually estimate
## that the z-scale variation is indeed 1.
# m binomial draws with Poisson-distributed sample sizes and a fixed p = 0.5
# (i.e. no between-sample variation).
# NOTE(review): rpois(m, 10) can return 0, giving zero-size samples; the
# second simulation below guards with 1 + rpois(...) -- confirm est_params()
# tolerates N = 0 here.
chart <- laney_chart()
m <- 1000
N <- rpois(m,10)
X <- rbinom(m, N, 0.5)
est_params(chart, X, N)
## Check that, as alpha, beta \to \infty with \alpha / (\alpha + \beta) fixed, we have that sig_z \to 1.
alpha <- 2e10
beta <- 1e10
bb_mm('laney', alpha, beta, 3)
## Check that for large sample sizes, the sample estimates agree with the translated
## parameters.
## Interesting note: the moving range estimate for variance is only good if p is no
## close to 0 or 1, as this skews the distribution of Z scores.
# Beta-binomial simulation: per-sample success probabilities drawn from
# Beta(alpha, beta), sample sizes shifted Poisson so every N >= 1.
chart <- laney_chart()
alpha <- 20
beta <- 10
m <- 1e4
n.mu <- 1e4
N <- 1+rpois(m,n.mu-1)
rhos <- rbeta(m, alpha, beta)
X <- rbinom(m, N, rhos)
#Translated Params
bb_mm('laney', alpha, beta, n.mu)
#Estiamtes
est_params(chart, X, N)
|
/tests/laney_chart_test.R
|
no_license
|
NathanWycoff/ODBinQC
|
R
| false
| false
| 1,344
|
r
|
#!/usr/bin/Rscript
# test_laney_ests.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 01.25.2018
## In order to evaluate ARL properties of the laney chart with known parameters,
## we need to understand how beta-binomial quantities translate to the population
## quantities mentioned in Laney's paper, namely sigma_z and sigma_p.
## This script verifies analytical derivations of these quantities through sims.
source('./charts/laney_chart.R')
source('lib.R')
## Double check that under a model with no random effects, we eventually estimate
## that the z-scale variation is indeed 1.
chart <- laney_chart()
m <- 1000
N <- rpois(m,10)
X <- rbinom(m, N, 0.5)
est_params(chart, X, N)
## Check that, as alpha, beta \to \infty with \alpha / (\alpha + \beta) fixed, we have that sig_z \to 1.
alpha <- 2e10
beta <- 1e10
bb_mm('laney', alpha, beta, 3)
## Check that for large sample sizes, the sample estimates agree with the translated
## parameters.
## Interesting note: the moving range estimate for variance is only good if p is no
## close to 0 or 1, as this skews the distribution of Z scores.
chart <- laney_chart()
alpha <- 20
beta <- 10
m <- 1e4
n.mu <- 1e4
N <- 1+rpois(m,n.mu-1)
rhos <- rbeta(m, alpha, beta)
X <- rbinom(m, N, rhos)
#Translated Params
bb_mm('laney', alpha, beta, n.mu)
#Estiamtes
est_params(chart, X, N)
|
# Test that rxSymPySetupPred() correctly flags which THETA/ETA parameters
# participate in event-related model properties (bioavailability f(),
# lag time alag(), infusion duration dur()).
rxodeTest(
{
context("Capture which ETAs are in events")
test_that("duration/f ETAs extracted", {
# PK parameter block: THETA[] are fixed effects, ETA[] subject-level random
# effects. D2, D3 and F2 below feed the event-related expressions in the
# model, which is what this test probes.
pk <- function() {
tka <- THETA[1]
tcl <- THETA[2]
tv <- THETA[3]
ltk0 <- THETA[4]
lf <- THETA[5]
add.err <- THETA[6]
prop.err <- THETA[7]
ltk2 <- THETA[8]
eta.ka <- ETA[1]
eta.cl <- ETA[2]
eta.v <- ETA[3]
eta.k0 <- ETA[4]
eta.f <- ETA[5]
eta.k2 <- ETA[6]
ka <- exp(tka + eta.ka)
cl <- exp(tcl + eta.cl)
v <- exp(tv + eta.v)
D2 <- exp(ltk0 + eta.k0)
D3 <- exp(ltk2 + eta.k2)
F2 <- 1 / (1 + exp(lf + eta.f))
}
# One-compartment model with first-order absorption; D2, D3 and F2 enter
# only through f()/alag()/dur(), i.e. event-related properties.
mod <- RxODE({
d / dt(depot) <- -ka * depot
d / dt(center) <- ka * depot - cl / v * center
f(depot) <- 1 - F2
f(center) <- F2
alag(depot) <- D2
dur(center) <- D3
cp <- center / v
cmt(cp)
nlmixr_pred <- cp
})
pred <- function() {
return(nlmixr_pred)
}
err <- function() {
return(add(add.err) + prop(prop.err))
}
pk2 <- rxSymPySetupPred(mod, predfn = pred, pkpars = pk, err = err)
expect_false(is.null(pk2$pred.nolhs))
# eventTheta/eventEta flag, per THETA/ETA index, whether the parameter
# appears in an event-related expression: THETA 4, 5, 8 and ETA 4, 5, 6
# reach f/alag/dur via D2, F2 and D3 above.
expect_equal(pk2$eventTheta, c(0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L))
expect_equal(pk2$eventEta, c(0L, 0L, 0L, 1L, 1L, 1L))
expect_equal(pk2$inner$params, pk2$pred.nolhs$params)
})
},
test = "focei"
)
|
/tests/testthat/test-dur-sens.R
|
no_license
|
cran/RxODE
|
R
| false
| false
| 1,529
|
r
|
rxodeTest(
{
context("Capture which ETAs are in events")
test_that("duration/f ETAs extracted", {
pk <- function() {
tka <- THETA[1]
tcl <- THETA[2]
tv <- THETA[3]
ltk0 <- THETA[4]
lf <- THETA[5]
add.err <- THETA[6]
prop.err <- THETA[7]
ltk2 <- THETA[8]
eta.ka <- ETA[1]
eta.cl <- ETA[2]
eta.v <- ETA[3]
eta.k0 <- ETA[4]
eta.f <- ETA[5]
eta.k2 <- ETA[6]
ka <- exp(tka + eta.ka)
cl <- exp(tcl + eta.cl)
v <- exp(tv + eta.v)
D2 <- exp(ltk0 + eta.k0)
D3 <- exp(ltk2 + eta.k2)
F2 <- 1 / (1 + exp(lf + eta.f))
}
mod <- RxODE({
d / dt(depot) <- -ka * depot
d / dt(center) <- ka * depot - cl / v * center
f(depot) <- 1 - F2
f(center) <- F2
alag(depot) <- D2
dur(center) <- D3
cp <- center / v
cmt(cp)
nlmixr_pred <- cp
})
pred <- function() {
return(nlmixr_pred)
}
err <- function() {
return(add(add.err) + prop(prop.err))
}
pk2 <- rxSymPySetupPred(mod, predfn = pred, pkpars = pk, err = err)
expect_false(is.null(pk2$pred.nolhs))
expect_equal(pk2$eventTheta, c(0L, 0L, 0L, 1L, 1L, 0L, 0L, 1L))
expect_equal(pk2$eventEta, c(0L, 0L, 0L, 1L, 1L, 1L))
expect_equal(pk2$inner$params, pk2$pred.nolhs$params)
})
},
test = "focei"
)
|
# A test set for the weather simulator
#
# Created by lshang on Aug 18, 2016
#
# RUnit-style test functions (checkTrue/checkEquals/DEACTIVATED). The helpers
# define_constants(), generate_timestamps() and get_config() come from the
# package under test; generate_sequence() is compiled from C++ below.
test.simulator <- function() {
checkTrue(TRUE, define_constants())
checkEquals(5, length(generate_timestamps(5)))
checkEquals(4, length(get_config()))
}
test.markovchain <- function() {
# Compile the C++ Markov-chain sampler, then check the degenerate
# single-state chain and the length of a 20-step sequence drawn from a
# uniform 3x3 transition matrix.
sourceCpp("markovchain.cpp")
mat <- matrix(1)
checkEquals(0, generate_sequence(mat, 1))
mat <- matrix(c(1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3), nrow = 3, ncol = 3)
sequence <- generate_sequence(mat,20)
checkEquals(20, length(sequence))
}
test.deactivation <- function() {
# Placeholder kept to document RUnit's test-deactivation mechanism.
DEACTIVATED('Deactivating this test function')
}
|
/1.R
|
no_license
|
lshang0311/fun-with-weather
|
R
| false
| false
| 619
|
r
|
# A test set for the weather simulator
#
# Created by lshang on Aug 18, 2016
#
test.simulator <- function() {
checkTrue(TRUE, define_constants())
checkEquals(5, length(generate_timestamps(5)))
checkEquals(4, length(get_config()))
}
test.markovchain <- function() {
sourceCpp("markovchain.cpp")
mat <- matrix(1)
checkEquals(0, generate_sequence(mat, 1))
mat <- matrix(c(1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3, 1/3), nrow = 3, ncol = 3)
sequence <- generate_sequence(mat,20)
checkEquals(20, length(sequence))
}
test.deactivation <- function() {
DEACTIVATED('Deactivating this test function')
}
|
# Fuzz/regression fixture (AFL-generated): call the internal C++ routine
# with a fixed argument list (NULL m, 0 repetitions, one 5x1 matrix of
# extreme doubles) and print the structure of whatever it returns.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810623743159e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615834862-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 270
|
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810623743159e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
# Merge census / real-estate / ACS attributes onto the `test` grid loaded
# from "intermediate_codes", build neighbourhood cafe counts at four radii,
# impute missing values, and write out the modelling matrices.

# Breakpoint --------------------------------------------------------------
load(file="intermediate_codes")

# Transfer daytime density ------------------------------------------------
d = read.csv("./data/daytime_density.csv")
test = d %>%
  dplyr::select(GEOID10, daytime_pop, daytime_density, travel_to, DP0010001) %>%
  dplyr::rename(cen_code = GEOID10) %>%
  merge(., test, by="cen_code")

# Transfer RE vecs --------------------------------------------------------
d = read.csv(file="./data/realestate_all.csv")
test %<>% merge(d %>% dplyr::rename(zip = L1), by="zip", all=T)

# Transfer household income -----------------------------------------------
# ACS cells use "(X)", "**" and "-" as missing markers; gsub() with an NA
# replacement turns any element containing a match into NA wholesale.
# NOTE(review): the pattern also matches a hyphen inside negative numbers --
# confirm these columns cannot be negative.
d = read.csv("./data/attainment_income_2016/ACS_16_5YR_S1903_with_ann.csv", stringsAsFactors = F) %>%
  dplyr::select(HC02_EST_VC02, HC02_EST_VC22, GEO.id2) %>%
  lapply(function(x) gsub("(X)|\\*\\*|-", NA, x)) %>%
  data.frame %>%
  dplyr::rename(medinc = HC02_EST_VC02, faminc = HC02_EST_VC22, cen_code = GEO.id2) %>%
  mutate(cen_code = as.numeric(as.character(cen_code))) %>%
  mutate(across(-cen_code, ~ as.numeric(as.character(.x)))) # across() replaces deprecated mutate_at()/funs()
test %<>% left_join(d)

# Transfer ed attainment --------------------------------------------------
d = read.csv("./data/attainment_income_2016/ACS_16_5YR_S1501_with_ann.csv", stringsAsFactors = F) %>%
  dplyr::select(HC02_EST_VC03, HC02_EST_VC06, HC02_EST_VC08,HC02_EST_VC15, HC01_EST_VC08, HC01_EST_VC02, GEO.id2) %>%
  dplyr::rename(b_hs = HC02_EST_VC03, hs = HC02_EST_VC06, ba = HC02_EST_VC08, phd = HC02_EST_VC15, oldpop = HC01_EST_VC08, youngpop = HC01_EST_VC02) %>%
  lapply(function(x) gsub("(X)|\\*\\*|-", NA, x)) %>%
  data.frame %>%
  dplyr::rename(cen_code = GEO.id2) %>%
  mutate(cen_code = as.numeric(as.character(cen_code))) %>%
  mutate(across(-cen_code, ~ as.numeric(as.character(.x))))
test %<>% left_join(d)

# Transfer rent vacancy ---------------------------------------------------
# Transfer total population -----------------------------------------------
# Get distance matrix and calculate neighbor rasters ----------------------
# For each cell i and each ring, precompute the indices among cells 1..i-1
# (lower triangle only) within ring * base_distance.
# seq_len(i - 1) makes the original `1:x-1` explicit (that expression parses
# as 0:(x-1); the 0 index was silently dropped). lapply() avoids sapply()'s
# type instability on ragged results.
# NOTE(review): only neighbours with a *smaller* index are counted even
# though distances are symmetric -- confirm this asymmetry is intended.
distmat = pointDistance(centroids[,1:2], lonlat=F)
base_distance = .0028
n_dim = 4732
distance_lookup = lapply(1:4, function(ring) {
  lapply(seq_len(n_dim), function(i) {
    which(distmat[i, seq_len(i - 1)] <= base_distance * ring)
  })
})
# Cafe counts within each ring; one column per radius (dist1..dist4).
for (ring in 1:4) {
  test[[paste0("dist", ring)]] = test$code %>%
    sapply(function(i) test$cafe[distance_lookup[[ring]][i] %>% unlist()] %>% sum(na.rm=T))
}

# Drop all cells not on land ----------------------------------------------
test %<>% drop_na(zone)
write.csv(test, "./data/raw_combined.csv")

# Modeling ----------------------------------------------------------------
# Drop identifiers/coordinates, keep columns with fewer than 80 NAs,
# drop margin-of-error columns, impute with a single mice draw (m = 1),
# then write the model matrix. Column 7 of the model matrix is the cafe
# count, written separately as the response.
mat = test %>% dplyr::select(-cen_code, -zip, -zone, -code, -hood, -x, -y, -bakery)
naind = vapply(mat, function(x) sum(is.na(x)), integer(1)) # NA count per column
filtmat = mat[, (naind < 80)] %>% dplyr::select(-contains("MOE"))
filtmat %<>% mice::mice(1) %>% mice::complete()
filtmat$zone = as.factor(test$zone)
filtmat$hood = as.factor(test$hood)
write.csv(filtmat, "./data/filt_mat.csv")
mod_mat = model.matrix(formula(~ .), filtmat)
write.csv(mod_mat[,-7], "./data/test.csv")
write.csv(mod_mat[,7], "./data/cafes.csv")
|
/source/2b_data_merge.R
|
no_license
|
Ritella/cafecity
|
R
| false
| false
| 3,699
|
r
|
# Breakpoint --------------------------------------------------------------
load(file="intermediate_codes")
# Transfer daytime density ------------------------------------------------
d = read.csv("./data/daytime_density.csv")
test = d %>%
dplyr::select(GEOID10, daytime_pop, daytime_density, travel_to, DP0010001) %>%
dplyr::rename(cen_code = GEOID10) %>%
merge(.,test,by="cen_code")
# Transfer RE vecs --------------------------------------------------------
d = read.csv(file="./data/realestate_all.csv")
test %<>% merge(d %>% dplyr::rename(zip = L1),by="zip", all=T)
# Transfer household income -----------------------------------------------
d = read.csv("./data/attainment_income_2016/ACS_16_5YR_S1903_with_ann.csv", stringsAsFactors = F) %>%
dplyr::select(HC02_EST_VC02, HC02_EST_VC22, GEO.id2) %>%
lapply(function(x) gsub("(X)|\\*\\*|-", NA, x)) %>%
data.frame %>%
dplyr::rename(medinc = HC02_EST_VC02, faminc = HC02_EST_VC22, cen_code = GEO.id2) %>%
mutate(cen_code = as.numeric(as.character(cen_code))) %>%
mutate_at(vars(-cen_code),funs(as.numeric(as.character(.))))
test %<>% left_join(d)
# Transfer ed attainment --------------------------------------------------
d = read.csv("./data/attainment_income_2016/ACS_16_5YR_S1501_with_ann.csv", stringsAsFactors = F) %>%
dplyr::select(HC02_EST_VC03, HC02_EST_VC06, HC02_EST_VC08,HC02_EST_VC15, HC01_EST_VC08, HC01_EST_VC02, GEO.id2) %>%
dplyr::rename(b_hs = HC02_EST_VC03, hs = HC02_EST_VC06, ba = HC02_EST_VC08, phd = HC02_EST_VC15, oldpop = HC01_EST_VC08, youngpop = HC01_EST_VC02) %>%
lapply(function(x) gsub("(X)|\\*\\*|-", NA, x)) %>%
data.frame %>%
dplyr::rename(cen_code = GEO.id2) %>%
mutate(cen_code = as.numeric(as.character(cen_code))) %>%
mutate_at(vars(-cen_code),funs(as.numeric(as.character(.))))
test %<>% left_join(d)
# Transfer rent vacancy ---------------------------------------------------
# Transfer total population -----------------------------------------------
# Get distance matrix and calculate neighbor rasters ----------------------
distmat = pointDistance(centroids[,1:2],lonlat=F)
base_distance = .0028
n_dim = 4732
distance_lookup = list(
sapply(1:n_dim, function(x) which(distmat[x,1:x-1] <= base_distance)),
sapply(1:n_dim, function(x) which(distmat[x,1:x-1] <= base_distance*2)),
sapply(1:n_dim, function(x) which(distmat[x,1:x-1] <= base_distance*3)),
sapply(1:n_dim, function(x) which(distmat[x,1:x-1] <= base_distance*4))
)
test$dist1 = test$code %>%
sapply(function(x) test$cafe[distance_lookup[[1]][x] %>% unlist()] %>% sum(na.rm=T))
test$dist2 = test$code %>%
sapply(function(x) test$cafe[distance_lookup[[2]][x] %>% unlist()] %>% sum(na.rm=T))
test$dist3 = test$code %>%
sapply(function(x) test$cafe[distance_lookup[[3]][x] %>% unlist()] %>% sum(na.rm=T))
test$dist4 = test$code %>%
sapply(function(x) test$cafe[distance_lookup[[4]][x] %>% unlist()] %>% sum(na.rm=T))
# Drop all cells not on land ----------------------------------------------
test %<>% drop_na(zone)
write.csv(test, "./data/raw_combined.csv")
# Modeling ----------------------------------------------------------------
mat = test %>% dplyr::select(-cen_code, -zip, -zone, -code, -hood, -x, -y, -bakery)
naind = mat %>% apply(2,function(x) is.na(x) %>% which() %>% length)
filtmat = mat[,(naind < 80)] %>% dplyr::select(-contains("MOE"))
filtmat %<>% mice::mice(1) %>% mice::complete()
filtmat$zone = as.factor(test$zone)
filtmat$hood = as.factor(test$hood)
write.csv(filtmat, "./data/filt_mat.csv")
mod_mat = model.matrix(formula(~ .), filtmat)
write.csv(mod_mat[,-7], "./data/test.csv")
write.csv(mod_mat[,7], "./data/cafes.csv")
|
library(ggplot2)
library(MASS)
library(nlme)
library(DBI)
library(sp)
library(raster)
library(maptools)
library(mgcv)
library(rgeos)
library(maps)
library(mapdata)
library(RMySQL)
library(rgdal)
library(gstat)
library(gdalUtils)
library(foreach)
library(doParallel)
library(readxl)
library(HousePC)
source('R/boundary.R',encoding = 'utf-8')
source('R/calLevel.R',encoding = 'utf-8')
source('R/calLevel2.R',encoding = 'utf-8')
source('R/calLevelPost.R',encoding = 'utf-8')
source('R/calLevelPost2.R',encoding = 'utf-8')
source('R/grid.R',encoding = 'utf-8')
source('R/hp_CHN.R',encoding = 'utf-8')
source('R/hp_CHNPost.R',encoding = 'utf-8')
source('R/hp_city.R',encoding = 'utf-8')
source('R/hp_cityPost.R',encoding = 'utf-8')
source('R/krig.R',encoding = 'utf-8')
source('R/preprocess.R',encoding = 'utf-8')
source('R/preprocess2.R',encoding = 'utf-8')
source('R/preprocessPost.R',encoding = 'utf-8')
source('R/preprocessPost2.R',encoding = 'utf-8')
source('R/prsp.R',encoding = 'utf-8')
source('R/readpr.R',encoding = 'utf-8')
|
/R/libraries.R
|
no_license
|
Menglinucas/HouseLevel
|
R
| false
| false
| 1,065
|
r
|
library(ggplot2)
library(MASS)
library(nlme)
library(DBI)
library(sp)
library(raster)
library(maptools)
library(mgcv)
library(rgeos)
library(maps)
library(mapdata)
library(RMySQL)
library(rgdal)
library(gstat)
library(gdalUtils)
library(foreach)
library(doParallel)
library(readxl)
library(HousePC)
source('R/boundary.R',encoding = 'utf-8')
source('R/calLevel.R',encoding = 'utf-8')
source('R/calLevel2.R',encoding = 'utf-8')
source('R/calLevelPost.R',encoding = 'utf-8')
source('R/calLevelPost2.R',encoding = 'utf-8')
source('R/grid.R',encoding = 'utf-8')
source('R/hp_CHN.R',encoding = 'utf-8')
source('R/hp_CHNPost.R',encoding = 'utf-8')
source('R/hp_city.R',encoding = 'utf-8')
source('R/hp_cityPost.R',encoding = 'utf-8')
source('R/krig.R',encoding = 'utf-8')
source('R/preprocess.R',encoding = 'utf-8')
source('R/preprocess2.R',encoding = 'utf-8')
source('R/preprocessPost.R',encoding = 'utf-8')
source('R/preprocessPost2.R',encoding = 'utf-8')
source('R/prsp.R',encoding = 'utf-8')
source('R/readpr.R',encoding = 'utf-8')
|
# Load a CSV into a data.frame and demo basic column/row operations.
data = read.csv("mydata.csv")
# Single-bracket indexing by name returns a one-column data.frame.
data['Col1']
# NOTE(review): cbind()/rbind() return modified copies; the results below
# are printed and discarded -- `data` itself is never changed.
cbind(data,Col4=c(1,2,3,4))
rbind(data,list(1,2,3))
|
/Lesson01/Exercise02/Performing_operations_on_Dataframe.R
|
permissive
|
MeiRey/Practical-Machine-Learning-with-R
|
R
| false
| false
| 102
|
r
|
data = read.csv("mydata.csv")
data['Col1']
cbind(data,Col4=c(1,2,3,4))
rbind(data,list(1,2,3))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CurvePredict.R
\name{curve.predict}
\alias{curve.predict}
\title{Curve predictobj}
\usage{
\method{curve}{predict}(predict.obj, caserow = 1, level, acc = 0.01, ...)
}
\description{
Curve predictobj
}
|
/man/curve.predict.Rd
|
no_license
|
StatEvidence/ROC
|
R
| false
| true
| 278
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CurvePredict.R
\name{curve.predict}
\alias{curve.predict}
\title{Curve predictobj}
\usage{
\method{curve}{predict}(predict.obj, caserow = 1, level, acc = 0.01, ...)
}
\description{
Curve predictobj
}
|
## makeCacheMatrix() builds a caching wrapper around a matrix.
##
## It returns a list of four closures that share the wrapped matrix `x`
## and its cached inverse via their common enclosing environment:
##   set(y)       -- replace the matrix and drop any cached inverse
##   get()        -- return the current matrix
##   setinv(inv)  -- store a computed inverse in the cache
##   getinv()     -- return the cached inverse (NULL until one is stored)
##
## cacheSolve() (defined below in this file) uses the wrapper so the inverse
## of an unmodified matrix is computed at most once.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(invmat) {
    cached_inverse <<- invmat
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve() returns the inverse of the matrix held by a
## makeCacheMatrix() wrapper.
##
## On a cache miss the inverse is computed with solve() and written back
## into the wrapper via setinv(); on a hit the cached value is returned
## directly, announced with a "getting cached data" message. Extra
## arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the inverse.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
LaurentFranckx/ProgrammingAssignment2
|
R
| false
| false
| 2,912
|
r
|
## makeCacheMatrix() builds a caching wrapper around a matrix.
##
## It returns a list of four closures that share the wrapped matrix `x`
## and its cached inverse via their common enclosing environment:
##   set(y)       -- replace the matrix and drop any cached inverse
##   get()        -- return the current matrix
##   setinv(inv)  -- store a computed inverse in the cache
##   getinv()     -- return the cached inverse (NULL until one is stored)
##
## cacheSolve() (defined below in this file) uses the wrapper so the inverse
## of an unmodified matrix is computed at most once.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(invmat) {
    cached_inverse <<- invmat
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve() returns the inverse of the matrix held by a
## makeCacheMatrix() wrapper.
##
## On a cache miss the inverse is computed with solve() and written back
## into the wrapper via setinv(); on a hit the cached value is returned
## directly, announced with a "getting cached data" message. Extra
## arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the inverse.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date2day.R
\name{date2day_fun}
\alias{date2day_fun}
\title{Date2Day function}
\usage{
date2day_fun(year, month, day)
}
\arguments{
\item{year}{Enter the year in an int format, eg. 1989}
\item{month}{Enter the month in an int format, eg. 8}
\item{day}{Enter the day in an int format, eg. 25}
}
\description{
This function allows you to calculate the day based on date.
The formula used in the function is kim Larsson calculation formula.
}
\examples{
date2day_fun(2018, 5, 28)
}
\keyword{Date2Day}
|
/man/date2day_fun.Rd
|
no_license
|
Yiguan/Date2Day
|
R
| false
| true
| 578
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date2day.R
\name{date2day_fun}
\alias{date2day_fun}
\title{Date2Day function}
\usage{
date2day_fun(year, month, day)
}
\arguments{
\item{year}{Enter the year in an int format, eg. 1989}
\item{month}{Enter the month in an int format, eg. 8}
\item{day}{Enter the day in an int format, eg. 25}
}
\description{
This function allows you to calculate the day based on date.
The formula used in the function is kim Larsson calculation formula.
}
\examples{
date2day_fun(2018, 5, 28)
}
\keyword{Date2Day}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{simulate.sam}
\alias{simulate.sam}
\title{Simulate from a sam object}
\usage{
\method{simulate}{sam}(object, nsim = 1, seed = NULL,
full.data = TRUE, ...)
}
\arguments{
\item{object}{sam fitted object as returned from the \code{\link{sam.fit}} function}
\item{nsim}{number of response lists to simulate. Defaults to 1.}
\item{seed}{random number seed}
\item{full.data}{logical, should each inner list contain a full list of data. Defaults to TRUE}
\item{...}{extra arguments}
}
\value{
returns a list of lists. The outer list has length \code{nsim}. Each inner list contains simulated values of \code{logF}, \code{logN}, and \code{obs} with dimensions equal to those parameters.
}
\description{
Simulate from a sam object
}
\details{
simulates data sets from the model fitted and conditioned on the random effects estimated
}
|
/stockassessment/man/simulate.sam.Rd
|
no_license
|
iamdavecampbell/SAM
|
R
| false
| true
| 926
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{simulate.sam}
\alias{simulate.sam}
\title{Simulate from a sam object}
\usage{
\method{simulate}{sam}(object, nsim = 1, seed = NULL,
full.data = TRUE, ...)
}
\arguments{
\item{object}{sam fitted object as returned from the \code{\link{sam.fit}} function}
\item{nsim}{number of response lists to simulate. Defaults to 1.}
\item{seed}{random number seed}
\item{full.data}{logical, should each inner list contain a full list of data. Defaults to TRUE}
\item{...}{extra arguments}
}
\value{
returns a list of lists. The outer list has length \code{nsim}. Each inner list contains simulated values of \code{logF}, \code{logN}, and \code{obs} with dimensions equal to those parameters.
}
\description{
Simulate from a sam object
}
\details{
simulates data sets from the model fitted and conditioned on the random effects estimated
}
|
# Plot 2 for Week 1 of the Exploratory Data Analysis assignment:
# Global Active Power over time for 2007-02-01 through 2007-02-02.
# Clear the workspace and point R at the course data directory
rm(list = ls())
setwd("C:\\Rdata\\Coursera")
# Read the full household power-consumption data set (semicolon-separated)
power_dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Parse the day-month-year date column into Date objects
power_dat$date <- as.Date(power_dat$Date, format = "%d/%m/%Y")
# Keep only the two days of interest
start_day <- as.Date("2007-02-01")
end_day <- as.Date("2007-02-02")
feb_dat <- subset(power_dat, power_dat$date >= start_day & power_dat$date <= end_day)
# Build a full date-time stamp for the x axis
feb_dat$DateTime <- as.POSIXct(paste(feb_dat$date, feb_dat$Time), format = "%Y-%m-%d %H:%M:%S")
# Draw the line plot to a 480x480 PNG device
png("Plot2.png", 480, 480)
with(feb_dat, plot(DateTime, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.off()
|
/plot2.R
|
no_license
|
Skiriani/ExData_Plotting1
|
R
| false
| false
| 684
|
r
|
# Plot 2 for Week 1 of the Exploratory Data Analysis assignment:
# Global Active Power over time for 2007-02-01 through 2007-02-02.
# Clear the workspace and point R at the course data directory
rm(list = ls())
setwd("C:\\Rdata\\Coursera")
# Read the full household power-consumption data set (semicolon-separated)
power_dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Parse the day-month-year date column into Date objects
power_dat$date <- as.Date(power_dat$Date, format = "%d/%m/%Y")
# Keep only the two days of interest
start_day <- as.Date("2007-02-01")
end_day <- as.Date("2007-02-02")
feb_dat <- subset(power_dat, power_dat$date >= start_day & power_dat$date <= end_day)
# Build a full date-time stamp for the x axis
feb_dat$DateTime <- as.POSIXct(paste(feb_dat$date, feb_dat$Time), format = "%Y-%m-%d %H:%M:%S")
# Draw the line plot to a 480x480 PNG device
png("Plot2.png", 480, 480)
with(feb_dat, plot(DateTime, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.off()
|
network.igraph.unpack = function( g, spec ) {
  # Unpack all vertex attributes of an igraph graph into a data frame.
  #
  # Args:
  #   g: an igraph graph object
  #   spec: unused here; kept for backward compatibility with existing callers
  #
  # Returns:
  #   data.frame with one row per vertex, a leading 'index' column giving the
  #   vertex position, and one column per vertex attribute.
  #
  # seq_len() (rather than 1:length(...)) keeps this correct for a graph with
  # zero vertices, where 1:0 would yield c(1, 0) and a spurious 2-row frame.
  att = data.frame( index = seq_len(length(V(g))) ) # dummy index to get the right number of rows
  for ( v in list.vertex.attributes(g) ) {
    att[, v] = get.vertex.attribute(g, v)
  }
  return (att)
}
|
/R/network.igraph.unpack.r
|
permissive
|
jae0/bio.taxonomy
|
R
| false
| false
| 275
|
r
|
network.igraph.unpack = function( g, spec ) {
  # Unpack all vertex attributes of an igraph graph into a data frame.
  #
  # Args:
  #   g: an igraph graph object
  #   spec: unused here; kept for backward compatibility with existing callers
  #
  # Returns:
  #   data.frame with one row per vertex, a leading 'index' column giving the
  #   vertex position, and one column per vertex attribute.
  #
  # seq_len() (rather than 1:length(...)) keeps this correct for a graph with
  # zero vertices, where 1:0 would yield c(1, 0) and a spurious 2-row frame.
  att = data.frame( index = seq_len(length(V(g))) ) # dummy index to get the right number of rows
  for ( v in list.vertex.attributes(g) ) {
    att[, v] = get.vertex.attribute(g, v)
  }
  return (att)
}
|
#### Common Resources ####
pred_template_load<- function(pred_template_dir){
  # Load the model prediction-template raster grid and return it as a
  # two-column (longitude, latitude) data frame of valid (non-NA) cells.
  #
  # Interactive-debugging hook; never executed during a normal call
  if(FALSE){
    tar_load(pred_template_dir)
  }
  # Read the raster template grid from disk
  template_rast <- raster(paste(pred_template_dir, "mod_pred_template.grd", sep = "/"))
  # Flatten to a data frame of cell centers, dropping NA cells
  template_df <- as.data.frame(template_rast, xy = TRUE) %>%
    drop_na() %>%
    dplyr::select(., x, y)
  names(template_df) <- c("longitude", "latitude")
  return(template_df)
}
high_res_load <- function(high_res_dir) {
  # Read and return the high-resolution raster template stored in high_res_dir.
  raster(paste(high_res_dir, "HighResTemplate.grd", sep = "/"))
}
#### Functions ####
####
#' @title Make VAST prediction dataframe
#'
#' @description This function creates a dataframe of prediction covariates to combine with the other VAST data
#'
#' @param predict_covariates_stack_agg = The directory holding processed covariate raster stacks
#' @param mask = Shapefile mask
#' @param summarize = Currently, either "annual" or "seasonal" to indicate whether each dynamic raster stack should be summarized to an annual or a seasonal time scale
#' @param ensemble_stat = Either the climate model ensemble statistic to use when working with climate model projections, or NULL. This is only used in naming the output file
#' @param fit_year_min
#' @param fit_year_max
#' @param pred_years
#' @param out_dir = Directory to save the prediction dataframe
#'
#' @return A dataframe with prediction information. This file is also saved in out_dir.
#'
#' @export
make_vast_predict_df<- function(predict_covariates_stack_agg, extra_covariates_stack, covs_rescale = c("Depth", "BS_seasonal", "BT_seasonal", "SS_seasonal", "SST_seasonal"), rescale_params, depth_cut, mask, summarize, ensemble_stat, fit_seasons, fit_year_min, fit_year_max, pred_years, out_dir){
# Builds the prediction-covariate dataframe: loads the aggregated covariate
# raster stacks, masks them to the study area, reshapes to a tidy dataframe,
# adds dummy "observation" columns so prediction rows can later be row-bound
# with sample rows, optionally extracts extra (static) covariates, applies
# the depth cutoff and rescaling, then saves and returns the result.
# For debugging
if(FALSE){
tar_load(predict_covariates_stack_agg_out)
predict_covariates_stack_agg<- predict_covariates_stack_agg_out
tar_load(static_covariates_stack)
extra_covariates_stack = static_covariates_stack
tar_load(rescale_params)
tar_load(region_shapefile)
mask = region_shapefile
summarize<- "seasonal"
ensemble_stat<- "mean"
fit_year_min = fit_year_min
fit_year_max = fit_year_max
pred_years = pred_years
out_dir = here::here("scratch/aja/TargetsSDM/data/predict")
covs_rescale = c("Depth", "BS_seasonal", "BT_seasonal", "SS_seasonal", "SST_seasonal")
}
####
## Need to figure out what to do about depth here!!!
# Get raster stack covariate files
rast_files_load<- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd$"), full.names = TRUE)
# Get variable names
cov_names_full<- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd$"), full.names = FALSE)
predict_covs_names<- gsub(paste("_", ensemble_stat, ".grd$", sep = ""), "", gsub("predict_stack_", "", cov_names_full))
# Looping through prediction stack time steps
# NOTE(review): exactly four stack files are assumed below (indices 1-4) --
# confirm rast_files_load always has length 4 for this summarize/ensemble_stat.
for(i in 1:nlayers(raster::stack(rast_files_load[1]))){
# Get the time index
time_ind<- i
# Load corresponding raster layers matching the time index
# NOTE(review): rotate() presumably shifts longitudes from a 0-360 grid to
# -180..180 -- confirm the input stacks are on a 0-360 grid.
pred_covs_stack_temp<- rotate(raster::stack(raster::stack(rast_files_load[1])[[time_ind]], raster::stack(rast_files_load[2])[[time_ind]], raster::stack(rast_files_load[3])[[time_ind]], raster::stack(rast_files_load[4])[[time_ind]]))
# Mask out values outside area of interest
pred_covs_stack_temp<- raster::mask(pred_covs_stack_temp, mask = mask)
# Some processing to keep observations within our area of interest and get things in a "tidy-er" prediction dataframe
# Layer names carry the time stamp; strip the trailing ".var" piece and
# re-append the covariate names derived from the file names above.
time_name<- sub('.[^.]*$', '', names(pred_covs_stack_temp))
names(pred_covs_stack_temp)<- paste(time_name, predict_covs_names, sep = "_")
pred_covs_df_temp<- as.data.frame(pred_covs_stack_temp, xy = TRUE) %>%
drop_na()
colnames(pred_covs_df_temp)[2:ncol(pred_covs_df_temp)]<- gsub("X", "", gsub("[.]", "_", colnames(pred_covs_df_temp)[2:ncol(pred_covs_df_temp)]))
colnames(pred_covs_df_temp)[1:2]<- c("DECDEG_BEGLON", "DECDEG_BEGLAT")
# Pivot so each row is one location/year/season with one column per covariate
pred_covs_df_out_temp<- pred_covs_df_temp %>%
pivot_longer(., -c(DECDEG_BEGLON, DECDEG_BEGLAT), names_to = c("variable"), values_to = "value") %>%
separate(., variable, into = c("EST_YEAR", "SEASON", "variable"), sep = "_", extra = "merge") %>%
pivot_wider(., names_from = variable, values_from = value)
# Adding in some other columns we will want to match up easily with 'vast_data_out'
# The "DUMMY" survey/species values and PRESENCE/BIOMASS/ABUNDANCE = 1 are
# placeholders; PredTF = TRUE marks these rows as prediction-only rows.
pred_covs_df_out_temp<- pred_covs_df_out_temp %>%
mutate(., EST_YEAR = as.numeric(EST_YEAR),
DATE = paste(EST_YEAR, case_when(
SEASON == "Winter" ~ "12-16",
SEASON == "Spring" ~ "03-16",
SEASON == "Summer" ~ "07-16",
SEASON == "Fall" ~ "09-16"), sep = "-"),
SURVEY = "DUMMY",
SVVESSEL = "DUMMY",
NMFS_SVSPP = "DUMMY",
DFO_SPEC = "DUMMY",
PRESENCE = 1,
BIOMASS = 1,
ABUNDANCE = 1,
ID = paste("DUMMY", DATE, sep = ""),
PredTF = TRUE)
# Accumulate rows across time steps
if(i == 1){
pred_covs_out<- pred_covs_df_out_temp
} else {
pred_covs_out<- bind_rows(pred_covs_out, pred_covs_df_out_temp)
}
}
# Only going to keep information from fit_year_max through pred_years...
pred_covs_out_final<- pred_covs_out %>%
dplyr::filter(., EST_YEAR > fit_year_max & EST_YEAR <= max(pred_years))
# New implementation...
# VAST_YEAR_COV is clamped at fit_year_max so prediction years reuse the
# last fitted year's covariate factor level.
pred_covs_out_final<- pred_covs_out_final %>%
mutate(., #VAST_YEAR_COV = EST_YEAR,
VAST_YEAR_COV = ifelse(EST_YEAR > fit_year_max, fit_year_max, EST_YEAR),
VAST_SEASON = case_when(
SEASON == "Spring" ~ "SPRING",
SEASON == "Summer" ~ "SUMMER",
SEASON == "Fall" ~ "FALL"
),
"VAST_YEAR_SEASON" = paste(EST_YEAR, VAST_SEASON, sep = "_"))
# Subset to only seasons of interest...
pred_covs_out_final<- pred_covs_out_final %>%
filter(., VAST_SEASON %in% fit_seasons)
# Need to account for new levels in year season...
# Build the full ordered year-season level set over fit AND prediction years
# so factor levels line up with those used in the seasonal model data.
all_years<- seq(from = fit_year_min, to = max(pred_years), by = 1)
all_seasons<- fit_seasons
year_season_set<- expand.grid("SEASON" = all_seasons, "EST_YEAR" = all_years)
all_year_season_levels<- apply(year_season_set[,2:1], MARGIN = 1, FUN = paste, collapse = "_")
pred_covs_out_final<- pred_covs_out_final %>%
mutate(., "VAST_YEAR_SEASON" = factor(VAST_YEAR_SEASON, levels = all_year_season_levels),
"VAST_SEASON" = factor(VAST_SEASON, levels = all_seasons))
# Name rearrangement!
# Keep only what we need..
# cov_names = every column that is not one of the fixed identifier columns
cov_names<- names(pred_covs_out_final)[-which(names(pred_covs_out_final) %in% c("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON"))]
pred_covs_out_final<- pred_covs_out_final %>%
dplyr::select(., "ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON", {{cov_names}})
# Any extra covariates will likely be static...
if(!is.null(extra_covariates_stack)){
pred_covs_sf<- points_to_sf(pred_covs_out_final)
pred_covs_out_final<- static_extract_wrapper(static_covariates_list = extra_covariates_stack, sf_points = pred_covs_sf, date_col_name = "DATE", df_sf = "df", out_dir = NULL)
}
# Apply depth cut and drop NAs
# Depths greater than depth_cut are set to NA and then removed by drop_na()
pred_covs_out_final<- pred_covs_out_final %>%
mutate(., "Depth" = ifelse(Depth > depth_cut, NA, Depth),
"Summarized" = summarize,
"Ensemble_Stat" = ensemble_stat) %>%
drop_na()
# Rescale
# Center/scale each requested covariate with the fit-time mean and SD stored
# in rescale_params so predictions are on the same scale as the fitted model.
if(!is.null(rescale_params)){
for(i in seq_along(covs_rescale)){
match_mean<- rescale_params[which(names(rescale_params) == paste(covs_rescale[i], "Mean", sep = "_"))]
match_sd<- rescale_params[which(names(rescale_params) == paste(covs_rescale[i], "SD", sep = "_"))]
pred_covs_out_final<- pred_covs_out_final %>%
mutate_at(., {{covs_rescale[i]}}, .funs = covariate_rescale_func, type = "AJA", center = match_mean, scale = match_sd)
}
}
# Save the prediction dataframe alongside other model inputs and return it
saveRDS(pred_covs_out_final, file = paste(out_dir, "/VAST_pred_df_", summarize, "_", ensemble_stat, ".rds", sep = "" ))
return(pred_covs_out_final)
}
#' @title Make VAST seasonal dataset
#'
#' @description This function reads in a tidy model dataset and does some cleaning and processing to generate a new dataset to accommodate fitting a VAST seasonal (or other intra annual) model. These cleaning and processing steps boil down to creating an ordered, continuous, season-year vector, such that the model can then estimate density even in season-years not surveyed.
#'
#' @param tidy_mod_data = A tidy model dataframe with all the information (tows, habitat covariates, species occurrences) needed to fit a species distribution model.
#' @param nmfs_species_code = Numeric NMFS species code
#' @param fit_year_min = Minimum year to keep
#' @param fit_year_max = Maximum year to keep
#' @param pred_df = Either NULL or a dataframe with prediction information as created by `make_vast_predict_df`
#' @param out_dir = Directory to save the tidy model dataframe as an .rds file
#'
#' @return A VAST seasonal dataset, ready to be split into a `sample data` dataframe and a `covariate data` dataframe. This file is also saved in out_dir.
#'
#' @export
make_vast_seasonal_data<- function(tidy_mod_data, fit_seasons, nmfs_species_code, fit_year_min, fit_year_max, pred_years, pred_df, out_dir){
# Builds the seasonal VAST dataset: filters the tidy data to one species and
# the fitting years, creates an ordered year-season factor (adding dummy
# rows so every year-season level appears, even unsampled ones), and
# optionally appends the prediction-only rows from pred_df.
# For debugging
if(FALSE){
tar_load(tidy_mod_data)
nmfs_species_code = nmfs_species_code
fit_year_min = fit_year_min
fit_year_max = fit_year_max
fit_seasons = fit_seasons
pred_years = pred_years
tar_load(vast_predict_df)
pred_df = vast_predict_df
out_dir = here::here("scratch/aja/targets_flow/data/combined/")
tar_load(tidy_mod_data)
fit_seasons
}
# Some work on the time span and seasons
# Previous implementation before trying to include both surveys within a given season
# data_temp<- tidy_mod_data %>%
# filter(., NMFS_SVSPP == nmfs_species_code) %>%
# filter(., EST_YEAR >= fit_year_min & EST_YEAR <= fit_year_max) %>%
# mutate(., "VAST_SEASON" = case_when(
# SURVEY == "DFO" & SEASON == "SPRING" ~ "DFO",
# SURVEY == "NMFS" & SEASON == "SPRING" ~ "SPRING",
# SURVEY == "DFO" & SEASON == "SUMMER" ~ "SUMMER",
# SURVEY == "NMFS" & SEASON == "FALL" ~ "FALL")) %>%
# drop_na(VAST_SEASON)
# New implementatiom...
# NOTE(review): as.character("NA") below yields the literal string "NA", not
# a missing value, so drop_na(VAST_SEASON) does NOT remove DFO fall rows
# here; they are only removed by the fit_seasons filter that follows
# (assuming "NA" is never an element of fit_seasons) -- confirm intended.
data_temp<- tidy_mod_data %>%
filter(., NMFS_SVSPP == nmfs_species_code) %>%
filter(., EST_YEAR >= fit_year_min & EST_YEAR <= fit_year_max) %>%
mutate(., "VAST_SEASON" = case_when(
SURVEY == "DFO" & SEASON == "SPRING" ~ "SPRING",
SURVEY == "NMFS" & SEASON == "SPRING" ~ "SPRING",
SURVEY == "DFO" & SEASON == "SUMMER" ~ "SUMMER",
SURVEY == "NMFS" & SEASON == "FALL" ~ "FALL",
SURVEY == "DFO" & SEASON == "FALL" ~ as.character("NA"))) %>%
drop_na(VAST_SEASON)
data_temp<- data_temp %>%
filter(., VAST_SEASON %in% fit_seasons)
# Set of years and seasons. The DFO spring survey usually occurs before the NOAA NEFSC spring survey, so ordering accordingly. Pred year max or fit year max??
all_years<- seq(from = fit_year_min, to = fit_year_max, by = 1)
#all_years<- seq(from = fit_year_min, to = pred_years, by = 1)
all_seasons<- fit_seasons
# Ordered (year, season) level set used for the VAST_YEAR_SEASON factor
yearseason_set<- expand.grid("SEASON" = all_seasons, "EST_YEAR" = all_years)
all_yearseason_levels<- apply(yearseason_set[,2:1], MARGIN = 1, FUN = paste, collapse = "_")
# year_set<- sort(unique(data_temp$EST_YEAR))
# season_set<- c("DFO", "SPRING", "FALL")
#
# # Create a grid with all unique combinations of seasons and years and then combine these into one "year_season" variable
# yearseason_grid<- expand.grid("SEASON" = season_set, "EST_YEAR" = year_set)
# yearseason_levels<- apply(yearseason_grid[, 2:1], MARGIN = 1, FUN = paste, collapse = "_")
# yearseason_labels<- round(yearseason_grid$EST_YEAR + (as.numeric(factor(yearseason_grid$VAST_SEASON, levels = season_set))-1)/length(season_set), digits = 1)
#
# Similar process, but for the observations
yearseason_i<- apply(data_temp[, c("EST_YEAR", "VAST_SEASON")], MARGIN = 1, FUN = paste, collapse = "_")
yearseason_i<- factor(yearseason_i, levels = all_yearseason_levels)
# Add the year_season factor column to our sampling_data data set
data_temp$VAST_YEAR_SEASON<- yearseason_i
data_temp$VAST_SEASON = factor(data_temp$VAST_SEASON, levels = all_seasons)
# VAST year
# Clamp the covariate-year at fit_year_max (no-op for fitting rows)
data_temp$VAST_YEAR_COV<- ifelse(data_temp$EST_YEAR > fit_year_max, fit_year_max, data_temp$EST_YEAR)
#data_temp$VAST_YEAR_COV<- data_temp$EST_YEAR
# Real observations contribute to the likelihood
data_temp$PredTF<- FALSE
# Ordering...
# Everything that is not a fixed identifier column is a covariate column
cov_names<- names(data_temp)[-which(names(data_temp) %in% c("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON"))]
cov_names<- cov_names[-which(cov_names == "Season_Match")]
data_temp<- data_temp %>%
dplyr::select("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON", {{cov_names}})
# Make dummy data for all year_seasons to estimate gaps in sampling if needed
# One synthetic row per (year, season) level with PredTF = TRUE so every
# factor level appears in the data even when no survey sampled it
dummy_data<- data.frame("ID" = sample(data_temp$ID, size = 1), "DATE" = mean(data_temp$DATE, na.rm = TRUE), "EST_YEAR" = yearseason_set[,'EST_YEAR'], "SEASON" = yearseason_set[,'SEASON'], "SURVEY" = "DUMMY", "SVVESSEL" = "DUMMY", "DECDEG_BEGLAT" = mean(data_temp$DECDEG_BEGLAT, na.rm = TRUE), "DECDEG_BEGLON" = mean(data_temp$DECDEG_BEGLON, na.rm = TRUE), "NMFS_SVSPP" = "DUMMY", "DFO_SPEC" = "DUMMY", "PRESENCE" = 1, "BIOMASS" = 1, "ABUNDANCE" = 1, "PredTF" = TRUE, "VAST_YEAR_COV" = yearseason_set[,'EST_YEAR'], "VAST_SEASON" = yearseason_set[,'SEASON'], "VAST_YEAR_SEASON" = all_yearseason_levels)
# Add in "covariates"
# Fill each dummy covariate column with the across-data mean of that covariate
col_ind<- ncol(dummy_data)
for(i in seq_along(cov_names)){
col_ind<- col_ind+1
cov_vec<- unlist(data_temp[,{{cov_names}}[i]])
dummy_data[,col_ind]<- mean(cov_vec, na.rm = TRUE)
names(dummy_data)[col_ind]<- {{cov_names}}[i]
}
# Combine with original dataset
vast_data_out<- rbind(data_temp, dummy_data)
vast_data_out$VAST_YEAR_COV<- factor(vast_data_out$VAST_YEAR_COV, levels = seq(from = fit_year_min, to = fit_year_max, by = 1))
#vast_data_out$VAST_YEAR_COV<- factor(vast_data_out$VAST_YEAR_COV, levels = seq(from = fit_year_min, to = pred_years, by = 1))
# If we have additional years that we want to predict to and NOT Fit too, we aren't quite done just yet...
if(!is.null(pred_df)){
# Name work...
pred_df<- pred_df %>%
dplyr::select(., -Summarized, -Ensemble_Stat)
# Add those -- check names first
check_names<- all(colnames(pred_df) %in% colnames(vast_data_out)) & all(colnames(vast_data_out) %in% colnames(pred_df))
if(!check_names){
print("Check data and prediction column names, they don't match")
stop()
} else {
# NOTE(review): this first pred_df_bind assignment is dead code -- it is
# immediately overwritten by the distinct() version below.
pred_df_bind<- pred_df %>%
dplyr::select(., colnames(vast_data_out))
# # We only need one observation for each of the times...
pred_df_bind<- pred_df %>%
dplyr::select(., colnames(vast_data_out)) %>%
distinct(., ID, .keep_all = TRUE)
vast_data_out<- rbind(vast_data_out, pred_df_bind)
}
}
# Save and return it
saveRDS(vast_data_out, file = paste(out_dir, "vast_data.rds", sep = "/"))
return(vast_data_out)
}
#' @title Make VAST sample dataset
#'
#' @description This function creates a VAST sample dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = Description
#' @param out_dir = Description
#'
#' @return A sample dataframe that includes all of the "sample" or species occurrence information. This file is also saved in out_dir.
#'
#' @export
make_vast_sample_data<- function(vast_seasonal_data, fit_seasons, out_dir){
  # Build the VAST "sample" data frame (one row per observation) from the
  # full seasonal dataset. Note fit_seasons is accepted but not used here.
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    tar_load(vast_seasonal_data)
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # Year: zero-based index of the ordered year-season factor.
  # Swept: area swept per tow -- 0.0384 for NMFS tows, 0.0404 otherwise
  # (DFO / DUMMY rows); see Marine fish diversity on the Scotian Shelf, Canada.
  samp_out <- data.frame(
    "Year" = as.numeric(vast_seasonal_data$VAST_YEAR_SEASON) - 1,
    "Lat" = vast_seasonal_data$DECDEG_BEGLAT,
    "Lon" = vast_seasonal_data$DECDEG_BEGLON,
    "Biomass" = vast_seasonal_data$BIOMASS,
    "Swept" = ifelse(vast_seasonal_data$SURVEY == "NMFS", 0.0384, 0.0404),
    "Pred_TF" = vast_seasonal_data$PredTF
  )
  # Persist alongside the other VAST inputs, then hand back the data frame
  saveRDS(samp_out, file = paste(out_dir, "vast_sample_data.rds", sep = "/"))
  return(samp_out)
}
#' @title Make VAST covariate dataset
#'
#' @description This function creates a VAST covariate dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = Description
#' @param rescale = Logical indicating whether or not the covariates should be rescaled.
#' @param out_dir = Description
#'
#' @return A sample dataframe that includes all of the covariate information at each unique sample. This file is also saved in out_dir.
#'
#' @export
make_vast_covariate_data<- function(vast_seasonal_data, out_dir){
  # Assemble the VAST covariate data frame: one row per sample with the
  # time/space identifiers plus the habitat covariates used by the model.
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    tar_load(vast_seasonal_data)
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # Work from a local alias; the "DUMMY" rows ride along unchanged here
  seas_dat <- vast_seasonal_data
  # Year is the zero-based index of the ordered year-season factor
  cov_out <- data.frame(
    "Year" = as.numeric(seas_dat$VAST_YEAR_SEASON) - 1,
    "Year_Cov" = seas_dat$VAST_YEAR_COV,
    "Season" = seas_dat$VAST_SEASON,
    "Depth" = seas_dat$Depth,
    "SST_seasonal" = seas_dat$SST_seasonal,
    "BT_seasonal" = seas_dat$BT_seasonal,
    "BS_seasonal" = seas_dat$BS_seasonal,
    "SS_seasonal" = seas_dat$SS_seasonal,
    "Lat" = seas_dat$DECDEG_BEGLAT,
    "Lon" = seas_dat$DECDEG_BEGLON
  )
  # Persist next to the other VAST inputs, then return
  saveRDS(cov_out, file = paste(out_dir, "vast_covariate_data.rds", sep = "/"))
  return(cov_out)
}
#' @title Make VAST catchability data
#'
#' @description This function creates a VAST catchability dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = Description
#' @param out_dir = Description
#'
#' @return A sample dataframe that includes all of the covariate information at each unique sample. This file is also saved in out_dir.
#'
#' @export
make_vast_catchability_data<- function(vast_seasonal_data, out_dir){
  # Assemble the VAST catchability data frame: one row per sample with the
  # time/space identifiers and the survey fleet as an explicit factor.
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    vast_seasonal_data
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # Survey uses a fixed level order so the "DUMMY" placeholder rows map onto
  # a known level; Year is the zero-based index of the year-season factor.
  catch_out <- data.frame(
    "Year" = as.numeric(vast_seasonal_data$VAST_YEAR_SEASON) - 1,
    "Year_Cov" = vast_seasonal_data$VAST_YEAR_COV,
    "Season" = vast_seasonal_data$VAST_SEASON,
    "Lat" = vast_seasonal_data$DECDEG_BEGLAT,
    "Lon" = vast_seasonal_data$DECDEG_BEGLON,
    "Survey" = factor(vast_seasonal_data$SURVEY, levels = c("NMFS", "DFO", "DUMMY"))
  )
  # Persist next to the other VAST inputs, then return
  saveRDS(catch_out, file = paste(out_dir, "vast_catchability_data.rds", sep = "/"))
  return(catch_out)
}
#' @title Read in shapefile
#'
#' @description A short function to read in a shapefile given a file path
#'
#' @param polyshape_path = File path to a geospatial vector polygon file with .shp extension, specifying the location and shape of the area of interest.
#'
#' @return SF poylgon
#'
#' @export
read_polyshape<- function(polyshape_path){
  # Read a polygon shapefile from disk and return it as an sf object.
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    polyshape_path = "~/Box/RES_Data/Shapefiles/NELME_regions/NELME_sf.shp"
  }
  # sf::st_read handles the .shp and its sidecar files
  st_read(polyshape_path)
}
####
#' @title Make VAST extrapolation grid settings from a shapefile
#'
#' @description Create a list of with information defining the extrapolation grid and used by subsequent VAST functions, leveraging code here: https://github.com/James-Thorson-NOAA/VAST/wiki/Creating-an-extrapolation-grid.
#'
#' @param region_shapefile = A geospatial vector sf polygon file, specifying the location and shape of the area of of spatial domain
#' @param index_shapes = A multipolygon geospatial vector sf polygon file, specifying sub regions of interest. Grid locations are assigned to their subregion within the total spatial domain.
#' @param cell_size = The size of grid in meters (since working in UTM). This will control the resolution of the extrapolation grid.
#'
#' @return Tagged list containing extrapolation grid settings needed to fit a VAST model of species occurrence.
#'
#' @export
vast_make_extrap_grid<- function(region_shapefile, index_shapes, strata.limits, cell_size){
  # Build a VAST extrapolation grid: lay a regular grid of cell centers over
  # the study region (working in UTM meters), keep only points inside the
  # region polygon, tag each point with its index sub-region, and return a
  # lon/lat data frame with per-cell area in km^2.
  # For debugging
  if(FALSE){
    tar_load(index_shapefiles)
    index_shapes = index_shapefiles
    strata.limits = strata_use
    cell_size = 25000
  }
  # Transform crs of shapefile to common WGS84 lon/lat format.
  region_wgs84<- st_transform(region_shapefile, crs = "+proj=longlat +lat_0=90 +lon_0=180 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0 ")
  # Get UTM zone from the longitude at the center of the region's bounding box
  lon<- sum(st_bbox(region_wgs84)[c(1,3)])/2
  utm_zone<- floor((lon + 180)/6)+1
  # Transform to the UTM zone so the grid can be built in meters
  crs_utm<- st_crs(paste0("+proj=utm +zone=", utm_zone, " +ellps=WGS84 +datum=WGS84 +units=m +no_defs "))
  region_utm<- st_transform(region_wgs84, crs = crs_utm)
  # Make extrapolation grid with sf: one point at the center of each cell
  region_grid<- st_as_sf(st_make_grid(region_utm, cellsize = cell_size, what = "centers"), crs = crs_utm)
  # Keep only the points that fall within the region polygon. (A previous
  # version computed this st_intersects predicate twice, once into an unused
  # 'points_keep' data frame; that redundant pass has been removed.)
  region_grid<- region_grid %>%
    mutate(., "in_poly" = st_intersects(region_grid, region_utm, sparse = FALSE)) %>%
    filter(., in_poly == TRUE)
  # Convert back to WGS84 lon/lat, as that is what VAST expects; tag each
  # point with its index sub-region via a spatial join.
  extrap_grid<- region_grid %>%
    st_transform(., crs = "+proj=longlat +lat_0=90 +lon_0=180 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0 ") %>%
    st_join(., index_shapes, join = st_within) %>%
    mutate(., "Lon" = as.numeric(st_coordinates(.)[,1]),
           "Lat" = as.numeric(st_coordinates(.)[,2])) %>%
    st_drop_geometry() %>%
    dplyr::select(., Lon, Lat, Region) %>%
    mutate(., Area_km2=((cell_size/1000)^2),
           STRATA = factor(Region, levels = index_shapes$Region, labels = index_shapes$Region))
  # NOTE(review): strata.limits is accepted but not used in this function --
  # confirm whether it should feed into the returned grid.
  # Return it
  return(extrap_grid)
}
####
#' @title Make VAST model settings
#'
#' @description Create a list of model settings needed to fit a VAST model for species occurrence, largely copied from VAST::make_settings
#'
#' @param extrap_grid = User created extrapolation grid from vast_make_extrap_grid.
#' @param FieldConfig = A vector defining the number of spatial (Omega) and spatio-temporal (Epsilon) factors to include in the model for each of the linear predictors. For each factor, possible values range from 0 (which effectively turns off a given factor), to the number of categories being modeled. If FieldConfig < number of categories, VAST estimates common factors and then loading matrices.
#' @param RhoConfig = A vector defining the temporal structure of intercepts (Beta) and spatio-temporal (Epsilon) variation for each of the linear predictors. See `VAST::make_data` for options.
#' @param bias.correct = Logical boolean determining if Epsilon bias-correction should be done.
#' @param Options = Tagged vector to turn on or off specific options (e.g., SD_site_logdensity, Effective area, etc)
#' @param strata.limits
#'
#' @return Tagged list containing settings needed to fit a VAST model of species occurrence.
#'
#' @export
vast_make_settings <- function(extrap_grid, n_knots, FieldConfig, RhoConfig, OverdispersionConfig, bias.correct, knot_method, inla_method, Options, strata.limits){
# Wraps FishStatsUtils::make_settings, overriding the INLA mesh Method and
# optionally replacing individual entries of the default Options vector.
# NOTE(review): extrap_grid is accepted but never used in this function.
# For debugging
if(FALSE){
tar_load(vast_extrap_grid)
extrap_grid = vast_extrap_grid
FieldConfig = c("Omega1" = 1, "Epsilon1" = 1, "Omega2" = 1, "Epsilon2" = 1)
RhoConfig = c("Beta1" = 3, "Beta2" = 3, "Epsilon1" = 2, "Epsilon2" = 2)
OverdispersionConfig = c(0, 0)
bias.correct = FALSE
Options = c("Calculate_Range"=TRUE)
strata.limits = strata_use
n_knots = 400
knot_method = "samples"
inla_method = "Barrier"
}
# Run FishStatsUtils::make_settings
settings_out<- make_settings(n_x = n_knots, Region = "User", purpose = "index2", FieldConfig = FieldConfig, RhoConfig = RhoConfig, ObsModel = c(2, 1), OverdispersionConfig = OverdispersionConfig, bias.correct = bias.correct, knot_method = knot_method, treat_nonencounter_as_zero = FALSE, strata.limits = strata.limits)
settings_out$Method<- inla_method
# Adjust options?
# Start from the defaults returned above and overwrite any entries whose
# names appear in the user-supplied Options vector, then rebuild settings.
options_new<- settings_out$Options
if(!is.null(Options)){
for(i in seq_along(Options)){
options_adjust_i<- Options[i]
options_new[[which(names(options_new) == names(options_adjust_i))]]<- options_adjust_i
}
# NOTE(review): this second call uses ObsModel = c(1, 1) while the first call
# above uses c(2, 1) -- supplying a non-NULL Options therefore silently
# changes the observation model. Confirm which ObsModel is intended.
settings_out<- make_settings(n_x = n_knots, Region = "User", purpose = "index2", FieldConfig = FieldConfig, RhoConfig = RhoConfig, ObsModel = c(1, 1), OverdispersionConfig = OverdispersionConfig, bias.correct = bias.correct, knot_method = knot_method, treat_nonencounter_as_zero = FALSE, strata.limits = strata.limits, Options = options_new)
settings_out$Method<- inla_method
}
# Return it
return(settings_out)
}
####
#' @title Make VAST spatial info
#'
#' @description Create a tagged list with VAST spatial information needed
#'
#' @param extrap_grid = User created extrapolation grid from vast_make_extrap_grid.
#' @param vast_settings = Tagged list of VAST model settings, as created by `vast_make_settings`
#' @param tidy_mod_data = A tidy model dataframe providing the sample locations (DECDEG_BEGLON / DECDEG_BEGLAT)
#' @param out_dir = Directory where the spatial information files are saved
#'
#' @return Returns a tagged list with extrapolation and spatial info in different slots
#'
#' @export
vast_make_spatial_lists<- function(extrap_grid, vast_settings, tidy_mod_data, out_dir){
  # Build the two spatial objects VAST needs: the extrapolation-grid info and
  # the mesh/knot info at the observation locations.
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    tar_load(vast_extrap_grid)
    extrap_grid = vast_extrap_grid
    tar_load(vast_settings)
    tar_load(tidy_mod_data)
    inla_method = "Barrier"
    out_dir = here::here()
  }
  # Extrapolation info (FishStatsUtils::make_extrapolation_info)
  extrap_info <- make_extrapolation_info(Region = vast_settings$Region, strata.limits = vast_settings$strata.limits, input_grid = extrap_grid, DirPath = out_dir)
  # Spatial/mesh info at the sample lon/lat (FishStatsUtils::make_spatial_info)
  spatial_info <- make_spatial_info(n_x = vast_settings$n_x, Lon_i = tidy_mod_data$DECDEG_BEGLON, Lat_i = tidy_mod_data$DECDEG_BEGLAT, Extrapolation_List = extrap_info, knot_method = vast_settings$knot_method, Method = vast_settings$Method, grid_size_km = vast_settings$grid_size_km, fine_scale = vast_settings$fine_scale, DirPath = out_dir, Save_Results = TRUE)
  # Bundle both into one tagged list and return it
  out_lists <- list("Extrapolation_List" = extrap_info, "Spatial_List" = spatial_info)
  return(out_lists)
}
####
#' @title Reduce VAST prediction dataframe from regular grid to knot locations
#'
#' @description Reduce VAST prediction dataframe from regular grid to knot locations
#'
#' @param vast_predict_df = Prediction dataframe as created by `make_vast_predict_df`
#' @param vast_spatial_lists = Tagged list with extrapolation and spatial info, as created by `vast_make_spatial_lists`
#' @param out_dir = Directory for any saved output
#'
#' @return The prediction dataframe reduced to (at most) one row per ID/DATE/nearest-knot combination
#'
#' @export
reduce_vast_predict_df<- function(vast_predict_df = vast_predict_df, vast_spatial_lists = vast_spatial_lists, out_dir = here::here("data/predict")){
# Reduces the regular-grid prediction dataframe to (at most) one row per
# ID/DATE/knot combination by assigning each prediction location to its
# nearest model knot.
# NOTE(review): out_dir is accepted but nothing is written in this function
# -- confirm whether a save step was intended.
# For debugging
if(FALSE){
tar_load(vast_predict_df)
tar_load(vast_spatial_lists)
}
# Knots_sf
# Build an sf point layer of knot locations in the model's projected CRS
knots_info<- vast_spatial_lists$Spatial_List
knots_sf<- st_as_sf(data.frame(knots_info$loc_x), coords = c("E_km", "N_km"), crs = attributes(knots_info$loc_i)$projCRS)
# Get unique prediction locations and assign each prediction location to its nearest knot?
pred_df_temp<- vast_predict_df %>%
distinct(., DECDEG_BEGLON, DECDEG_BEGLAT)
pred_sf<- points_to_sf(pred_df_temp) %>%
st_transform(., crs = st_crs(knots_sf))
pred_nearest_knot<- pred_sf %>%
mutate(., "Nearest_knot" = st_nearest_feature(x = ., y = knots_sf)) %>%
st_drop_geometry()
# Merge this with full prediction dataset
# left_join with no explicit 'by' joins on the shared lon/lat columns
pred_df_out<- vast_predict_df %>%
left_join(., pred_nearest_knot)
# Reduce to one row per ID/DATE/knot. Note this keeps the FIRST row for each
# combination (distinct), it does NOT average covariate values across the
# grid cells sharing a knot -- confirm that is the intended behavior.
pred_df_out<- pred_df_out %>%
distinct(., ID, DATE, Nearest_knot, .keep_all = TRUE) %>%
dplyr::select(-Nearest_knot)
return(pred_df_out)
}
####
#' @title Make VAST covariate effect objects
#'
#' @description Create covariate effects for both linear predictors
#'
#' @param X1_coveff_vec = A vector specifying the habitat covariate effects for first linear predictor.
#' @param X2_coveff_vec = A vector specifying the habitat covariate effects for second linear predictor.
#' @param Q1_coveff_vec = A vector specifying the catchability covariate effects for first linear predictor.
#' @param Q2_coveff_vec = A vector specifying the catchability covariate effects for second linear predictor.
#'
#' @return A list with covariate effects for the habitat covariates and first linear predictor (first list slot), habitat covariates and second linear predictor (second list slot), catchability covariates and first linear predictor (third slot) and catchability covariates and second linear predictor (fourth slot).
#'
#' @export
vast_make_coveff<- function(X1_coveff_vec, X2_coveff_vec, Q1_coveff_vec, Q2_coveff_vec){
  # Bundle covariate-effect specifications into the four single-row config
  # matrices VAST expects (X1config_cp, X2config_cp, Q1config_k, Q2config_k).
  if(FALSE){
    # Interactive-debugging hook; never executed during a normal call
    X1_coveff_vec = c(2, 3, 3, 2, rep(3, 32))
    X2_coveff_vec = c(2, 3, 3, 2, rep(3, 32))
    Q1_coveff_vec = NULL
    Q2_coveff_vec = NULL
  }
  # Habitat configs are always single-row matrices; the catchability configs
  # are included only when BOTH Q vectors are supplied, otherwise left NULL.
  x1_mat <- matrix(X1_coveff_vec, nrow = 1)
  x2_mat <- matrix(X2_coveff_vec, nrow = 1)
  if (is.null(Q1_coveff_vec) | is.null(Q2_coveff_vec)) {
    coveff_out <- list("X1config_cp" = x1_mat, "X2config_cp" = x2_mat, "Q1config_k" = NULL, "Q2config_k" = NULL)
  } else {
    coveff_out <- list("X1config_cp" = x1_mat, "X2config_cp" = x2_mat, "Q1config_k" = matrix(Q1_coveff_vec, nrow = 1), "Q2config_k" = matrix(Q2_coveff_vec, nrow = 1))
  }
  # Return it
  return(coveff_out)
}
####
#' @title Build VAST SDM
#'
#' @description Build VAST species distribution model, without running it. This can be helpful to check settings before running `vast_fit_sdm`. Additionally, it can be helpful for making subsequent modifications, particularly to mapping.
#'
#' @param settings = A tagged list with the settings for the model, created with `vast_make_settings`.
#' @param extrap_grid = An extrapolation grid, created with `vast_make_extrap_grid`.
#' @param sample_data = A data frame with the biomass sample data for each species at each tow. Must include the columns Lat, Lon, Year, Biomass, Swept and Pred_TF.
#' @param covariate_data = A data frame with the habitat covariate data for each tow, or NULL when the model uses no habitat covariates.
#' @param X1_formula = A formula for the habitat covariates and first linear predictor.
#' @param X2_formula = A formula for the habitat covariates and second linear predictor.
#' @param X_contrasts = A tagged list specifying the contrasts to use for factor covariates in the model.
#' @param Xconfig_list = A tagged list specifying the habitat and catchability covariate effects for first and second linear predictors. Must contain X1config_cp, X2config_cp, Q1config_k and Q2config_k, as returned by `vast_make_coveff`.
#' @param catchability_data = A data frame with the catchability data for every sample.
#' @param Q1_formula = A formula for the catchability covariates and first linear predictor.
#' @param Q2_formula = A formula for the catchability covariates and second linear predictor.
#' @param index_shapes = A sf object with rows for each of the regions of interest.
#' @param spatial_info_dir = Directory passed to `fit_model_aja` as DirPath, where spatial information is read/written.
#'
#' @return A VAST `fit_model` object, with the inputs and built TMB object components.
#'
#' @export
vast_build_sdm <- function(settings, extrap_grid, sample_data, covariate_data, X1_formula, X2_formula, X_contrasts, Xconfig_list, catchability_data, Q1_formula, Q2_formula, index_shapes, spatial_info_dir){
    # For debugging
    if(FALSE){
        library(VAST)
        library(tidyverse)
        library(stringr)
        # Seasonal
        tar_load(vast_settings)
        settings = vast_settings
        tar_load(vast_extrap_grid)
        extrap_grid = vast_extrap_grid
        tar_load(vast_sample_data)
        sample_data = vast_sample_data
        tar_load(vast_covariate_data)
        covariate_data = vast_covariate_data
        X1_formula = hab_formula
        X2_formula = hab_formula
        hab_env_coeffs_n = hab_env_coeffs_n
        tar_load(vast_catchability_data)
        catchability_data = vast_catchability_data
        catch_formula<- ~ Survey
        Q1_formula = catch_formula
        Q2_formula = catch_formula
        X_contrasts = list(Season = contrasts(vast_covariate_data$Season, contrasts = FALSE), Year_Cov = contrasts(vast_covariate_data$Year_Cov, contrasts = FALSE))
        # X_contrasts = list(Year_Cov = contrasts(vast_covariate_data$Year_Cov, contrasts = FALSE))
        tar_load(vast_coveff)
        Xconfig_list = vast_coveff
        tar_load(index_shapefiles)
        index_shapes = index_shapefiles
        spatial_info_dir = here::here("")
        # Annual
        tar_load(vast_settings)
        settings = vast_settings
        tar_load(vast_extrap_grid)
        extrap_grid = vast_extrap_grid
        tar_load(vast_sample_data)
        sample_data = vast_sample_data
        tar_load(vast_covariate_data)
        covariate_data = vast_covariate_data
        X1_formula = hab_formula
        X2_formula = hab_formula
        hab_env_coeffs_n = hab_env_coeffs_n
        tar_load(vast_catchability_data)
        catchability_data = vast_catchability_data
        catch_formula<- ~ 0
        Q1_formula = catch_formula
        Q2_formula = catch_formula
        X_contrasts = list(Year_Cov = contrasts(vast_covariate_data$Year_Cov, contrasts = FALSE))
        tar_load(vast_coveff)
        Xconfig_list = vast_coveff
        tar_load(index_shapefiles)
        index_shapes<- index_shapefiles
    }
    # Check that the sample data has all of the columns referenced below
    samp_dat_names<- c("Lat", "Lon", "Year", "Biomass", "Swept", "Pred_TF")
    if(!(all(samp_dat_names %in% names(sample_data)))){
        stop(paste("Check names in sample data. Must include:", paste0(samp_dat_names, collapse = ","), sep = " "))
    }
    # Covariate data frame names: every word referenced by the X1/X2 formulas (other than
    # spline syntax) must be a column of covariate_data
    if(!is.null(covariate_data)){
        cov_dat_names1<- unlist(str_extract_all(X1_formula, boundary("word"))[[2]])
        # Words associated with the spline syntax (bs(), degree, TRUE/FALSE, numeric knot/degree
        # values) rather than actual covariate names
        spline_words<- c("bs", "degree", "TRUE", "intercept", unique(as.numeric(unlist(str_extract_all(X1_formula, pattern = "[0-9]+", simplify = TRUE)))), "FALSE")
        # Use setdiff() rather than x[-which(x %in% y)]: the latter silently drops EVERY
        # name when there are no matches, because x[-integer(0)] selects nothing.
        cov_dat_names1<- setdiff(cov_dat_names1, spline_words)
        cov_dat_names2<- unlist(str_extract_all(X2_formula, boundary("word"))[[2]])
        cov_dat_names2<- setdiff(cov_dat_names2, spline_words)
        cov_dat_names_all<- unique(c(cov_dat_names1, cov_dat_names2))
        if(!(all(cov_dat_names_all %in% names(covariate_data)))){
            print(names(covariate_data))
            # Print the vector itself -- names() of an unnamed character vector is NULL
            print(cov_dat_names_all)
            stop(paste("Check names in covariate data. Must include", paste0(cov_dat_names_all, collapse = ","), sep = " "))
        }
    }
    # Validate the Xconfig_list structure so a typo fails fast rather than inside fit_model_aja
    if(!(all(c("X1config_cp", "X2config_cp", "Q1config_k", "Q2config_k") %in% names(Xconfig_list)))){
        stop(paste("Check names of Xconfig_list. Must be", paste0(c("X1config_cp", "X2config_cp", "Q1config_k", "Q2config_k"), collapse = ","), sep = " "))
    }
    # Run VAST::fit_model wrapper with correct info and settings.
    # run_model = FALSE builds the TMB object without optimizing (fitting happens in vast_fit_sdm).
    vast_build_out<- fit_model_aja(
        "settings" = settings, "Method" = settings$Method, "input_grid" = extrap_grid,
        "Lat_i" = sample_data[, 'Lat'], "Lon_i" = sample_data[, 'Lon'], "t_i" = sample_data[, 'Year'],
        "c_i" = rep(0, nrow(sample_data)), "b_i" = sample_data[, 'Biomass'], "a_i" = sample_data[, 'Swept'],
        "PredTF_i" = sample_data[, 'Pred_TF'],
        "X1config_cp" = Xconfig_list[['X1config_cp']], "X2config_cp" = Xconfig_list[['X2config_cp']],
        "covariate_data" = covariate_data, "X1_formula" = X1_formula, "X2_formula" = X2_formula,
        "X_contrasts" = X_contrasts,
        "catchability_data" = catchability_data, "Q1_formula" = Q1_formula, "Q2_formula" = Q2_formula,
        "Q1config_k" = Xconfig_list[['Q1config_k']], "Q2config_k" = Xconfig_list[['Q2config_k']],
        "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE,
        "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = TRUE,
        "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
    # Return it
    return(vast_build_out)
}
####
#' @title Adjust VAST SDM
#'
#' @description Make adjustments to VAST SDM and the model returned in `vast_build_sdm`. This can either be the exact same as the one built using `vast_build_sdm`, or it can update that model with adjustments provided in a tagged list.
#'
#' @param vast_build = A VAST `fit_model` object.
#' @param index_shapes = A sf object with rows for each of the regions of interest.
#' @param spatial_info_dir = Directory passed to `fit_model_aja` as DirPath, where spatial information is read/written.
#' @param adjustments = Either NULL (default) or a tagged list identifying adjustments that should be made to the vast_build `fit_model` object. If NULL, the identical model defined by the `vast_build` is rebuilt. Allowed names: FieldConfig, RhoConfig, X1_formula, X2_formula, X1config_cp, X2config_cp, X_contrasts, log_sigmaXi1_cp, log_sigmaXi2_cp, lambda1_k, lambda2_k, Q1_formula, Q2_formula, Q1config_k, Q2config_k.
#'
#' @return A VAST fit_model object, with the inputs and built TMB object components.
#'
#' @export
vast_make_adjustments <- function(vast_build, index_shapes, spatial_info_dir, adjustments = NULL){
    # For debugging
    if(FALSE){
        tar_load(vast_build0)
        vast_build = vast_build0
        tar_load(vast_covariate_data)
        adjustments = list("log_sigmaXi1_cp" = factor(c(rep(1, length(unique(fit_seasons))), rep(4, nlevels(vast_covariate_data$Year_Cov)), rep(NA, gam_degree*hab_env_coeffs_n))), "log_sigmaXi2_cp" = factor(c(rep(1, length(unique(fit_seasons))), rep(4, nlevels(vast_covariate_data$Year_Cov)), rep(NA, gam_degree*hab_env_coeffs_n))), "lambda1_k" = factor(c(1, NA)), "lambda2_k" = factor(c(1, NA)))
        tar_load(index_shapefiles)
        index_shapes<- index_shapefiles
    }
    # If no adjustments are needed, rebuild the identical model from the information stored in
    # vast_build. run_model stays FALSE here; the actual optimization happens in `vast_fit_sdm`.
    if(is.null(adjustments)){
        vast_build_adjust_out<- fit_model_aja(
            "settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid,
            "Method" = vast_build$settings$Method,
            "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'],
            "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'],
            "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'],
            "PredTF_i" = vast_build$data_list[['PredTF_i']],
            "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']],
            "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']],
            "covariate_data" = vast_build$input_args$data_args_input$covariate_data,
            "X1_formula" = vast_build$input_args$data_args_input$X1_formula,
            "X2_formula" = vast_build$input_args$data_args_input$X2_formula,
            "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts,
            "catchability_data" = vast_build$input_args$data_args_input$catchability_data,
            "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula,
            "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula,
            # Element name is 'Q1config_k' (as stored by vast_build_sdm); the original code
            # fetched the nonexistent 'Q1config_cp' element, which silently returned NULL.
            "Q1config_k" = vast_build$input_args$data_args_input[['Q1config_k']],
            "Q2config_k" = vast_build$input_args$data_args_input[['Q2config_k']],
            "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE,
            "test_fit" = FALSE, "Use_REML" = FALSE,
            "getJointPrecision" = vast_build$input_args$extra_args$getJointPrecision,
            "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
    }
    # If there are adjustments, need to make those and then re run model.
    if(!is.null(adjustments)){
        # Check names -- the possible adjustment flags in the named list
        adjust_names<- c("FieldConfig", "RhoConfig", "X1_formula", "X2_formula", "X1config_cp", "X2config_cp", "X_contrasts", "log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k", "Q1_formula", "Q2_formula", "Q1config_k", "Q2config_k")
        if(!(all(names(adjustments) %in% adjust_names))){
            stop(paste("Check names in adjustment list. Must be one of", paste0(adjust_names, collapse = ","), sep = " "))
        }
        # First options are going to be in the settings bit..
        if(any(names(adjustments) %in% c("FieldConfig", "RhoConfig"))){
            # Get just the settings adjustments. Plain [[ ]] indexing with a character name
            # is all that is needed here -- rlang's {{ }} has no meaning in base subsetting.
            settings_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$settings))]
            for(i in seq_along(settings_adjusts)){
                setting_adjust_i<- settings_adjusts[i]
                vast_build$settings[[setting_adjust_i]]<- adjustments[[setting_adjust_i]]
            }
        }
        # A lot of stuff is going to be in the `vast_build$input_args$data_args_input` object
        if(any(names(adjustments) %in% names(vast_build$input_args$data_args_input))){
            # Get just the data args adjustments
            data_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$input_args$data_args_input))]
            for(i in seq_along(data_adjusts)){
                data_adjust_i<- data_adjusts[i]
                vast_build$input_args$data_args_input[[data_adjust_i]]<- adjustments[[data_adjust_i]]
            }
        }
        # Only other adjustment (for now) is Map.
        if(any(names(adjustments) %in% c("log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k"))){
            # Get the original, which we can then edit...
            map_adjust_out<- vast_build$tmb_list$Map
            # Get just the map adjustment names
            map_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$tmb_list$Map))]
            # Loop over them
            for(i in seq_along(map_adjusts)){
                map_adjust_i<- map_adjusts[i]
                map_adjust_out[[map_adjust_i]]<- adjustments[[map_adjust_i]]
            }
        }
        # Now, re-build the model. This is slightly different if we have changed the map or not...
        if(any(names(adjustments) %in% c("log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k"))){
            # Adding Map argument
            vast_build_adjust_out<- fit_model_aja(
                "settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid,
                "Method" = vast_build$settings$Method,
                "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'],
                "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'],
                "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'],
                "PredTF_i" = vast_build$data_list[['PredTF_i']],
                "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']],
                "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']],
                "covariate_data" = vast_build$input_args$data_args_input$covariate_data,
                "X1_formula" = vast_build$input_args$data_args_input$X1_formula,
                "X2_formula" = vast_build$input_args$data_args_input$X2_formula,
                "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts,
                "catchability_data" = vast_build$input_args$data_args_input$catchability_data,
                "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula,
                "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula,
                "Q1config_k" = vast_build$input_args$data_args_input[['Q1config_k']],
                "Q2config_k" = vast_build$input_args$data_args_input[['Q2config_k']],
                "Map" = map_adjust_out,
                "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE,
                "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = FALSE,
                "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
        } else {
            # No need for Map argument, just build
            vast_build_adjust_out<- fit_model_aja(
                "settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid,
                "Method" = vast_build$settings$Method,
                "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'],
                "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'],
                "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'],
                "PredTF_i" = vast_build$data_list[['PredTF_i']],
                "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']],
                "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']],
                "covariate_data" = vast_build$input_args$data_args_input$covariate_data,
                "X1_formula" = vast_build$input_args$data_args_input$X1_formula,
                "X2_formula" = vast_build$input_args$data_args_input$X2_formula,
                "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts,
                "catchability_data" = vast_build$input_args$data_args_input$catchability_data,
                "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula,
                "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula,
                # Argument and element name corrected to Q1config_k/Q2config_k -- the original
                # passed "Q1config_cp"/"Q2config_cp", which is not a fit_model argument and was
                # inconsistent with every other call in this file.
                "Q1config_k" = vast_build$input_args$data_args_input[['Q1config_k']],
                "Q2config_k" = vast_build$input_args$data_args_input[['Q2config_k']],
                "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE,
                "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = FALSE,
                "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
        }
    }
    # Return it
    return(vast_build_adjust_out)
}
#' @title Fit VAST SDM
#'
#' @description Fit VAST species distribution model.
#'
#' @param vast_build_adjust = A VAST `fit_model` object, as returned by `vast_make_adjustments`.
#' @param nice_category_names = A character string with a short, human-readable species/category name, used to name the saved fitted-model file.
#' @param index_shapes = A sf object with rows for each of the regions of interest.
#' @param spatial_info_dir = Directory passed to `fit_model_aja` as DirPath, where spatial information is read/written.
#' @param out_dir = Output directory where the fitted model is saved as "<nice_category_names>_fitted_vast.rds".
#'
#' @return A VAST fit_model object, with the inputs and outputs, including parameter estimates, extrapolation grid info, spatial list info, data info, and TMB info.
#'
#' @export
vast_fit_sdm <- function(vast_build_adjust, nice_category_names, index_shapes, spatial_info_dir, out_dir){
    # For debugging
    if(FALSE){
        tar_load(vast_adjust)
        vast_build_adjust = vast_adjust
        nice_category_names = nice_category_names
        out_dir = here::here("results/mod_fits")
        tar_load(index_shapefiles)
        index_shapes = index_shapefiles
        spatial_info_dir = here::here("")
    }
    # Build and fit model. Unlike the build/adjust steps, run_model = TRUE so the model is optimized.
    vast_fit_out<- fit_model_aja(
        "settings" = vast_build_adjust$settings,
        "input_grid" = vast_build_adjust$input_args$data_args_input$input_grid,
        "Method" = vast_build_adjust$settings$Method,
        "Lat_i" = vast_build_adjust$data_frame[, 'Lat_i'], "Lon_i" = vast_build_adjust$data_frame[, 'Lon_i'],
        "t_i" = vast_build_adjust$data_frame[, 't_i'], "c_iz" = vast_build_adjust$data_frame[, 'c_iz'],
        "b_i" = vast_build_adjust$data_frame[, 'b_i'], "a_i" = vast_build_adjust$data_frame[, 'a_i'],
        "PredTF_i" = vast_build_adjust$data_list[['PredTF_i']],
        "X1config_cp" = vast_build_adjust$input_args$data_args_input[['X1config_cp']],
        "X2config_cp" = vast_build_adjust$input_args$data_args_input[['X2config_cp']],
        "covariate_data" = vast_build_adjust$input_args$data_args_input$covariate_data,
        "X1_formula" = vast_build_adjust$input_args$data_args_input$X1_formula,
        "X2_formula" = vast_build_adjust$input_args$data_args_input$X2_formula,
        "X_contrasts" = vast_build_adjust$input_args$data_args_input$X_contrasts,
        "catchability_data" = vast_build_adjust$input_args$data_args_input$catchability_data,
        "Q1_formula" = vast_build_adjust$input_args$data_args_input$Q1_formula,
        "Q2_formula" = vast_build_adjust$input_args$data_args_input$Q2_formula,
        # Argument and element name corrected to Q1config_k/Q2config_k -- the original passed
        # "Q1config_cp"/"Q2config_cp" and fetched nonexistent elements (silently NULL),
        # inconsistent with the names stored by vast_build_sdm.
        "Q1config_k" = vast_build_adjust$input_args$data_args_input[['Q1config_k']],
        "Q2config_k" = vast_build_adjust$input_args$data_args_input[['Q2config_k']],
        "Map" = vast_build_adjust$tmb_list$Map,
        "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = TRUE,
        "test_fit" = FALSE, "Use_REML" = FALSE,
        "getJointPrecision" = vast_build_adjust$input_args$extra_args$getJointPrecision,
        "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
    # Save and return it
    saveRDS(vast_fit_out, file = paste(out_dir, "/", nice_category_names, "_", "fitted_vast.rds", sep = "" ))
    return(vast_fit_out)
}
#' @title Predict fitted VAST model
#'
#' @description This function makes predictions from a fitted VAST SDM to new locations using VAST::predict.fit_model. Importantly, to use this feature for new times, at least one location for each time of interest needs to be included during the model fitting process. This dummy observation should have a PredTF value of 1 so that the observation is only used in the predicted probability and NOT estimating the likelihood. To limit memory use, prediction covariates are first averaged at the model's spatial knots (nearest-knot assignment) and predictions are made at knot locations only.
#'
#' @param vast_fitted_sdm = A fitted VAST SDM object, as returned with `vast_fit_sdm`
#' @param nice_category_names = A character string used to label the saved prediction .rds file
#' @param predict_variable = Which variable should be predicted, default is density (D_i)
#' @param predict_category = Which category (species/age/size) should be predicted, default is 0
#' @param predict_vessel = Which sampling category should be predicted, default is 0
#' @param predict_covariates_df_all = A long data frame with all of the prediction covariates; must contain Lon, Lat, the `time_col` column and the `cov_names` columns
#' @param cov_names = A character vector naming the covariate columns to average at each knot
#' @param time_col = A character string naming the time column in `predict_covariates_df_all`
#' @param out_dir = Output directory where the prediction data frame is saved as "pred_<predict_variable>_<nice_category_names>.rds"
#'
#' @return A data frame with Lat, Lon, Time and Pred columns, also saved as an .rds file in `out_dir`
#'
#' @export
predict_vast<- function(vast_fitted_sdm, nice_category_names, predict_variable = "D_i", predict_category = 0, predict_vessel = 0, predict_covariates_df_all, cov_names, time_col, out_dir){
    # For debugging
    if(FALSE){
        # Targets
        tar_load(vast_fit)
        vast_fitted_sdm = vast_fit
        nmfs_species_code = 101
        predict_variable = "Index_gctl"
        predict_category = 0
        predict_vessel = 0
        tar_load(vast_predict_df)
        predict_covariates_df_all = vast_predict_df
        # Basic example...
        vast_fitted_sdm = readRDS(here::here("", "results/mod_fits/1011_fitted_vast.rds"))
        nmfs_species_code = 101
        predict_variable = "Index_gctl"
        predict_category = 0
        predict_vessel = 0
        predict_covariates_df_all<- pred_df
        time_col = "Year"
        cov_names = c("Depth", "SST_seasonal", "BT_seasonal")
    }
    #### Not the biggest fan of this, but for now, building in a work around to resolve some of the memory issues that we were running into by supplying a 0.25 degree grid and trying to predict/project for each season-year from 1980-2100. To overcome this issue, going to try to just make the projections to knots and do the smoothing later.
    # First, need to get the knot locations from the fitted model's spatial list
    knot_locs<- data.frame(vast_fitted_sdm$spatial_list$latlon_g) %>%
        st_as_sf(., coords = c("Lon", "Lat"), remove = FALSE) %>%
        mutate(., "Pt_Id" = 1:nrow(.))
    # Nearest knot to each prediction point
    pred_sf<- predict_covariates_df_all %>%
        st_as_sf(., coords = c("Lon", "Lat"), remove = FALSE)
    # NOTE(review): st_nearest_feature on unprojected lon/lat -- presumably close enough
    # at this resolution; confirm whether great-circle distances are needed.
    pred_sf<- pred_sf %>%
        mutate(., "Nearest_Knot" = st_nearest_feature(., knot_locs))
    # Average the covariate values of all points assigned to the same knot, per time step
    pred_df_knots<- pred_sf %>%
        st_drop_geometry()
    group_by_vec<- c({{time_col}}, "Nearest_Knot")
    pred_df_knots<- pred_df_knots %>%
        group_by_at(.vars = group_by_vec) %>%
        summarize_at(all_of(cov_names), mean, na.rm = TRUE) %>%
        left_join(., st_drop_geometry(knot_locs), by = c("Nearest_Knot" = "Pt_Id")) %>%
        ungroup()
    # Collecting necessary bits from the prediction covariates -- lat, lon, time
    pred_lats<- pred_df_knots$Lat
    pred_lons<- pred_df_knots$Lon
    pred_times<- as.numeric(unlist(pred_df_knots[{{time_col}}]))
    # Per-observation area/category/vessel vectors expected by predict.fit_model
    pred_sampled_areas<- rep(1, length(pred_lats))
    pred_category<- rep(predict_category, length(pred_lats))
    pred_vessel<- rep(predict_vessel, length(pred_lats))
    # Keep only the knot-level columns that also exist in the model's covariate_data
    pred_cov_dat_name_order<- which(names(pred_df_knots) %in% names(vast_fitted_sdm$covariate_data))
    pred_cov_dat_use<- pred_df_knots[,pred_cov_dat_name_order]
    # Catchability data: only needed when the fitted model used catchability covariates
    if(!is.null(vast_fitted_sdm$catchability_data)){
        pred_catch_dat_use<- pred_cov_dat_use %>%
            dplyr::select(., c(Year, Year_Cov, Season, Lat, Lon, Survey)
            )
        # NOTE(review): predictions are hard-coded to the "NMFS" survey level here --
        # confirm this is the intended reference survey for all projections.
        pred_catch_dat_use$Survey<- rep("NMFS", nrow(pred_catch_dat_use))
        pred_catch_dat_use$Survey<- factor(pred_catch_dat_use$Survey, levels = c("NMFS", "DFO", "DUMMY"))
    } else {
        pred_catch_dat_use<- NULL
    }
    # Make the predictions at the knot locations
    preds_out<- predict.fit_model_aja(x = vast_fitted_sdm, what = predict_variable, Lat_i = pred_lats, Lon_i = pred_lons, t_i = pred_times, a_i = pred_sampled_areas, c_iz = pred_category, NULL, new_covariate_data = pred_cov_dat_use, new_catchability_data = pred_catch_dat_use, do_checks = FALSE)
    # Get everything as a dataframe to make plotting easier...
    pred_df_out<- data.frame("Lat" = pred_lats, "Lon" = pred_lons, "Time" = pred_cov_dat_use[,{{time_col}}], "Pred" = preds_out)
    # Save and return it
    saveRDS(pred_df_out, file = paste(out_dir, "/pred_", predict_variable, "_", nice_category_names, ".rds", sep = "" ))
    return(pred_df_out)
}
#' @title Prediction spatial summary
#'
#' @description Calculates average "availability" of fish biomass from SDM predictions within spatial area of interest.
#'
#' NOTE(review): the body of this function does NOT match its signature. It appears to be a
#' copy of the density-plotting function: it never uses `pred_df` or `spatial_areas`, and it
#' references many objects that are not parameters (vast_fit, all_times, plot_times, land_sf,
#' mask, xlim, ylim, panel_or_gif, template, land_color, out_dir, file_name, working_dir,
#' panel_cols, panel_rows, nmfs_species_code). As written it will error unless those happen to
#' exist in the calling environment. Needs a rewrite against its intended contract.
#'
#' @param pred_df = A dataframe with Lat, Lon, Time and Pred columns (currently unused -- see NOTE above)
#' @param spatial_areas = Presumably an sf object with the areas to summarize over (currently unused -- see NOTE above)
#' @return Nothing is returned; side effects only (plots written to disk) -- TODO confirm intended return value
#'
#' @export
pred_spatial_summary<- function(pred_df, spatial_areas){
    if(FALSE){
        tar_load(vast_fit)
        template = raster("~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/HighResTemplate.grd")
        tar_load(vast_seasonal_data)
        all_times = as.character(levels(vast_seasonal_data$YEAR_SEASON))
        plot_times = NULL
        tar_load(land_sf)
        tar_load(shapefile)
        mask = shapefile
        land_color = "#d9d9d9"
        res_data_path = "~/Box/RES_Data/"
        xlim = c(-85, -55)
        ylim = c(30, 50)
        panel_or_gif = "gif"
        panel_cols = NULL
        panel_rows = NULL
    }
    # Plotting at spatial knots...
    # Getting prediction array: log(density + 1) at each knot/category/time
    pred_array<- log(vast_fit$Report$D_gct+1)
    # Getting time info
    if(!is.null(plot_times)){
        # NOTE(review): `which(all_times) %in% plot_times` is misplaced parentheses --
        # which() on a character vector errors; presumably meant
        # all_times[which(all_times %in% plot_times)].
        plot_times<- all_times[which(all_times) %in% plot_times]
    } else {
        plot_times<- all_times
    }
    # Getting spatial information
    spat_data<- vast_fit$extrapolation_list
    loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
    CRS_orig<- sp::CRS("+proj=longlat")
    CRS_proj<- sp::CRS(spat_data$projargs)
    land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
    # Looping through time steps; shared color scale limits across all panels
    rasts_out<- vector("list", dim(pred_array)[3])
    rasts_range<- pred_array
    rast_lims<- c(round(min(rasts_range)-0.000001, 2), round(max(rasts_range) + 0.0000001, 2))
    if(dim(pred_array)[3] == 1){
        # NOTE(review): assigned to `df` but used below as `data_df` -- one of the two names is wrong.
        df<- data.frame(loc_g, z = pred_array[,1,])
        points_ll = st_as_sf(data_df, coords = c("Lon", "Lat"), crs = CRS_orig)
        points_proj = points_ll %>%
            st_transform(., crs = CRS_proj)
        points_bbox<- st_bbox(points_proj)
        raster_proj<- st_rasterize(points_proj)
        raster_proj<- resample(raster_proj, raster(template))
        # NOTE(review): land_sf_proj is never defined (only land_sf above); the theme() call on
        # the line after coord_sf() is not chained with `+`, so it is evaluated and discarded.
        plot_out<- ggplot() +
            geom_stars(data = raster_proj, aes(x = x, y = y, fill = z)) +
            scale_fill_viridis_c(name = "Density", option = "viridis", na.value = "transparent", limits = rast_lims) +
            geom_sf(data = land_sf_proj, fill = land_color, lwd = 0.2) +
            coord_sf(xlim = points_bbox[c(1,3)], ylim = points_bbox[c(2,4)], expand = FALSE, datum = sf::st_crs(CRS_proj))
        theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05))
        ggsave(filename = paste(out_dir, file_name, ".png", sep = ""), plot_out, width = 11, height = 8, units = "in")
    } else {
        for (tI in 1:dim(pred_array)[3]) {
            data_df<- data.frame(loc_g, z = pred_array[,1,tI])
            # Interpolation of knot values onto a regular lon/lat grid (akima::interp-style call)
            pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
            pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
                xo=seq(-87.99457, -57.4307, length = 115),
                yo=seq(22.27352, 48.11657, length = 133))
            pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
            # Clip interpolated points to the mask polygon
            pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
            pred_df_temp<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
            coords_keep<- as.data.frame(st_coordinates(pred_df_temp))
            row.names(coords_keep)<- NULL
            pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
            names(pred_df_use)<- c("x", "y", "z")
            # raster_proj<- raster::rasterize(as_Spatial(points_ll), template, field = "z", fun = mean)
            # raster_proj<- as.data.frame(raster_proj, xy = TRUE)
            #
            time_plot_use<- plot_times[tI]
            rasts_out[[tI]]<- ggplot() +
                geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
                scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
                annotate("text", x = -65, y = 37.5, label = time_plot_use) +
                geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
                coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
                theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
        }
        if(panel_or_gif == "panel"){
            # Panel plot
            # NOTE(review): `all.plot` in the ggsave call below does not match `all_plot`
            # assigned here -- will error with "object 'all.plot' not found".
            all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
            ggsave(filename = paste(working_dir, file_name, ".png", sep = ""), all.plot, width = 11, height = 8, units = "in")
        } else {
            # Make a gif: print each time step's plot in sequence and capture with save_gif
            plot_loop_func<- function(plot_list){
                for (i in seq_along(plot_list)) {
                    plot_use<- plot_list[[i]]
                    print(plot_use)
                }
            }
            invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, nmfs_species_code, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
        }
    }
}
#' @title Plot VAST model predicted density surfaces
#'
#' @description Creates either a panel plot or a gif of VAST model predicted density surfaces
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param nice_category_names = A character string with a human-readable category name, used to label plot outputs -- TODO complete this description.
#' @param all_times = A vector of all of the unique time steps available from the VAST fitted model
#' @param plot_times = Either NULL to make a plot for each time in `all_times` or a vector of all of the times to plot, which must be a subset of `all_times`
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param panel_or_gif = A character string of either "panel" or "gif" indicating how the multiple plots across time steps should be displayed
#' @param out_dir = Output directory to save the panel plot or gif
#'
#' @return A VAST fit_model object, with the inputs and outputs, including parameter estimates, extrapolation grid info, spatial list info, data info, and TMB info.
#'
#' @export
vast_fit_plot_density<- function(vast_fit, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
  # Plot VAST-model predicted density surfaces (one map per time step) as
  # either a multi-panel .png or an animated .gif, written to `out_dir`.
  #
  # Fixes relative to the previous version:
  # - `plot_times` subsetting used `which(all_times)` on a character vector,
  #   which errors; now uses `all_times %in% plot_times`.
  # - The single-time-step ggsave built its path with `sep = "/"`, producing
  #   "<out_dir>/<name>/.png"; now uses file.path().
  # - The duplicated interpolate/clip/plot code is factored into one local
  #   helper; unused local `CRS_proj` and dead if(FALSE) scaffolding removed.
  #
  # Predicted density on the log(x + 1) scale; D_gct is grid x category x time
  pred_array<- log(vast_fit$Report$D_gct+1)
  # Which time steps to plot (default: all of them)
  if(!is.null(plot_times)){
    plot_times<- all_times[all_times %in% plot_times]
  } else {
    plot_times<- all_times
  }
  # Spatial info: extrapolation-grid lon/lat and land polygon cropped to the window
  spat_data<- vast_fit$extrapolation_list
  loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
  CRS_orig<- sp::CRS("+proj=longlat")
  land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # Common fill-scale limits so every panel/frame is directly comparable
  rast_lims<- c(0, round(max(pred_array) + 0.0000001, 2))
  # Helper: interpolate one time slice onto a regular grid, clip to `mask`,
  # and return the ggplot map labeled with `time_label`.
  # NOTE(review): the xo/yo interpolation extent is hard-coded to the NW
  # Atlantic domain used elsewhere in this file -- confirm before reuse.
  plot_one_time<- function(z_vals, time_label){
    data_df<- data.frame(loc_g, z = z_vals)
    # Interpolation onto a regular lon/lat grid
    pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
    pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
      xo=seq(-87.99457, -57.4307, length = 115),
      yo=seq(22.27352, 48.11657, length = 133))
    pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
    # Keep only interpolated points falling inside the study-area mask
    pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
    pred_in_mask<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
    coords_keep<- as.data.frame(st_coordinates(pred_in_mask))
    row.names(coords_keep)<- NULL
    pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_in_mask$z)))
    names(pred_df_use)<- c("x", "y", "z")
    ggplot() +
      geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
      annotate("text", x = -65, y = 37.5, label = time_label) +
      geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
      coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
      theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
  }
  if(dim(pred_array)[3] == 1){
    # Single time step: save one map directly
    plot_out<- plot_one_time(pred_array[,1,], plot_times)
    ggsave(filename = file.path(out_dir, paste0(nice_category_names, ".png")), plot_out, width = 11, height = 8, units = "in")
  } else {
    # One map per time step
    rasts_out<- vector("list", dim(pred_array)[3])
    for (tI in seq_len(dim(pred_array)[3])) {
      rasts_out[[tI]]<- plot_one_time(pred_array[,1,tI], plot_times[tI])
    }
    if(panel_or_gif == "panel"){
      # Panel plot with a single shared legend
      all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
      ggsave(filename = paste0(out_dir, "/", nice_category_names, "_LogDensity.png"), all_plot, width = 11, height = 8, units = "in")
      return(all_plot)
    } else {
      # Animated gif: print each map in sequence for save_gif() to capture
      plot_loop_func<- function(plot_list){
        for (i in seq_along(plot_list)) {
          print(plot_list[[i]])
        }
      }
      invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
    }
  }
}
#' @title Plot predicted density surfaces from data frame
#'
#' @description Creates either a panel plot or a gif of predicted density surfaces from a data frame that has location and time information
#'
#' @param pred_df = A dataframe with Lat, Lon, Time and Pred columns
#' @param nice_category_names = A character string used to name the plot or gif file saved in `out_dir`
#' @param mask = Land mask
#' @param plot_times = Either NULL to make a plot for each time in `pred_df$Time` or a vector of all of the times to plot, which must be a subset of `pred_df$Time`
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param panel_or_gif = A character string of either "panel" or "gif" indicating how the multiple plots across time steps should be displayed
#' @param out_dir = Output directory to save the panel plot or gif
#'
#' @return NULL. Panel or gif plot is saved in out_dir.
#'
#' @export
vast_df_plot_density<- function(pred_df, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
  # Plot predicted density surfaces from a data frame with Lat, Lon, Time and
  # Pred columns, as either a multi-panel .png or an animated .gif saved to
  # `out_dir`.
  #
  # Fixes relative to the previous version:
  # - `plot_times` subsetting used `which(all_times)` on a character vector,
  #   which errors; now uses `all_times %in% plot_times`.
  # - The panel-branch ggsave referenced the undefined object `all.plot`
  #   (should be `all_plot`) and passed `sep = ""` to paste0(), which has no
  #   `sep` argument; dead if(FALSE) scaffolding removed.
  #
  # Time ID column for filtering (numeric index of the Time values)
  pred_df<- pred_df %>%
    mutate(., "Time_Filter" = as.numeric(Time))
  # Work on the log(x + 1) scale
  pred_df$Pred<- log(pred_df$Pred+1)
  # All unique times present in the predictions
  all_times<- unique(pred_df$Time)
  # Which time steps to plot (default: all of them)
  if(!is.null(plot_times)){
    plot_times<- all_times[all_times %in% plot_times]
  } else {
    plot_times<- all_times
  }
  # Land polygon cropped to the plotting window
  land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # One map per time step; shared fill limits so frames are comparable
  rasts_out<- vector("list", length(plot_times))
  rast_lims<- c(0, round(max(pred_df$Pred) + 0.0000001, 2))
  for (tI in seq_along(plot_times)) {
    # NOTE(review): this assumes as.numeric(Time) enumerates 1..n in the same
    # order as plot_times; if plot_times is a subset of all_times, the filter
    # and the annotate() label below can disagree -- confirm with callers.
    pred_df_temp<- pred_df %>%
      dplyr::filter(., Time_Filter == tI)
    # Interpolation onto a regular lon/lat grid
    pred_df_temp<- na.omit(data.frame("x" = pred_df_temp$Lon, "y" = pred_df_temp$Lat, "layer" = pred_df_temp$Pred))
    pred_df_interp<- interp(pred_df_temp[,1], pred_df_temp[,2], pred_df_temp[,3], duplicate = "mean", extrap = TRUE,
      xo=seq(-87.99457, -57.4307, length = 115),
      yo=seq(22.27352, 48.11657, length = 133))
    pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
    # Keep only interpolated points falling inside the study-area mask
    pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = 4326)
    pred_df_temp2<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
    coords_keep<- as.data.frame(st_coordinates(pred_df_temp2))
    row.names(coords_keep)<- NULL
    pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp2$z)))
    names(pred_df_use)<- c("x", "y", "z")
    time_plot_use<- plot_times[tI]
    rasts_out[[tI]]<- ggplot() +
      geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
      annotate("text", x = -65, y = 37.5, label = time_plot_use) +
      geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
      coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
      theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
  }
  if(panel_or_gif == "panel"){
    # Panel plot with a single shared legend
    all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
    ggsave(filename = paste0(out_dir, "/", nice_category_names, "_LogDensity.png"), all_plot, width = 11, height = 8, units = "in")
  } else {
    # Animated gif: print each map in sequence for save_gif() to capture
    plot_loop_func<- function(plot_list){
      for (i in seq_along(plot_list)) {
        print(plot_list[[i]])
      }
    }
    invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
  }
}
# Predict values (e.g. density "D_i") from a fitted VAST model at new
# locations/times. Works by appending the new points to the original data with
# PredTF_i = 1 (excluded from the likelihood), rebuilding the spatial, data,
# and TMB objects using the fitted parameters (x$ParHat), and extracting the
# requested report element for the appended rows.
#
# Args:
#   x: a VAST `fit_model` object.
#   what: name of the x$Report element to predict; must have length n_i.
#   Lat_i, Lon_i, t_i, a_i, c_iz, v_i: location, time, area-swept, category,
#     and vessel values for the prediction points.
#   new_covariate_data / new_catchability_data: optional data frames containing
#     (at least) the same columns as x$covariate_data / x$catchability_data.
#   do_checks: if TRUE, verify the rebuilt model reproduces the original jnll.
#   working_dir: directory used when rebuilding the TMB object.
# Returns: a vector of predictions for the new points, or (when do_checks
#   detects a jnll mismatch) a diagnostic list with Report and data_list.
predict.fit_model_aja<- function(x, what = "D_i", Lat_i, Lon_i, t_i, a_i, c_iz = rep(0,length(t_i)), v_i = rep(0,length(t_i)), new_covariate_data = NULL, new_catchability_data = NULL, do_checks = TRUE, working_dir = paste0(getwd(),"/")){
  # Interactive debugging scaffolding; never executed (if(FALSE)).
  if(FALSE){
    tar_load(vast_fit)
    x = vast_fit
    what = "D_i"
    Lat_i = x$data_frame$Lat_i
    #Lat_i = pred_cov_dat_use$Lat
    Lon_i = x$data_frame$Lon_i
    #Lon_i = pred_cov_dat_use$Lon
    t_i = x$data_frame$t_i
    #t_i = pred_cov_dat_use$Year
    a_i<- x$data_frame$a_i
    #a_i<- rep(unique(pred_sampled_areas), length(Lat_i))
    c_iz = rep(0,length(t_i))
    #c_iz<- rep(unique(predict_category), length(Lat_i))
    v_i = rep(0,length(t_i))
    #v_i<- rep(unique(predict_vessel), length(t_i))
    new_covariate_data = NULL
    #new_covariate_data = pred_cov_dat_use
    new_catchability_data = NULL
    #new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    x = vast_fit
    what = "Index_gctl"
    Lat_i = predict_covariates_df_all[,"DECDEG_BEGLAT"]
    Lon_i = predict_covariates_df_all[,"DECDEG_BEGLON"]
    t_i = predict_covariates_df_all[,"t_i"]
    a_i = predict_covariates_df_all[,"a_i"]
    c_iz = predict_covariates_df_all[,"c_iz"]
    v_i = predict_covariates_df_all[,"v_i"]
    new_covariate_data = pred_cov_dat_use
    new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    working_dir = paste0(getwd(),"/")
    # object = vast_fit
    # x = object
    # Lat_i = object$data_frame$Lat_i
    # Lon_i = object$data_frame$Lon_i
    # t_i = object$data_frame$t_i
    # a_i = object$data_frame$a_i
    # c_iz = rep(0,length(t_i))
    # v_i = rep(0,length(t_i))
    # what = "P1_iz"
    # new_covariate_data = object$covariate_data
    # new_catchability_data = object$catchability_data
    # do_checks = FALSE
    x = vast_fitted_sdm
    what = predict_variable
    Lat_i = pred_lats
    Lon_i = pred_lons
    t_i = pred_times
    a_i = pred_sampled_areas
    c_iz = pred_category
    v_i = rep(0,length(t_i))
    new_covariate_data = pred_cov_dat_use
    new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    working_dir = paste0(getwd(), "/")
  }
  message("`predict.fit_model(.)` is in beta-testing, and please explore results carefully prior to using")
  # Check issues: `what` must exist in the report and have one value per observation
  if( !(what%in%names(x$Report)) || (length(x$Report[[what]])!=x$data_list$n_i) ){
    stop("`what` can only take a few options")
  }
  if( !is.null(new_covariate_data) ){
    # Confirm all columns are available
    if( !all(colnames(x$covariate_data) %in% colnames(new_covariate_data)) ){
      stop("Please ensure that all columns of `x$covariate_data` are present in `new_covariate_data`")
    }
    # Eliminate unnecessary columns (reorder to match the fitted covariate data)
    new_covariate_data = new_covariate_data[,match(colnames(x$covariate_data),colnames(new_covariate_data))]
    # Eliminate old covariates that duplicate rows in new_covariate_data
    # (exact Lat/Lon/Year matches found via nearest-neighbor distance == 0)
    NN = RANN::nn2( query=x$covariate_data[,c('Lat','Lon','Year')], data=new_covariate_data[,c('Lat','Lon','Year')], k=1 )
    if( any(NN$nn.dist==0) ){
      x$covariate_data = x$covariate_data[-which(NN$nn.dist==0),,drop=FALSE]
    }
  }
  if( !is.null(new_catchability_data) ){
    # Confirm all columns are available
    # NOTE(review): the error string below presumably should say
    # `new_catchability_data` rather than `new_covariate_data`.
    if( !all(colnames(x$catchability_data) %in% colnames(new_catchability_data)) ){
      stop("Please ensure that all columns of `x$catchability_data` are present in `new_covariate_data`")
    }
    # Eliminate unnecessary columns
    new_catchability_data = new_catchability_data[,match(colnames(x$catchability_data),colnames(new_catchability_data))]
    # Eliminate old catchability rows that duplicate new_catchability_data
    NN = RANN::nn2( query=x$catchability_data[,c('Lat','Lon','Year')], data=new_catchability_data[,c('Lat','Lon','Year')], k=1 )
    if( any(NN$nn.dist==0) ){
      x$catchability_data = x$catchability_data[-which(NN$nn.dist==0),,drop=FALSE]
    }
  }
  # Process covariates: original rows first, then the new prediction rows
  covariate_data = rbind( x$covariate_data, new_covariate_data )
  catchability_data = rbind( x$catchability_data, new_catchability_data )
  # Process inputs: appended rows get PredTF_i = 1 so they are excluded from
  # the likelihood and used only for prediction
  PredTF_i = c( x$data_list$PredTF_i, rep(1,length(t_i)) )
  # NOTE(review): b_i for the prediction rows is filled with random 0/1 draws;
  # those rows carry PredTF_i = 1 so they should not enter the likelihood, but
  # this still makes the function depend on the RNG state -- TODO confirm a
  # fixed placeholder (e.g. rep(0, ...)) would not be safer.
  b_i = c( x$data_frame[,"b_i"], sample(c(0, 1), size = length(t_i), replace = TRUE))
  c_iz = rbind( matrix(x$data_frame[,grep("c_iz",names(x$data_frame))]), matrix(c_iz) )
  Lat_i = c( x$data_frame[,"Lat_i"], Lat_i )
  Lon_i = c( x$data_frame[,"Lon_i"], Lon_i )
  a_i = c( x$data_frame[,"a_i"], a_i )
  v_i = c( x$data_frame[,"v_i"], v_i )
  t_i = c( x$data_frame[,"t_i"], t_i )
  #assign("b_i", b_i, envir=.GlobalEnv)
  # Build information regarding spatial location and correlation, reusing the
  # fitted mesh and k-means so the spatial structure is unchanged
  message("\n### Re-making spatial information")
  spatial_args_new = list("anisotropic_mesh"=x$spatial_list$MeshList$anisotropic_mesh, "Kmeans"=x$spatial_list$Kmeans, "Lon_i"=Lon_i, "Lat_i"=Lat_i )
  spatial_args_input = combine_lists( input=spatial_args_new, default=x$input_args$spatial_args_input )
  spatial_list = do.call( what=make_spatial_info, args=spatial_args_input )
  # Check spatial_list: the mesh must be identical to the original fit's
  if( !all.equal(spatial_list$MeshList,x$spatial_list$MeshList) ){
    stop("`MeshList` generated during `predict.fit_model` doesn't match that of original fit; please email package author to report issue")
  }
  # Build data
  # Do *not* restrict inputs to formalArgs(make_data) because other potential inputs are still parsed by make_data for backwards compatibility
  message("\n### Re-making data object")
  data_args_new = list( "c_iz"=c_iz, "b_i"=b_i, "a_i"=a_i, "v_i"=v_i, "PredTF_i"=PredTF_i,
    "t_i"=t_i, "spatial_list"=spatial_list,
    "covariate_data"=covariate_data, "catchability_data"=catchability_data )
  data_args_input = combine_lists( input=data_args_new, default=x$input_args$data_args_input ) # Do *not* use args_to_use
  data_list = do.call( what=make_data, args=data_args_input )
  # Skip extrapolation-grid calculations during this rebuild
  data_list$n_g = 0
  # Build object with the fitted parameter values (no re-optimization)
  message("\n### Re-making TMB object")
  model_args_default = list("TmbData"=data_list, "RunDir"=working_dir, "Version"=x$settings$Version, "RhoConfig"=x$settings$RhoConfig, "loc_x"=spatial_list$loc_x, "Method"=spatial_list$Method, "Map" = x$tmb_list$Map)
  model_args_input = combine_lists( input=list("Parameters"=x$ParHat),
    default=model_args_default, args_to_use=formalArgs(make_model) )
  tmb_list = do.call( what=make_model, args=model_args_input )
  # Extract output: report at fitted parameters, then keep only the rows that
  # correspond to the appended prediction points
  Report = tmb_list$Obj$report()
  Y_i = Report[[what]][(1+nrow(x$data_frame)):length(Report$D_i)]
  # sanity check: the rebuilt model should reproduce the original jnll
  #if( all.equal(covariate_data,x$covariate_data) & Report$jnll!=x$Report$jnll){
  if( do_checks==TRUE && (Report$jnll!=x$Report$jnll) ){
    message("Problem detected in `predict.fit_model`; returning outputs for diagnostic purposes")
    Return = list("Report"=Report, "data_list"=data_list)
    return(Return)
  }
  # return prediction
  return(Y_i)
}
# Assign extrapolation-grid points to strata. Two modes:
# - index_shapes supplied: sf spatial join (st_within) of points against the
#   polygons, returning the matched "Region" column.
# - index_shapes NULL: legacy border-based (lat/lon/depth) matching.
match_strata_fn_aja <- function(points, strata_dataframe, index_shapes) {
  # Interactive debugging scaffolding; never executed (if(FALSE)).
  if(FALSE){
    points = Tmp
    l = 1
    strata_dataframe = strata.limits[l, , drop = FALSE]
    index_shapes = index_shapes
  }
  if(is.null(index_shapes)){
    # Default all strata: start TRUE, then narrow by whichever borders exist
    match_latitude_TF = match_longitude_TF = match_depth_TF = rep( TRUE, nrow(strata_dataframe))
    # NOTE(review): `x` is not defined anywhere in this function (the argument
    # is `points`), so this branch errors if reached. It appears to be lifted
    # from the row-wise apply() context of the original VAST helper -- confirm
    # and either restore the apply() usage or index `points` directly. In the
    # current file, Prepare_User_Extrapolation_Data_Fn_aja always supplies
    # index_shapes, so only the branch below is exercised.
    if( all(c("south_border","north_border") %in% names(strata_dataframe)) ){
      match_latitude_TF = as.numeric(x["BEST_LAT_DD"])>strata_dataframe[,'south_border'] & as.numeric(x["BEST_LAT_DD"])<=strata_dataframe[,'north_border']
    }
    if( all(c("west_border","east_border") %in% names(strata_dataframe)) ){
      match_longitude_TF = as.numeric(x["BEST_LON_DD"])>strata_dataframe[,'west_border'] & as.numeric(x["BEST_LON_DD"])<=strata_dataframe[,'east_border']
    }
    if( all(c("shallow_border","deep_border") %in% names(strata_dataframe)) ){
      match_depth_TF = as.numeric(x["BEST_DEPTH_M"])>strata_dataframe[,'shallow_border'] & as.numeric(x["BEST_DEPTH_M"])<=strata_dataframe[,'deep_border']
    }
    # Return the matching STRATA label, or NA when nothing matches
    Char = as.character(strata_dataframe[match_latitude_TF & match_longitude_TF & match_depth_TF,"STRATA"])
    return(ifelse(length(Char)==0,NA,Char))
  }
  # Andrew edit: polygon-based matching. Convert points to sf, join each point
  # to the polygon it falls within, and return the Region assignments.
  if(!is.null(index_shapes)){
    Tmp_sf<- data.frame(points) %>%
      st_as_sf(., coords = c("BEST_LON_DD", "BEST_LAT_DD"), crs = st_crs(index_shapes), remove = FALSE)
    match_shape<- Tmp_sf %>%
      st_join(., index_shapes, join = st_within) %>%
      mutate(., "Row_ID" = seq(from = 1, to = nrow(.))) %>%
      st_drop_geometry() %>%
      dplyr::select(., Region) %>%
      as.vector()
    return(match_shape)
  }
}
# Build a user-supplied extrapolation-grid list for VAST. Each grid cell is
# assigned its area within every stratum defined by `strata.limits` (matched
# spatially via `index_shapes` polygons), and lon/lat coordinates are projected
# to the working coordinate system.
#
# Args:
#   input_grid: data frame/matrix with at least Lat, Lon, Area_km2 (Depth optional).
#   strata.limits: data frame with a STRATA column (defaults to one "All_areas" stratum).
#   projargs, zone, flip_around_dateline: passed to project_coordinates().
#   index_shapes: sf polygons with a Region column, one per stratum.
# Returns: list with a_el (cell areas per stratum), Data_Extrap, zone,
#   projargs, flip_around_dateline, and Area_km2_x.
Prepare_User_Extrapolation_Data_Fn_aja<- function (input_grid, strata.limits = NULL, projargs = NA, zone = NA, flip_around_dateline = TRUE, index_shapes, ...) {
  # Fall back to a single stratum covering everything
  if (is.null(strata.limits)) {
    strata.limits <- data.frame(STRATA = "All_areas")
  }
  message("Using strata ", strata.limits)
  extrap_grid <- input_grid
  cell_areas <- extrap_grid[, "Area_km2"]
  # Point table used for strata matching; depth column is optional
  match_pts <- cbind(BEST_LAT_DD = extrap_grid[, "Lat"], BEST_LON_DD = extrap_grid[, "Lon"])
  if ("Depth" %in% colnames(extrap_grid)) {
    match_pts <- cbind(match_pts, BEST_DEPTH_M = extrap_grid[, "Depth"])
  }
  # a_el: one column per stratum holding the cell's area when it falls inside
  # that stratum's polygon, and 0 otherwise
  a_el <- as.data.frame(matrix(NA, nrow = nrow(extrap_grid), ncol = nrow(strata.limits), dimnames = list(NULL, strata.limits[, "STRATA"])))
  for (strat_i in seq_len(ncol(a_el))) {
    a_el[, strat_i] <- match_strata_fn_aja(points = match_pts, strata_dataframe = strata.limits[strat_i, , drop = FALSE], index_shapes = index_shapes[index_shapes$Region == as.character(strata.limits[strat_i, , drop = FALSE]), ])
    a_el[, strat_i] <- ifelse(is.na(a_el[, strat_i]), 0, cell_areas)
  }
  # Project lon/lat into the working (e.g. UTM) coordinate system
  projected_xy <- project_coordinates(X = extrap_grid[, "Lon"], Y = extrap_grid[, "Lat"], projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline)
  extrap_grid <- cbind(extrap_grid, Include = 1)
  if (all(c("E_km", "N_km") %in% colnames(extrap_grid))) {
    extrap_grid[, c("E_km", "N_km")] <- projected_xy[, c("X", "Y")]
  } else {
    extrap_grid <- cbind(extrap_grid, E_km = projected_xy[, "X"], N_km = projected_xy[, "Y"])
  }
  list(a_el = a_el, Data_Extrap = extrap_grid, zone = attr(projected_xy, "zone"), projargs = attr(projected_xy, "projargs"), flip_around_dateline = flip_around_dateline, Area_km2_x = cell_areas)
}
# Customized version of FishStatsUtils' make_extrapolation_info that routes
# Region == "user" grids through Prepare_User_Extrapolation_Data_Fn_aja so
# strata can be defined by `index_shapes` polygons. After building (and, for
# multiple regions, combining) the extrapolation list, the grid is optionally
# thinned to at most `max_cells` cells via k-means clustering, with per-cell
# areas aggregated over cluster members.
# Returns: a list of class "make_extrapolation_info".
make_extrapolation_info_aja<- function (Region, projargs = NA, zone = NA, strata.limits = data.frame(STRATA = "All_areas"), create_strata_per_region = FALSE, max_cells = NULL, input_grid = NULL, observations_LL = NULL, grid_dim_km = c(2, 2), maximum_distance_from_sample = NULL, grid_in_UTM = TRUE, grid_dim_LL = c(0.1, 0.1), region = c("south_coast", "west_coast"), strata_to_use = c("SOG", "WCVI", "QCS", "HS", "WCHG"), epu_to_use = c("All", "Georges_Bank", "Mid_Atlantic_Bight", "Scotian_Shelf", "Gulf_of_Maine", "Other")[1], survey = "Chatham_rise", surveyname = "propInWCGBTS", flip_around_dateline, nstart = 100, area_tolerance = 0.05, backwards_compatible_kmeans = FALSE, DirPath = paste0(getwd(), "/"), index_shapes, ...) {
  # Interactive debugging scaffolding; never executed (if(FALSE)).
  if(FALSE){
    # First run fit_model_aja...
    Region = settings$Region
    projargs = NA
    zone = settings$zone
    strata.limits = settings$strata.limits
    create_strata_per_region = FALSE
    max_cells = settings$max_cells
    input_grid = input_grid
    observations_LL = NULL
    grid_dim_km = settings$grid_size_km
    maximum_distance_from_sample = NULL
    index_shapes = index_shapes
  }
  # max_cells = NULL means "no thinning"
  if (is.null(max_cells))
    max_cells = Inf
  # Build one extrapolation list per region
  for (rI in seq_along(Region)) {
    Extrapolation_List = NULL
    if (tolower(Region[rI]) == "user") {
      # User-supplied grid: validate inputs, then build via the aja helper
      if (is.null(input_grid)) {
        stop("Because you're using a user-supplied region, please provide 'input_grid' input")
      }
      if (!(all(c("Lat", "Lon", "Area_km2") %in% colnames(input_grid)))) {
        stop("'input_grid' must contain columns named 'Lat', 'Lon', and 'Area_km2'")
      }
      if (missing(flip_around_dateline))
        flip_around_dateline = FALSE
      Extrapolation_List = Prepare_User_Extrapolation_Data_Fn_aja(strata.limits = strata.limits, input_grid = input_grid, projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline, index_shapes = index_shapes, ...)
    }
    # Fallback: build a grid around supplied observation locations
    if (is.null(Extrapolation_List)) {
      if (is.null(observations_LL)) {
        stop("Because you're using a new Region[rI], please provide 'observations_LL' input with columns named `Lat` and `Lon`")
      }
      if (missing(flip_around_dateline))
        flip_around_dateline = FALSE
      Extrapolation_List = Prepare_Other_Extrapolation_Data_Fn(strata.limits = strata.limits, observations_LL = observations_LL, grid_dim_km = grid_dim_km, maximum_distance_from_sample = maximum_distance_from_sample, grid_in_UTM = grid_in_UTM, grid_dim_LL = grid_dim_LL, projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline, ...)
    }
    # Accumulate: first region starts the result; later regions are merged in
    if (rI == 1) {
      Return = Extrapolation_List
    } else {
      Return = combine_extrapolation_info(Return, Extrapolation_List, create_strata_per_region = create_strata_per_region)
    }
  }
  # Optionally thin the grid to max_cells cells using k-means on the projected
  # coordinates, then aggregate per-cell quantities over cluster membership
  if (max_cells < nrow(Return$Data_Extrap)) {
    message("# Reducing extrapolation-grid from ", nrow(Return$Data_Extrap), " to ", max_cells, " cells for Region(s): ", paste(Region, collapse = ", "))
    # Cluster only cells with positive area
    loc_orig = Return$Data_Extrap[, c("E_km", "N_km")]
    loc_orig = loc_orig[which(Return$Area_km2_x > 0), ]
    Kmeans = make_kmeans(n_x = max_cells, loc_orig = loc_orig, nstart = nstart, randomseed = 1, iter.max = 1000, DirPath = DirPath, Save_Results = TRUE, kmeans_purpose = "extrapolation", backwards_compatible_kmeans = backwards_compatible_kmeans)
    # Assign every original cell (including zero-area ones) to its nearest center
    Kmeans[["cluster"]] = RANN::nn2(data = Kmeans[["centers"]], query = Return$Data_Extrap[, c("E_km", "N_km")], k = 1)$nn.idx[, 1]
    # Sum (or otherwise reduce) a per-cell vector within each cluster
    aggregate_vector = function(values_x, index_x, max_index, FUN = sum) {
      tapply(values_x, INDEX = factor(index_x, levels = 1:max_index), FUN = FUN)
    }
    a_el = matrix(NA, nrow = max_cells, ncol = ncol(Return$a_el))
    for (lI in 1:ncol(Return$a_el)) {
      a_el[, lI] = aggregate_vector(values_x = Return$a_el[, lI], index_x = Kmeans$cluster, max_index = max_cells)
    }
    Area_km2_x = aggregate_vector(values_x = Return$Area_km2_x, index_x = Kmeans$cluster, max_index = max_cells)
    # A cluster is "included" if any member cell was included
    Include = aggregate_vector(values_x = Return$Data_Extrap[, "Include"], index_x = Kmeans$cluster, max_index = max_cells, FUN = function(vec) {
      any(vec > 0)
    })
    # Back-project cluster centers to lon/lat for the reduced grid
    lonlat_g = project_coordinates(X = Kmeans$centers[, "E_km"], Y = Kmeans$centers[, "N_km"], projargs = "+proj=longlat +ellps=WGS84", origargs = Return$projargs)
    Data_Extrap = cbind(Lon = lonlat_g[, 1], Lat = lonlat_g[, 2], Include = Include, Kmeans$centers)
    Return = list(a_el = a_el, Data_Extrap = Data_Extrap, zone = Return$zone, projargs = Return$projargs, flip_around_dateline = Return$flip_around_dateline, Area_km2_x = Area_km2_x)
  }
  # With multiple regions, optionally prepend a "Total" stratum summing all areas
  if (length(Region) > 1 & create_strata_per_region == TRUE) {
    Return$a_el = cbind(Total = rowSums(Return$a_el), Return$a_el)
  }
  class(Return) = "make_extrapolation_info"
  return(Return)
}
#' @title Fit a VAST model (local adaptation of `FishStatsUtils::fit_model`)
#'
#' @description Builds the extrapolation grid, spatial information, data list,
#' and TMB object for a VAST model, then (optionally) estimates parameters in
#' two optimization passes and returns a `fit_model` object. Extra arguments
#' supplied through `...` are routed to the individual build/optimize steps
#' via `combine_lists()`.
#'
#' @param settings Output from `make_settings()`, controlling model structure.
#' @param Method,Lat_i,Lon_i,t_i,b_i,a_i,c_iz,v_i Standard VAST sampling-data
#'   inputs (see `FishStatsUtils::fit_model`).
#' @param working_dir Directory where settings and results files are written.
#' @param X1config_cp,X2config_cp,covariate_data,X1_formula,X2_formula Density
#'   covariate configuration.
#' @param Q1config_k,Q2config_k,catchability_data,Q1_formula,Q2_formula
#'   Catchability covariate configuration.
#' @param newtonsteps Number of Newton steps in the final optimization pass.
#' @param silent If TRUE, suppress TMB console output.
#' @param build_model,run_model If FALSE, return the assembled inputs without
#'   building/optimizing the TMB object.
#' @param test_fit If TRUE, check gradients at starting values and run
#'   `VAST::check_fit()` on the first-pass estimates.
#' @param ... Extra arguments forwarded to the extrapolation, spatial, data,
#'   model, and optimization steps.
#'
#' @return A list of class "fit_model". When `run_model = FALSE` or
#'   `build_model = FALSE`, the returned list contains only the assembled
#'   inputs (no parameter estimates).
fit_model_aja <- function(settings, Method, Lat_i, Lon_i, t_i, b_i, a_i, c_iz = rep(0, length(b_i)), v_i = rep(0, length(b_i)), working_dir = paste0(getwd(), "/"), X1config_cp = NULL, X2config_cp = NULL, covariate_data, X1_formula = ~0, X2_formula = ~0, Q1config_k = NULL, Q2config_k = NULL, catchability_data, Q1_formula = ~0, Q2_formula = ~0, newtonsteps = 1, silent = TRUE, build_model = TRUE, run_model = TRUE, test_fit = TRUE, ...) {
  # Interactive-debugging scaffold: never executed (wrapped in if (FALSE)),
  # kept so example arguments can be assigned by hand when stepping through.
  if (FALSE) {
    # Run vast_fit_sdm first...
    "settings" = settings
    "input_grid" = extrap_grid
    "Lat_i" = sample_data[, 'Lat']
    "Lon_i" = sample_data[, 'Lon']
    "t_i" = sample_data[, 'Year']
    "c_i" = rep(0, nrow(sample_data))
    "b_i" = sample_data[, 'Biomass']
    "v_i" = rep(0, length(b_i))
    "a_i" = sample_data[, 'Swept']
    "PredTF_i" = sample_data[, 'Pred_TF']
    "X1config_cp" = Xconfig_list[['X1config_cp']]
    "X2config_cp" = Xconfig_list[['X2config_cp']]
    "covariate_data" = covariate_data
    "X1_formula" = X1_formula
    "X2_formula" = X2_formula
    "X_contrasts" = X_contrasts
    "catchability_data" = catchability_data
    "Q1_formula" = Q1_formula
    "Q2_formula" = Q2_formula
    "Q1config_k" = Xconfig_list[['Q1config_k']]
    "Q2config_k" = Xconfig_list[['Q2config_k']]
    "newtonsteps" = 1
    "getsd" = TRUE
    "getReportCovariance" = TRUE
    "run_model" = FALSE
    "test_fit" = FALSE
    "Use_REML" = FALSE
    "getJointPrecision" = FALSE
    "index_shapes" = index_shapes
    # Now, go into make_extrapolation_info_aja
  }
  # Collect pass-through arguments, flattening any pre-grouped argument lists
  # so that e.g. `extrapolation_args = list(...)` works as well as bare names
  extra_args = list(...)
  extra_args = c(extra_args, extra_args$extrapolation_args, extra_args$spatial_args, extra_args$optimize_args, extra_args$model_args)
  data_frame = data.frame(Lat_i = Lat_i, Lon_i = Lon_i, a_i = a_i, v_i = v_i, b_i = b_i, t_i = t_i, c_iz = c_iz)
  year_labels = seq(min(t_i), max(t_i))
  years_to_plot = which(year_labels %in% t_i)
  message("\n### Writing output from `fit_model` in directory: ", working_dir)
  dir.create(working_dir, showWarnings = FALSE, recursive = TRUE)
  capture.output(settings, file = file.path(working_dir, "settings.txt"))
  # Step 1: extrapolation grid
  message("\n### Making extrapolation-grid")
  extrapolation_args_default = list(Region = settings$Region, strata.limits = settings$strata.limits, zone = settings$zone, max_cells = settings$max_cells, DirPath = working_dir)
  extrapolation_args_input = combine_lists(input = extra_args, default = extrapolation_args_default, args_to_use = formalArgs(make_extrapolation_info_aja))
  extrapolation_list = do.call(what = make_extrapolation_info_aja, args = extrapolation_args_input)
  # Step 2: spatial information (knots / INLA mesh)
  message("\n### Making spatial information")
  spatial_args_default = list(grid_size_km = settings$grid_size_km, n_x = settings$n_x, Method = Method, Lon_i = Lon_i, Lat_i = Lat_i, Extrapolation_List = extrapolation_list, DirPath = working_dir, Save_Results = TRUE, fine_scale = settings$fine_scale, knot_method = settings$knot_method)
  spatial_args_input = combine_lists(input = extra_args, default = spatial_args_default, args_to_use = c(formalArgs(make_spatial_info), formalArgs(INLA::inla.mesh.create)))
  spatial_list = do.call(what = make_spatial_info, args = spatial_args_input)
  # Step 3: data inputs for TMB
  message("\n### Making data object")
  if (missing(covariate_data))
    covariate_data = NULL
  if (missing(catchability_data))
    catchability_data = NULL
  data_args_default = list(Version = settings$Version, FieldConfig = settings$FieldConfig, OverdispersionConfig = settings$OverdispersionConfig, RhoConfig = settings$RhoConfig, VamConfig = settings$VamConfig, ObsModel = settings$ObsModel, c_iz = c_iz, b_i = b_i, a_i = a_i, v_i = v_i, s_i = spatial_list$knot_i - 1, t_i = t_i, spatial_list = spatial_list, Options = settings$Options, Aniso = settings$use_anisotropy, X1config_cp = X1config_cp, X2config_cp = X2config_cp, covariate_data = covariate_data, X1_formula = X1_formula, X2_formula = X2_formula, Q1config_k = Q1config_k, Q2config_k = Q2config_k, catchability_data = catchability_data, Q1_formula = Q1_formula, Q2_formula = Q2_formula)
  data_args_input = combine_lists(input = extra_args, default = data_args_default)
  data_list = do.call(what = make_data, args = data_args_input)
  # Step 4: TMB object
  message("\n### Making TMB object")
  model_args_default = list(TmbData = data_list, RunDir = working_dir, Version = settings$Version, RhoConfig = settings$RhoConfig, loc_x = spatial_list$loc_x, Method = spatial_list$Method, build_model = build_model)
  model_args_input = combine_lists(input = extra_args, default = model_args_default, args_to_use = formalArgs(make_model))
  tmb_list = do.call(what = make_model, args = model_args_input)
  # Early return of assembled inputs when the model is not built or not run
  # (scalar `||` -- both flags are length-1 logicals)
  if (run_model == FALSE || build_model == FALSE) {
    input_args = list(extra_args = extra_args, extrapolation_args_input = extrapolation_args_input, model_args_input = model_args_input, spatial_args_input = spatial_args_input, data_args_input = data_args_input)
    Return = list(data_frame = data_frame, extrapolation_list = extrapolation_list, spatial_list = spatial_list, data_list = data_list, tmb_list = tmb_list, year_labels = year_labels, years_to_plot = years_to_plot, settings = settings, input_args = input_args)
    class(Return) = "fit_model"
    return(Return)
  }
  if (silent == TRUE)
    tmb_list$Obj$env$beSilent()
  # Sanity-check gradients at starting values before optimizing; a zero
  # gradient usually means a parameter is not identified
  if (test_fit == TRUE) {
    message("\n### Testing model at initial values")
    LogLike0 = tmb_list$Obj$fn(tmb_list$Obj$par)
    Gradient0 = tmb_list$Obj$gr(tmb_list$Obj$par)
    if (any(Gradient0 == 0)) {
      message("\n")
      stop("Please check model structure; some parameter has a gradient of zero at starting values\n",
        call. = FALSE)
    } else {
      message("Looks good: All fixed effects have a nonzero gradient")
    }
  }
  # Pass 1: fast optimization (no SD report, no Newton steps) to get good
  # starting values for the full optimization
  message("\n### Estimating parameters")
  optimize_args_default1 = list(lower = tmb_list$Lower, upper = tmb_list$Upper, loopnum = 2)
  optimize_args_default1 = combine_lists(default = optimize_args_default1, input = extra_args, args_to_use = formalArgs(TMBhelper::fit_tmb))
  optimize_args_input1 = list(obj = tmb_list$Obj, savedir = NULL, newtonsteps = 0, bias.correct = FALSE, control = list(eval.max = 10000, iter.max = 10000, trace = 1), quiet = TRUE, getsd = FALSE)
  optimize_args_input1 = combine_lists(default = optimize_args_default1, input = optimize_args_input1, args_to_use = formalArgs(TMBhelper::fit_tmb))
  parameter_estimates = do.call(what = TMBhelper::fit_tmb, args = optimize_args_input1)
  # Scalar `&&` (short-circuits): skip VAST::check_fit when unavailable
  if (exists("check_fit") && test_fit == TRUE) {
    problem_found = VAST::check_fit(parameter_estimates)
    if (problem_found == TRUE) {
      message("\n")
      stop("Please change model structure to avoid problems with parameter estimates and then re-try; see details in `?check_fit`\n", call. = FALSE)
    }
  }
  # Pass 2: full optimization starting from pass-1 estimates (SD report,
  # optional bias correction, Newton steps)
  optimize_args_default2 = list(obj = tmb_list$Obj, lower = tmb_list$Lower, upper = tmb_list$Upper, savedir = working_dir, bias.correct = settings$bias.correct, newtonsteps = newtonsteps, bias.correct.control = list(sd = FALSE, split = NULL, nsplit = 1, vars_to_correct = settings$vars_to_correct), control = list(eval.max = 10000, iter.max = 10000, trace = 1), loopnum = 1, getJointPrecision = TRUE)
  optimize_args_input2 = combine_lists(input = extra_args, default = optimize_args_default2, args_to_use = formalArgs(TMBhelper::fit_tmb))
  optimize_args_input2 = combine_lists(input = list(startpar = parameter_estimates$par), default = optimize_args_input2)
  parameter_estimates = do.call(what = TMBhelper::fit_tmb, args = optimize_args_input2)
  if ("par" %in% names(parameter_estimates)) {
    Report = tmb_list$Obj$report()
    ParHat = tmb_list$Obj$env$parList(parameter_estimates$par)
  } else {
    Report = ParHat = "Model is not converged"
  }
  input_args = list(extra_args = extra_args, extrapolation_args_input = extrapolation_args_input, model_args_input = model_args_input, spatial_args_input = spatial_args_input, optimize_args_input1 = optimize_args_input1, optimize_args_input2 = optimize_args_input2, data_args_input = data_args_input)
  # BUG FIX: `Q2config_k` was previously (and incorrectly) set to `Q1config_k`
  Return = list(data_frame = data_frame, extrapolation_list = extrapolation_list, spatial_list = spatial_list, data_list = data_list, tmb_list = tmb_list, parameter_estimates = parameter_estimates, Report = Report, ParHat = ParHat, year_labels = year_labels, years_to_plot = years_to_plot, settings = settings, input_args = input_args, X1config_cp = X1config_cp, X2config_cp = X2config_cp, covariate_data = covariate_data, X1_formula = X1_formula, X2_formula = X2_formula, Q1config_k = Q1config_k, Q2config_k = Q2config_k, catchability_data = catchability_data, Q1_formula = Q1_formula, Q2_formula = Q2_formula)
  # Record lm() calls so effects::Effect can later reconstruct model matrices
  # for the catchability (Q) and density (X) covariate formulas
  Return$effects = list()
  if (!is.null(catchability_data)) {
    catchability_data_full = data.frame(catchability_data, linear_predictor = 0)
    Q1_formula_full = update.formula(Q1_formula, linear_predictor ~ . + 0)
    call_Q1 = lm(Q1_formula_full, data = catchability_data_full)$call
    Q2_formula_full = update.formula(Q2_formula, linear_predictor ~ . + 0)
    call_Q2 = lm(Q2_formula_full, data = catchability_data_full)$call
    Return$effects = c(Return$effects, list(call_Q1 = call_Q1, call_Q2 = call_Q2, catchability_data_full = catchability_data_full))
  }
  if (!is.null(covariate_data)) {
    covariate_data_full = data.frame(covariate_data, linear_predictor = 0)
    X1_formula_full = update.formula(X1_formula, linear_predictor ~ . + 0)
    call_X1 = lm(X1_formula_full, data = covariate_data_full)$call
    X2_formula_full = update.formula(X2_formula, linear_predictor ~ . + 0)
    call_X2 = lm(X2_formula_full, data = covariate_data_full)$call
    Return$effects = c(Return$effects, list(call_X1 = call_X1, call_X2 = call_X2, covariate_data_full = covariate_data_full))
  }
  class(Return) = "fit_model"
  return(Return)
}
#' @title Read a region shapefile
#'
#' @description Reads the shapefile found in `region_shapefile_dir` and
#' returns it as an `sf` object.
#'
#' @param region_shapefile_dir Directory containing a `.shp` file (plus its
#'   sidecar files).
#'
#' @return An `sf` object with the region geometry.
vast_read_region_shape <- function(region_shapefile_dir) {
  # `pattern` is a regular expression, not a glob: anchor it so sidecar files
  # such as `foo.shp.xml` are not matched (previously ".shp" matched any file
  # containing those characters anywhere in its name)
  region_file <- list.files(region_shapefile_dir, pattern = "\\.shp$", full.names = TRUE)
  region_sf <- st_read(region_file)
  return(region_sf)
}
#' @title Read and combine index-region shapefiles
#'
#' @description Reads every shapefile in `index_shapefiles_dir` and row-binds
#' them into a single `sf` object.
#'
#' @param index_shapefiles_dir Directory containing one or more `.shp` files.
#'
#' @return A single `sf` object with the rows of all index shapefiles.
vast_read_index_shapes <- function(index_shapefiles_dir) {
  # Interactive-debugging paths: never executed
  if (FALSE) {
    index_shapefiles_dir <- "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/index_shapefiles/"
    index_shapefiles_dir <- "~/data/supporting/index_shapefiles/"
  }
  # Anchored regex: match `.shp` files only, not sidecars like `.shp.xml`
  index_files <- list.files(index_shapefiles_dir, pattern = "\\.shp$", full.names = TRUE)
  # Read all shapefiles, then bind once (avoids quadratic growth from
  # repeated bind_rows inside a loop)
  index_shapes_out <- bind_rows(lapply(index_files, st_read))
  return(index_shapes_out)
}
######
## Getting abundance index time series
######
#' @title Extract abundance-index time series from a fitted VAST model
#'
#' @description Pulls the (optionally bias-corrected) abundance index and its
#' standard error from the TMB SD report, reshapes the category x time x
#' region array into a long data frame, attaches dates, writes a CSV to
#' `out_dir`, and returns the data frame.
#'
#' @param vast_fit A VAST `fit_model` object.
#' @param all_times Vector of all time-step labels (one per model time step)
#'   used to build the `Date` column.
#' @param nice_category_names Human-readable category name used in the output
#'   file name.
#' @param index_scale Either "raw" (`Index_ctl`) or "log" (`ln_Index_ctl`).
#' @param out_dir Directory where the CSV is written.
#'
#' @return A long data frame with Time, Category, Index_Region,
#'   Index_Estimate, Index_SD, Date, and Year columns.
get_vast_index_timeseries <- function(vast_fit, all_times, nice_category_names, index_scale = c("raw", "log"), out_dir) {
  # Resolve index_scale to a single choice. With the unvalidated length-2
  # default, `if (index_scale == "raw")` errors on R >= 4.2 and the output
  # file name would embed both values.
  index_scale <- match.arg(index_scale)
  # Interactive-debugging scaffold: never executed
  if (FALSE) {
    tar_load(vast_fit)
    all_times = levels(vast_seasonal_data$VAST_YEAR_SEASON)
    nice_category_names = "American lobster"
    index_scale = "raw"
    out_dir = paste0(res_root, "tables")
    tar_load(vast_fit)
    vast_fit = vast_fitted
    nice_category_names = "Atlantic halibut"
    index_scale = "raw"
    out_dir = here::here("scratch/aja/TargetsSDM/results/tables")
  }
  TmbData <- vast_fit$data_list
  Sdreport <- vast_fit$parameter_estimates$SD
  # Time series steps
  time_ind <- 1:TmbData$n_t
  time_labels <- sort(unique(vast_fit$data_frame$t_i)[time_ind])
  # Index regions
  index_regions_ind <- 1:TmbData$n_l
  index_regions <- vast_fit$settings$strata.limits$STRATA[index_regions_ind]
  # Categories
  categories_ind <- 1:TmbData$n_c
  # Pull estimates and standard errors from the SD report
  SD <- TMB::summary.sdreport(Sdreport)
  SD_stderr <- TMB:::as.list.sdreport(Sdreport, what = "Std. Error", report = TRUE)
  SD_estimate <- TMB:::as.list.sdreport(Sdreport, what = "Estimate", report = TRUE)
  if (vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)) {
    SD_estimate_biascorrect <- TMB:::as.list.sdreport(Sdreport, what = "Std. (bias.correct)", report = TRUE)
  }
  # Category x time x region x (Estimate, Std. Error) arrays
  Index_ctl = log_Index_ctl = array(NA, dim = c(unlist(TmbData[c('n_c', 'n_t', 'n_l')]), 2), dimnames = list(categories_ind, time_labels, index_regions, c('Estimate', 'Std. Error')))
  if (index_scale == "raw") {
    if (vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)) {
      Index_ctl[] <- SD[which(rownames(SD) == "Index_ctl"), c('Est. (bias.correct)', 'Std. Error')]
    } else {
      Index_ctl[] <- SD[which(rownames(SD) == "Index_ctl"), c('Estimate', 'Std. Error')]
    }
    index_res_array <- Index_ctl
  } else {
    if (vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)) {
      log_Index_ctl[] <- SD[which(rownames(SD) == "ln_Index_ctl"), c('Est. (bias.correct)', 'Std. Error')]
    } else {
      log_Index_ctl[] <- SD[which(rownames(SD) == "ln_Index_ctl"), c('Estimate', 'Std. Error')]
    }
    index_res_array <- log_Index_ctl
  }
  # Reshape each category's time x region slices to long format, then bind
  # once at the end (avoids quadratic growth from repeated bind_rows)
  res_list <- vector("list", length(categories_ind))
  for (i in seq_along(categories_ind)) {
    index_array_temp <- index_res_array[i, , , ]
    index_res_temp_est <- data.frame("Time" = as.numeric(rownames(index_array_temp[, , 1])), "Category" = categories_ind[i], index_array_temp[, , 1]) %>%
      pivot_longer(cols = -c(Time, Category), names_to = "Index_Region", values_to = "Index_Estimate")
    index_res_temp_sd <- data.frame("Time" = as.numeric(rownames(index_array_temp[, , 1])), "Category" = categories_ind[i], index_array_temp[, , 2]) %>%
      pivot_longer(cols = -c(Time, Category), names_to = "Index_Region", values_to = "Index_SD")
    # Explicit join keys (previously relied on a natural join, which emits a
    # message and can silently change if column names drift)
    res_list[[i]] <- left_join(index_res_temp_est, index_res_temp_sd, by = c("Time", "Category", "Index_Region"))
  }
  index_res_out <- bind_rows(res_list)
  # Attach a Date factor: one level per time step, repeated across regions.
  # NOTE(review): assumes rows are time-major with all regions consecutive
  # per time step, and length(all_times) == n_t -- confirm for multi-category
  # fits before relying on Date alignment.
  index_res_out$Date <- rep(factor(all_times, levels = all_times), each = length(unique(index_res_out$Index_Region)))
  # Year = leading digits of the time label; the season substring (if any)
  # determines a nominal month/day for plotting
  index_res_out <- index_res_out %>%
    mutate(., Year = as.numeric(gsub("([0-9]+).*$", "\\1", Date)))
  if (any(str_detect(as.character(index_res_out$Date), LETTERS))) {
    index_res_out$Date <- as.Date(paste(index_res_out$Year, ifelse(grepl("SPRING", index_res_out$Date), "-04-15",
      ifelse(grepl("SUMMER", index_res_out$Date), "-07-15", "-10-15")), sep = ""))
  } else {
    index_res_out$Date <- as.Date(paste(index_res_out$Year, "-06-15", sep = ""))
  }
  # Save and return it
  write.csv(index_res_out, file = paste(out_dir, "/Biomass_Index_", index_scale, "_", nice_category_names, ".csv", sep = ""))
  return(index_res_out)
}
#' @title Plot an abundance-index time series
#'
#' @description Plots the index estimate +/- 1 SD over time, colored by index
#' region, saves the figure to `out_dir`, and returns the ggplot object.
#' Only `paneling = "none"` is currently implemented.
#'
#' @param index_res_df Long data frame from `get_vast_index_timeseries()`.
#' @param year_stop Optional upper year bound; rows with `Year >= year_stop`
#'   are dropped.
#' @param index_scale Label ("raw"/"log") used only in the output file name.
#' @param nice_category_names,nice_xlab,nice_ylab Title and axis labels.
#' @param paneling Layout choice; only "none" is implemented.
#' @param color_pal Optional vector of colors, one per index region.
#' @param out_dir Directory where the figure is saved.
#'
#' @return The ggplot object.
plot_vast_index_timeseries <- function(index_res_df, year_stop = NULL, index_scale, nice_category_names, nice_xlab, nice_ylab, paneling = c("category", "index_region", "none"), color_pal = c('#66c2a5', '#fc8d62', '#8da0cb'), out_dir) {
  # Interactive-debugging scaffold: never executed
  if (FALSE) {
    tar_load(biomass_indices)
    index_res_df <- index_res_out
    index_res_df <- biomass_indices
    nice_category_names <- "American lobster"
    nice_xlab = "Year-Season"
    nice_ylab = "Biomass index (metric tons)"
    color_pal = NULL
    paneling <- "none"
    date_breaks <- "5 year"
    out_dir = paste0(res_root, "plots_maps")
  }
  # Resolve paneling to a single value; previously the raw length-3 default
  # made `if (paneling == "none")` error (R >= 4.2) and any non-"none" choice
  # crashed later on an undefined `plot_out`
  paneling <- match.arg(paneling)
  if (paneling != "none") {
    # Fail early with a clear message instead of "object 'plot_out' not found"
    stop("Only `paneling = \"none\"` is currently implemented", call. = FALSE)
  }
  # Color palette: user-supplied, or defaults truncated to the region count
  if (!is.null(color_pal)) {
    colors_use <- color_pal
  } else {
    color_pal <- c('#66c2a5', '#fc8d62', '#8da0cb', '#e78ac3', '#a6d854')
    colors_use <- color_pal[seq_len(length(unique(index_res_df$Index_Region)))]
  }
  # Filter based on years to plot
  if (!is.null(year_stop)) {
    index_res_df <- index_res_df %>%
      filter(., Year < year_stop)
  }
  plot_out <- ggplot() +
    geom_errorbar(data = index_res_df, aes(x = Date, ymin = (Index_Estimate - Index_SD), ymax = (Index_Estimate + Index_SD), color = Index_Region, group = Index_Region)) +
    geom_point(data = index_res_df, aes(x = Date, y = Index_Estimate, color = Index_Region)) +
    scale_color_manual(values = colors_use) +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
    xlab({{nice_xlab}}) +
    ylab({{nice_ylab}}) +
    ggtitle({{nice_category_names}}) +
    theme_bw() +
    theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
  # Save and return the plot
  ggsave(plot_out, file = paste(out_dir, "/Biomass_Index_", index_scale, "_", nice_category_names, ".jpg", sep = ""))
  return(plot_out)
}
######
## Plot parameter effects...
######
#' @title Adapts package \code{effects}
#'
#' @inheritParams effects::Effect
#' @param which_formula which formula to use e.g., \code{"X1"}
#'
#' @rawNamespace S3method(effects::Effect, fit_model)
#' @export
# Compute marginal covariate effects for a fitted VAST model by masquerading
# the VAST fit as an lm-like object that effects::Effect.default understands.
# Extracts the coefficient vector and covariance block for the requested
# formula (X1/X2 density or Q1/Q2 catchability), optionally pads out mapped
# parameters, and forwards everything to effects::Effect.default.
# NOTE: requires `covariate_data_full` and `catchability_data_full` to exist
# in the global environment (see get_vast_covariate_effects, which assigns
# them before calling this).
Effect.fit_model_aja<- function(focal.predictors, mod, which_formula = "X1", pad_values = c(), ...){
# Interactive-debugging scaffold: never executed (wrapped in if(FALSE))
if(FALSE){
tar_load(vast_fit)
focal.predictors = c("Depth", "SST_seasonal", "BT_seasonal")
mod = fit_base
which_formula = "X1"
xlevels = 100
pad_values = c(0)
covariate_data_full<- mod$effects$covariate_data_full
catchability_data_full<- mod$effects$catchability_data_full
}
# Error checks
# NOTE(review): scalar condition uses elementwise `&`; `&&` would be the
# idiomatic scalar operator here
if(mod$data_list$n_c > 1 & which_formula %in% c("X1", "X2")){
stop("`Effect.fit_model` is not currently designed for multivariate models using density covariates")
}
if(!all(c("covariate_data_full", "catchability_data_full") %in% ls(.GlobalEnv))){
stop("Please load `covariate_data_full` and `catchability_data_full` into global memory")
}
if(!requireNamespace("effects")){
stop("please install the effects package")
}
if(!("effects" %in% names(mod))){
stop("`effects` slot not detected in input to `Effects.fit_model`. Please update model using later package version.")
}
# Identify formula-specific stuff: the formula, the name of the coefficient
# vector in the parameter list, and the stored lm() call used to rebuild the
# model matrix
if(which_formula=="X1"){
formula_orig = mod$X1_formula
parname = "gamma1_cp"
mod$call = mod$effects$call_X1
}else if(which_formula=="X2"){
formula_orig = mod$X2_formula
parname = "gamma2_cp"
mod$call = mod$effects$call_X2
}else if(which_formula=="Q1"){
formula_orig = mod$Q1_formula
parname = "lambda1_k"
mod$call = mod$effects$call_Q1
}else if(which_formula=="Q2"){
formula_orig = mod$Q2_formula
parname = "lambda2_k"
mod$call = mod$effects$call_Q2
}else{
stop("Check `which_formula` input")
}
# Extract parameters / covariance
# Subset the fixed-effect estimates and their covariance to this formula's
# coefficients; fall back to a zero matrix when no SD report is available
whichnum = which(names(mod$parameter_estimates$par) == parname)
mod$parhat = mod$parameter_estimates$par[whichnum]
if(is.null(mod$parameter_estimates$SD$cov.fixed)){
mod$covhat = array(0, dim = rep(length(mod$parhat), 2))
} else {
mod$covhat = mod$parameter_estimates$SD$cov.fixed[whichnum, whichnum, drop = FALSE]
}
# # Fill in values that are mapped off
# if(parname %in% names(mod$tmb_list$Obj$env$map)){
# mod$parhat = mod$parhat[mod$tmb_list$Obj$env$map[[parname]]]
# mod$covhat = mod$covhat[mod$tmb_list$Obj$env$map[[parname]], mod$tmb_list$Obj$env$map[[parname]], drop = FALSE]
# mod$parhat = ifelse(is.na(mod$parhat), 0, mod$parhat)
# mod$covhat = ifelse(is.na(mod$covhat), 0, mod$covhat)
# }
# add names
names(mod$parhat)[] = parname
# Insert zeros at `pad_values` positions so the coefficient vector lines up
# with the full model-matrix columns (e.g. a dropped intercept)
if(length(pad_values) != 0){
parhat = rep(NA, length(mod$parhat) + length(pad_values))
parhat[setdiff(1:length(parhat), pad_values)] = mod$parhat
covhat = array(NA, dim = dim(mod$covhat) + rep(length(pad_values), 2))
covhat[setdiff(1:length(parhat), pad_values), setdiff(1:length(parhat), pad_values)] = mod$covhat
mod$parhat = ifelse(is.na(parhat), 0, parhat)
mod$covhat = ifelse(is.na(covhat), 0, covhat)
#parname = c("padded_intercept", parname)
}
#rownames(mod$covhat) = colnames(mod$covhat) = names(mod$parhat)
# Augment stuff
# Dress `mod` up with the lm-like slots Effect.default expects
formula_full = stats::update.formula(formula_orig, linear_predictor ~. + 0)
mod$coefficients = mod$parhat
mod$vcov = mod$covhat
mod$formula = formula_full
mod$family = stats::gaussian(link = "identity")
if( FALSE ){
Tmp = model.matrix(formula_full, data=fit$effects$catchability_data )
}
# Functions for package
family.fit_model = function(x,...) x$family
vcov.fit_model = function(x,...) x$vcov
# dummy functions to make Effect.default work
dummyfuns = list(variance = function(mu) mu, initialize = expression(mustart = y + 0.1), dev.resids = function(...) stats::poisson()$dev.res(...) )
# Replace family (for reasons I don't really understand)
fam = mod$family
for(i in names(dummyfuns)){
if(is.null(fam[[i]])) fam[[i]] = dummyfuns[[i]]
}
# allow calculation of effects ...
if(length(formals(fam$variance)) >1) {
warning("overriding variance function for effects: computed variances may be incorrect")
fam$variance = dummyfuns$variance
}
# Bundle arguments
args = list(call = mod$call, coefficients = mod$coefficients, vcov = mod$vcov, family = fam, formula = formula_full)
# Do call
effects::Effect.default(focal.predictors, mod, ..., sources = args)
}
#' @title Compute covariate effects for both linear predictors
#'
#' @description For each covariate in `params_plot`, computes the marginal
#' effect on the X1 and X2 linear predictors via `Effect.fit_model_aja()`,
#' binds everything into one data frame, saves it as an RDS file, and returns
#' it. Assigns `covariate_data_full` and `catchability_data_full` into the
#' global environment as required by `Effect.fit_model_aja()`.
#'
#' @param vast_fit A VAST `fit_model` object with an `effects` slot.
#' @param params_plot Character vector of covariate names to evaluate.
#' @param params_plot_levels Number of covariate levels (`xlevels`) to
#'   evaluate each effect at.
#' @param effects_pad_values Passed to `Effect.fit_model_aja(pad_values = )`.
#' @param nice_category_names Human-readable name used in the output filename.
#' @param out_dir Directory where the RDS file is written.
#' @param ... Currently unused; retained for interface compatibility.
#'
#' @return A data frame of effect estimates with a `Lin_pred` column
#'   ("X1"/"X2") identifying the linear predictor.
get_vast_covariate_effects <- function(vast_fit, params_plot, params_plot_levels, effects_pad_values, nice_category_names, out_dir, ...) {
  # Interactive-debugging scaffold: never executed
  if (FALSE) {
    tar_load(vast_fit)
    params_plot <- c("Depth", "SST_seasonal", "BT_seasonal")
    params_plot_levels <- 100
    effects_pad_values = c(1)
    nice_category_names = "American lobster"
  }
  # Effect.fit_model_aja() checks for these names in the global environment
  assign("covariate_data_full", vast_fit$effects$covariate_data_full, envir = .GlobalEnv)
  assign("catchability_data_full", vast_fit$effects$catchability_data_full, envir = .GlobalEnv)
  # One data frame per covariate (X1 and X2 stacked), bound once at the end
  # (avoids quadratic growth from repeated bind_rows in a loop)
  pred_dat_list <- vector("list", length(params_plot))
  for (i in seq_along(params_plot)) {
    pred_dat_temp_X1 <- data.frame(Effect.fit_model_aja(focal.predictors = params_plot[i], mod = vast_fit, which_formula = "X1", xlevels = params_plot_levels, pad_values = effects_pad_values)) %>%
      mutate(., "Lin_pred" = "X1")
    pred_dat_temp_X2 <- data.frame(Effect.fit_model_aja(focal.predictors = params_plot[i], mod = vast_fit, which_formula = "X2", xlevels = params_plot_levels, pad_values = effects_pad_values)) %>%
      mutate(., "Lin_pred" = "X2")
    pred_dat_list[[i]] <- bind_rows(pred_dat_temp_X1, pred_dat_temp_X2)
  }
  pred_dat_out <- bind_rows(pred_dat_list)
  # Save and return it
  saveRDS(pred_dat_out, file = paste(out_dir, "/", nice_category_names, "_covariate_effects.rds", sep = ""))
  return(pred_dat_out)
}
#' @title Plot covariate effects from a fitted VAST model
#'
#' @description Plots each covariate's fitted effect (line) and confidence
#' ribbon for both linear predictors, adds a rug of observed covariate values
#' from the fitted data, saves the figure, and returns the ggplot object.
#'
#' @param vast_covariate_effects Data frame from `get_vast_covariate_effects()`
#'   with fit/se/lower/upper/Lin_pred columns plus one column per covariate.
#' @param vast_fit A VAST `fit_model` object (supplies `covariate_data` for
#'   the rug layer).
#' @param nice_category_names Human-readable name used in the output filename.
#' @param out_dir Directory where the figure is saved.
#' @param ... Currently unused; retained for interface compatibility.
#'
#' @return The ggplot object.
plot_vast_covariate_effects <- function(vast_covariate_effects, vast_fit, nice_category_names, out_dir, ...) {
  # Interactive-debugging scaffold: never executed
  if (FALSE) {
    vast_covariate_effects <- read_rds(file = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/tables/American lobster_covariate_effects.rds")
    tar_load(vast_fit)
    vast_covariate_effects = pred_dat_out
    vast_fit = fit_base
    nice_category_names = "American lobster"
    plot_rows = 2
    out_dir = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/plots_maps/"
  }
  # Reshape to long: one row per (linear predictor, covariate, value)
  names_stay <- c("fit", "se", "lower", "upper", "Lin_pred")
  vast_cov_eff_l <- vast_covariate_effects %>%
    pivot_longer(., names_to = "Variable", values_to = "Covariate_Value", -{{names_stay}}) %>%
    drop_na(Covariate_Value)
  # Ribbon (lower/upper bounds) + fitted line, paneled by predictor x covariate
  plot_out <- ggplot() +
    geom_ribbon(data = vast_cov_eff_l, aes(x = Covariate_Value, ymin = lower, ymax = upper), fill = "#bdbdbd") +
    geom_line(data = vast_cov_eff_l, aes(x = Covariate_Value, y = fit)) +
    xlab("Scaled covariate value") +
    ylab("Linear predictor fitted value") +
    facet_grid(Lin_pred ~ Variable, scales = "free") +
    theme_bw() +
    theme(strip.background = element_blank())
  # Rug of observed covariate values from the fitted data; pivot_longer
  # replaces the superseded gather() used previously (same output columns)
  names_keep <- unique(vast_cov_eff_l$Variable)
  samp_dat <- vast_fit$covariate_data %>%
    dplyr::select({{names_keep}}) %>%
    pivot_longer(., cols = everything(), names_to = "Variable", values_to = "Covariate_Value")
  plot_out2 <- plot_out +
    geom_rug(data = samp_dat, aes(x = Covariate_Value))
  # Save and return it
  ggsave(plot_out2, file = paste(out_dir, "/", nice_category_names, "_covariate_effects.jpg", sep = ""))
  return(plot_out2)
}
######
## Plot samples, knots and mesh
######
# Three-panel design plot for a fitted VAST model: (1) a raster of tow-sample
# counts per grid cell, (2) the same map overlaid with the extrapolation-grid
# points, and (3) the same map overlaid with the INLA mesh triangles. Saves
# the combined figure to out_dir and returns it.
# NOTE(review): the panels are combined with `+` (plot_out = p1 + p2 + p3),
# which presumably relies on the patchwork package being attached -- confirm.
vast_plot_design<- function(vast_fit, land, spat_grid, xlim = c(-80, -55), ylim = c(35, 50), land_color = "#f0f0f0", out_dir){
# Interactive-debugging scaffold: never executed (wrapped in if(FALSE))
if(FALSE){
tar_load(vast_fit)
tar_load(land_sf)
spat_grid = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/predict/predict_stack_SST_seasonal_mean.grd"
land = land_sf
xlim = c(-80, -55)
ylim = c(35, 50)
land_color = "#f0f0f0"
vast_fit = vast_fitted
land = land_use
spat_grid = spat_grid
xlim = xlim_use
ylim = ylim_use
land_color = "#f0f0f0"
out_dir = main_dir
}
# Read in raster
# First layer only; rotate() shifts from 0-360 to -180-180 longitudes
spat_grid<- rotate(raster::stack(spat_grid)[[1]])
# Intensity surface of sample locations and then a plot of the knot locations/mesh over the top?
samp_dat<- vast_fit$data_frame %>%
distinct(., Lon_i, Lat_i, .keep_all = TRUE) %>%
st_as_sf(., coords = c("Lon_i", "Lat_i"), remove = FALSE, crs = st_crs(land))
# Count unique tow locations falling in each raster cell
cell_samps<- table(cellFromXY(spat_grid, data.frame("x" = samp_dat$Lon_i, "y" = samp_dat$Lat_i)))
# Put back into raster...
# Zero out all cells, then write the per-cell counts; cells with zero samples
# are set to NA below so they render as white
spat_grid[]<- 0
spat_grid[as.numeric(names(cell_samps))]<- cell_samps
spat_grid_plot<- as.data.frame(spat_grid, xy = TRUE)
names(spat_grid_plot)[3]<- "Samples"
spat_grid_plot$Samples<- ifelse(spat_grid_plot$Samples == 0, NA, spat_grid_plot$Samples)
# Panel 1: tow-sample intensity map
tow_samps<- ggplot() +
geom_tile(data = spat_grid_plot, aes(x = x, y = y, fill = Samples)) +
scale_fill_gradient2(name = "Tow samples", low = "#bdbdbd", high = "#525252", na.value = "white") +
geom_sf(data = land, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = 0) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")) +
ggtitle("Tow samples")
# Knots and mesh...
# Getting spatial information
spat_data<- vast_fit$extrapolation_list
extrap_grid<- data.frame("Lon" = as.numeric(spat_data$Data_Extrap$Lon), "Lat" = as.numeric(spat_data$Data_Extrap$Lat)) %>%
distinct(., Lon, Lat)
# Panel 2: extrapolation-grid points over the sample map
tow_samps_grid<- tow_samps +
geom_point(data = extrap_grid, aes(x = Lon, y = Lat), fill = "#41ab5d", pch = 21, size = 0.75) +
ggtitle("VAST spatial extrapolation grid")
# Get mesh as sf
mesh_sf<- vast_mesh_to_sf(vast_fit, crs_transform = "+proj=longlat +datum=WGS84 +no_defs")$triangles
# Panel 3: INLA mesh triangles over the sample map
tow_samps_mesh<- tow_samps +
geom_sf(data = land, fill = land_color, lwd = 0.2, na.rm = TRUE) +
geom_sf(data = mesh_sf, fill = NA, color = "#41ab5d") +
coord_sf(xlim = xlim, ylim = ylim, expand = 0) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")) +
ggtitle("INLA Mesh")
# Plot em together
plot_out<- tow_samps + tow_samps_grid + tow_samps_mesh
# Save it
ggsave(plot_out, file = paste(out_dir, "/", "samples_grid_knots_plot.jpg", sep = ""), height = 8, width = 11)
return(plot_out)
}
#####
## Plot covariate values
#####
#' @title Plot regional time series of spatio-temporal covariates
#'
#' @description For each covariate raster stack in
#' `predict_covariates_stack_agg`, summarizes the predicted surface by index
#' region and time step, overlays the observed (tow-level) covariate values
#' summarized the same way, and saves one figure per covariate to `out_dir`.
#'
#' @param predict_covariates_stack_agg Directory containing
#'   `predict_stack_*_<summarize>_<ensemble_stat>.grd` raster stacks.
#' @param summarize,ensemble_stat Strings used to select the raster files.
#' @param all_tows_with_all_covs Tow-level data with covariate columns,
#'   `Season_Match`, and begin lon/lat columns.
#' @param regions An `sf` object with a `Region` column defining the index
#'   regions. BUG FIX: the body previously referenced a global
#'   `index_shapefiles` object instead of this parameter, so the function
#'   failed unless that global happened to exist.
#' @param land Currently unused; retained for interface compatibility.
#' @param out_dir Directory where figures are saved.
#'
#' @return Invisibly NULL; called for its file-saving side effect.
plot_spattemp_cov_ts <- function(predict_covariates_stack_agg, summarize = "seasonal", ensemble_stat = "mean", all_tows_with_all_covs, regions, land, out_dir) {
  # Interactive-debugging scaffold: never executed
  if (FALSE) {
    tar_load(predict_covariates_stack_agg_out)
    predict_covariates_stack_agg <- predict_covariates_stack_agg_out
    summarize = "seasonal"
    ensemble_stat = "mean"
    tar_load(all_tows_with_all_covs)
    tar_load(land_sf)
    land = land_sf
    tar_load(index_shapefiles)
    regions <- index_shapefiles
    out_dir <- "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/plots_maps/"
  }
  # Raster stacks of predicted covariate surfaces matching the requested
  # summary/ensemble combination
  rast_files_load <- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd"), full.names = TRUE)
  # One figure per covariate stack
  for (i in seq_along(rast_files_load)) {
    # Covariate name for this file (strip prefix and suffix from file name)
    cov_names_full <- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd"), full.names = FALSE)[i]
    predict_covs_names <- gsub(paste("_", ensemble_stat, ".grd", sep = ""), "", gsub("predict_stack_", "", cov_names_full))
    # Mean predicted value per region per time step.
    # NOTE(review): assumes raster::extract preserves the row order of
    # `regions` so the factor assignment below lines up -- confirm if the
    # region definitions change
    spattemp_summs <- data.frame(raster::extract(raster::rotate(raster::stack(rast_files_load[i])), regions, fun = mean))
    spattemp_summs$Region <- factor(unique(as.character(regions$Region)), levels = c("NMFS_and_DFO", "DFO", "Scotian_Shelf", "NMFS", "Gulf_of_Maine", "Georges_Bank", "Southern_New_England", "Mid_Atlantic_Bight"))
    spattemp_summs <- spattemp_summs %>%
      drop_na(., Region)
    # Gather to long format: one row per (region, time)
    spattemp_summs_df <- spattemp_summs %>%
      pivot_longer(., names_to = "Time", values_to = "Value", -Region)
    # Formatting Time: layer names like "X2015.Spring" -> nominal dates
    spattemp_summs_df <- spattemp_summs_df %>%
      mutate(., Date = gsub("X", "", gsub("[.]", "-", Time)))
    spattemp_summs_df$Date <- as.Date(paste(as.numeric(gsub("([0-9]+).*$", "\\1", spattemp_summs_df$Date)), ifelse(grepl("Spring", spattemp_summs_df$Date), "-04-15", ifelse(grepl("Summer", spattemp_summs_df$Date), "-07-15", ifelse(grepl("Winter", spattemp_summs_df$Date), "-12-15", "-10-15"))), sep = ""))
    # Observed (tow-level) covariate values, with matching nominal dates
    cov_dat <- all_tows_with_all_covs %>%
      dplyr::select(., Season_Match, DECDEG_BEGLON, DECDEG_BEGLAT, {{predict_covs_names}})
    cov_dat$Date <- as.Date(paste(as.numeric(gsub("([0-9]+).*$", "\\1", cov_dat$Season_Match)), ifelse(grepl("Spring", cov_dat$Season_Match), "-04-15", ifelse(grepl("Summer", cov_dat$Season_Match), "-07-15", ifelse(grepl("Winter", cov_dat$Season_Match), "-12-15", "-10-15"))), sep = ""))
    # Assign each tow to a region by point-in-polygon, then summarize
    cov_dat <- cov_dat %>%
      st_as_sf(., coords = c("DECDEG_BEGLON", "DECDEG_BEGLAT"), crs = st_crs(regions), remove = FALSE) %>%
      st_join(., regions, join = st_within) %>%
      st_drop_geometry()
    cov_dat_plot <- cov_dat %>%
      group_by(., Date, Region) %>%
      summarize_at(., .vars = {{predict_covs_names}}, .funs = mean, na.rm = TRUE)
    cov_dat_plot$Region <- factor(cov_dat_plot$Region, levels = c("NMFS_and_DFO", "DFO", "Scotian_Shelf", "NMFS", "Gulf_of_Maine", "Georges_Bank", "Southern_New_England", "Mid_Atlantic_Bight"))
    cov_dat_plot <- cov_dat_plot %>%
      drop_na(., c({{predict_covs_names}}, Region))
    # Plot: Depth uses histograms; the time-varying covariates use per-region
    # time-series lines with observed means overlaid as points
    if (predict_covs_names == "Depth") {
      plot_out <- ggplot() +
        geom_histogram(data = spattemp_summs_df, aes(y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666')) +
        geom_histogram(data = cov_dat_plot, aes(y = Depth), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    if (predict_covs_names == "BS_seasonal") {
      plot_out <- ggplot() +
        geom_line(data = spattemp_summs_df, aes(x = Date, y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666')) +
        geom_point(data = cov_dat_plot, aes(x = Date, y = BS_seasonal), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    if (predict_covs_names == "SS_seasonal") {
      plot_out <- ggplot() +
        geom_line(data = spattemp_summs_df, aes(x = Date, y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666')) +
        geom_point(data = cov_dat_plot, aes(x = Date, y = SS_seasonal), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    if (predict_covs_names == "BT_seasonal") {
      plot_out <- ggplot() +
        geom_line(data = spattemp_summs_df, aes(x = Date, y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666')) +
        geom_point(data = cov_dat_plot, aes(x = Date, y = BT_seasonal), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    if (predict_covs_names == "SST_seasonal") {
      plot_out <- ggplot() +
        geom_line(data = spattemp_summs_df, aes(x = Date, y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666')) +
        geom_point(data = cov_dat_plot, aes(x = Date, y = SST_seasonal), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    ggsave(paste(out_dir, "/", predict_covs_names, "_covariate_plot.jpg", sep = ""), plot_out)
  }
}
#####
## VAST inla mesh to sf object
#####
#' @title Convert VAST INLA mesh to sf object
#'
#' @description Convert inla.mesh to sp objects, totally taken from David Keith here https://github.com/Dave-Keith/Paper_2_SDMs/blob/master/mesh_build_example/convert_inla_mesh_to_sf.R and Finn Lindgren here
# # https://groups.google.com/forum/#!topic/r-inla-discussion-group/z1n1exlZrKM
#'
#' @param vast_fit A fitted VAST model
#' @param crs_transform Optional crs to transform mesh into
#' @return A list with \code{sp} objects for triangles and vertices:
# \describe{
# \item{triangles}{\code{SpatialPolygonsDataFrame} object with the triangles in
# the same order as in the original mesh, but each triangle looping through
# the vertices in clockwise order (\code{sp} standard) instead of
# counterclockwise order (\code{inla.mesh} standard). The \code{data.frame}
# contains the vertex indices for each triangle, which is needed to link to
# functions defined on the vertices of the triangulation.
# \item{vertices}{\code{SpatialPoints} object with the vertex coordinates,
# in the same order as in the original mesh.}
# }
#' @export
#
# Convert the anisotropic INLA mesh stored in a fitted VAST model into sf
# objects: a set of triangle polygons and the vertex points. The triangle
# vertex order is reversed (counterclockwise inla.mesh convention -> clockwise
# sp convention) during construction. Optionally reprojects the output.
vast_mesh_to_sf <- function(vast_fit, crs_transform = "+proj=longlat +datum=WGS84 +no_defs") {
# Interactive-debugging scaffold: never executed (wrapped in if(FALSE))
if(FALSE){
tar_load(vast_fit)
crs_transform = "+proj=longlat +datum=WGS84 +no_defs"
}
require(sp) || stop("Install sp, else thine code shan't work for thee")
require(sf) || stop('Install sf or this code will be a mess')
require(INLA) || stop("You need the R-INLA package for this, note that it's not crantastic...
install.packages('INLA', repos=c(getOption('repos'), INLA='https://inla.r-inla-download.org/R/stable'), dep=TRUE)")
# Get the extrapolation mesh information from the vast_fitted object
# Attach the extrapolation projection to the mesh (list element 'crs')
mesh<- vast_fit$spatial_list$MeshList$anisotropic_mesh
mesh['crs']<- vast_fit$extrapolation_list$projargs
# Grab the CRS if it exists, NA is fine (NULL spits a warning, but is also fine)
crs <- sp::CRS(mesh$crs)
# Make sure the CRS isn't a geocentric one, which is won't be if yo look up geocentric..
#isgeocentric <- identical(inla.as.list.CRS(crs)[["proj"]], "geocent")
isgeocentric <- inla.crs_is_geocent(mesh$crs)
# Look up geo-centric coordinate systems, nothing we'll need to worry about, but stop if so
if (isgeocentric || (mesh$manifold == "S2")) {
stop(paste0(
"'sp and sf' don't support storing polygons in geocentric coordinates.\n",
"Convert to a map projection with inla.spTransform() before calling inla.mesh2sf()."))
}
# This pulls out from the mesh the triangles as polygons, this was the piece I couldn't figure out.
# mesh$graph$tv is the triangle-vertex index matrix (one row per triangle);
# indexing c(1, 3, 2, 1) reverses the winding and closes each ring
triangles <- SpatialPolygonsDataFrame(Sr = SpatialPolygons(
lapply(
1:nrow(mesh$graph$tv),
function(x) {
tv <- mesh$graph$tv[x, , drop = TRUE]
Polygons(list(Polygon(mesh$loc[tv[c(1, 3, 2, 1)],1:2,drop = FALSE])),ID = x)
}
),
proj4string = crs
),
data = as.data.frame(mesh$graph$tv[, c(1, 3, 2), drop = FALSE]),
match.ID = FALSE
)
# This one is easy, just grab the vertices (points)
vertices <- SpatialPoints(mesh$loc[, 1:2, drop = FALSE], proj4string = crs)
# Make these sf objects
triangles <- st_as_sf(triangles)
vertices <- st_as_sf(vertices)
# Transform?
# Reproject both layers when a target CRS is supplied
if(!is.null(crs_transform)){
triangles<- st_transform(triangles, crs = crs_transform)
vertices<- st_transform(vertices, crs = crs_transform)
}
# Add your output list.
return_sf<- list(triangles = triangles, vertices = vertices)
return(return_sf)
}
#' @title Plot VAST model spatial and spatio-temporal surfaces
#'
#' @description Creates either a panel plot or a gif of VAST model spatial or
#' spatio-temporal parameter surfaces or derived quantities. Knot/grid values are
#' interpolated onto a regular lon/lat grid, clipped to `mask`, and mapped with ggplot2.
#' Density (`D_gct`) surfaces are shown on the log(x + 1) scale.
#'
#' @param vast_fit A VAST `fit_model` object.
#' @param spatial_var An estimated spatial coefficient or predicted value. Currently works
#'   for `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`,
#'   `Epsilon1_gct`, `Epsilon2_gct`.
#' @param nice_category_names Category (e.g., species) name used in output file names.
#' @param mask An sf polygon; interpolated cells falling outside it are dropped.
#' @param all_times A vector of all of the unique time steps available from the VAST fitted model.
#' @param plot_times Either NULL to make a plot for each time in `all_times` or a vector
#'   of the times to plot, which must be a subset of `all_times`.
#' @param land_sf Land sf object.
#' @param xlim A two element vector with the min and max longitudes.
#' @param ylim A two element vector with the min and max latitudes.
#' @param panel_or_gif Either "panel" or "gif" indicating how plots across time steps are displayed.
#' @param out_dir Output directory to save the panel plot or gif.
#' @param land_color Fill color for land polygons.
#' @param panel_cols,panel_rows Panel grid dimensions when `panel_or_gif = "panel"`.
#' @param ... Currently unused; kept for forward compatibility.
#'
#' @return A ggplot object for a static surface or a panel plot; for a gif, the gif is
#'   written to `out_dir` and nothing is returned visibly.
#'
#' @export
vast_fit_plot_spatial<- function(vast_fit, spatial_var, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
  # Interactive debugging scaffold; never executed (guarded by if(FALSE))
  if (FALSE) {
    tar_load(vast_fit)
    template = raster("~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/HighResTemplate.grd")
    tar_load(vast_seasonal_data)
    all_times = as.character(levels(vast_seasonal_data$VAST_YEAR_SEASON))
    plot_times = NULL
    tar_load(land_sf)
    tar_load(region_shapefile)
    mask = region_shapefile
    land_color = "#d9d9d9"
    res_data_path = "~/Box/RES_Data/"
    xlim = c(-85, -55)
    ylim = c(30, 50)
    panel_or_gif = "gif"
    panel_cols = NULL
    panel_rows = NULL
    vast_fit = vast_fitted
    spatial_var = "D_gct"
    nice_category_names = "Atlantic halibut"
    mask = region_shape
    all_times = as.character(unique(vast_sample_data$EST_YEAR))
    plot_times = NULL
    land_sf = land_use
    xlim = xlim_use
    ylim = ylim_use
    panel_or_gif = "panel"
    out_dir = here::here("", "results/plots_maps")
    land_color = "#d9d9d9"
    panel_cols = 6
    panel_rows = 7
  }
  # Validate spatial_var: only a specific subset of report slots is supported
  valid_vars <- c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct", "Omega1_gc", "Omega2_gc", "Epsilon1_gct", "Epsilon2_gct")
  if (!spatial_var %in% valid_vars) {
    stop("Check `spatial_var` input. Currently must be one of `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`.")
  }
  # Pull the prediction array for the requested variable; densities on log(x + 1) scale
  pred_array <- vast_fit$Report[[spatial_var]]
  if (spatial_var == "D_gct") {
    pred_array <- log(pred_array + 1)
  }
  # Resolve the time steps to plot.
  # BUG FIX: original was `all_times[which(all_times) %in% plot_times]`, which errors
  # because which() requires a logical argument and the %in% was misplaced. We want the
  # (ordered) subset of all_times appearing in plot_times.
  if (!is.null(plot_times)) {
    plot_times <- all_times[all_times %in% plot_times]
  } else {
    plot_times <- all_times
  }
  # Extrapolation-grid locations included in the model domain
  spat_data <- vast_fit$extrapolation_list
  loc_g <- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
  CRS_orig <- sp::CRS("+proj=longlat")
  CRS_proj <- sp::CRS(spat_data$projargs)
  # Crop land polygons to the plotting window
  land_sf <- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # One plot per time step (time is the last array dimension)
  rasts_out <- vector("list", dim(pred_array)[length(dim(pred_array))])
  # Shared fill limits so all panels/frames use the same scale; probability- and
  # density-like variables are bounded below by 0
  if (spatial_var %in% c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct")) {
    rast_lims <- c(0, round(max(pred_array) + 0.0000001, 2))
  } else {
    rast_lims <- c(min(pred_array), max(pred_array))
  }
  # Helper: interpolate knot/grid values onto a regular lon-lat grid, then keep only
  # cells intersecting the mask polygon. Returns a data frame with x, y, z columns.
  interp_and_mask <- function(z_vals) {
    data_df <- data.frame(loc_g, z = z_vals)
    pred_df <- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
    pred_df_interp <- interp(pred_df[, 1], pred_df[, 2], pred_df[, 3], duplicate = "mean", extrap = TRUE,
      xo = seq(-87.99457, -57.4307, length = 115),
      yo = seq(22.27352, 48.11657, length = 133))
    pred_df_interp_final <- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
    pred_sp <- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
    pred_df_temp <- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE), ]
    coords_keep <- as.data.frame(st_coordinates(pred_df_temp))
    row.names(coords_keep) <- NULL
    pred_df_use <- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
    names(pred_df_use) <- c("x", "y", "z")
    pred_df_use
  }
  # Helper: build the map for one surface, annotated with `label` (variable name or time step)
  make_map <- function(pred_df_use, label) {
    ggplot() +
      geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = spatial_var, option = "viridis", na.value = "transparent", limits = rast_lims) +
      annotate("text", x = -65, y = 37.5, label = label) +
      geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
      coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
      theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
  }
  if (length(dim(pred_array)) == 2) {
    # Static (spatial-only) surface, e.g. Omega: one map labeled with the variable name
    plot_out <- make_map(interp_and_mask(pred_array), spatial_var)
    ggsave(filename = paste(out_dir, "/", nice_category_names, "_", spatial_var, ".png", sep = ""), plot_out, width = 11, height = 8, units = "in")
    return(plot_out)
  } else {
    # Spatio-temporal surface: one map per time step, labeled with the time step.
    # NOTE(review): only the first category (second array dimension) is plotted — confirm
    # this is intended for multi-category fits.
    for (tI in seq_len(dim(pred_array)[3])) {
      rasts_out[[tI]] <- make_map(interp_and_mask(pred_array[, 1, tI]), plot_times[tI])
    }
    if (panel_or_gif == "panel") {
      # Panel plot with a single shared legend
      all_plot <- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
      ggsave(filename = paste0(out_dir, "/", nice_category_names, "_", spatial_var, ".png"), all_plot, width = 11, height = 8, units = "in")
      return(all_plot)
    } else {
      # Render each frame in sequence and capture the device output as a gif
      plot_loop_func <- function(plot_list) {
        for (i in seq_along(plot_list)) {
          print(plot_list[[i]])
        }
      }
      invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_", spatial_var, ".gif"), delay = 0.75, progress = FALSE))
    }
  }
}
#' @title Get VAST point predictions
#'
#' @description Generates a dataframe with observed values and VAST model predictions at
#' the sample locations.
#'
#' @param vast_fit A VAST `fit_model` object.
#' @param use_PredTF_only Logical TRUE/FALSE. If TRUE, then only the locations specified
#'   as PredTF == 1 will be extracted. Otherwise, all points will be included.
#' @param nice_category_names Category (e.g., species) name used in the output file name.
#' @param out_dir Output directory to save the dataset
#'
#' @return A dataframe with lat, lon, observations and model predictions; also saved as
#'   an .rds file in `out_dir`.
#'
#' @export
vast_get_point_preds<- function(vast_fit, use_PredTF_only, nice_category_names, out_dir){
  # Interactive debugging scaffold; never executed (guarded by if(FALSE))
  if (FALSE) {
    vast_fit <- vast_fitted
    use_PredTF_only <- FALSE
    nice_category_names <- "Atlantic halibut"
    out_dir <- here::here("", "results/tables")
  }
  # Observed sample data: location, biomass, and time step
  obs_df <- vast_fit$data_frame[, c("Lat_i", "Lon_i", "b_i", "t_i")]
  names(obs_df) <- c("Lat", "Lon", "Biomass", "Year")
  # Presence/absence indicator (1/0) derived from observed biomass
  obs_df$Presence <- as.numeric(obs_df$Biomass > 0)
  # The model report holds the per-sample predicted quantities
  report <- vast_fit$Report
  samp_pred_out <- data.frame(obs_df, "Predicted_ProbPresence" = report$R1_i, "Predicted_Biomass" = report$D_i)
  # PredTF_i == 1 flags samples used only for prediction, NOT in the likelihood
  samp_pred_out$PredTF_i <- vast_fit$data_list$PredTF_i
  # Optionally keep only the prediction-only samples
  if (use_PredTF_only) {
    samp_pred_out <- samp_pred_out[samp_pred_out$PredTF_i == 1, ]
    row.names(samp_pred_out) <- NULL
  }
  # Save and return
  saveRDS(samp_pred_out, paste0(out_dir, "/", nice_category_names, "_obs_pred.rds"))
  return(samp_pred_out)
}
#' @title Get VAST knot predictions for spatial or spatio-temporal parameters/derived quantities
#'
#' @description Generates a long dataframe with VAST model spatial or spatio-temporal
#' parameters/derived quantities at each extrapolation-grid location and time step.
#' Density (`D_gct`) values are returned on the log(x + 1) scale.
#'
#' @param vast_fit A VAST `fit_model` object.
#' @param spatial_var An estimated spatial coefficient or predicted value. Currently works for `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`.
#' @param nice_category_names Category (e.g., species) name used in the output file name.
#' @param out_dir Output directory to save the dataframe
#'
#' @return A dataframe with Lon, Lat, Time, and the requested variable's value, one row
#'   per grid location and time step; also saved as an .rds file in `out_dir`.
#'
#' @export
vast_get_extrap_spatial<- function(vast_fit,spatial_var, nice_category_names, out_dir){
# Interactive debugging scaffold; never executed (guarded by if(FALSE))
if(FALSE){
vast_fit = vast_fitted
spatial_var = "D_gct"
nice_category_names<- "Atlantic_halibut"
out_dir = here::here("", "results/tables")
}
# First check the spatial_var, only a certain subset are being used...
if(!spatial_var %in% c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct", "Omega1_gc", "Omega2_gc", "Epsilon1_gct", "Epsilon2_gct")){
stop(print("Check `spatial_var` input. Currently must be one of `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`."))
}
# Pull the prediction array for the requested variable from the model report.
# ({{spatial_var}} in plain R evaluation is just nested braces, i.e. spatial_var itself.)
pred_array<- vast_fit$Report[[{{spatial_var}}]]
# Densities are reported on the log(x + 1) scale
if(spatial_var == "D_gct"){
pred_array<- log(pred_array+1)
}
# Time-step labels from the fitted model
times<- as.character(vast_fit$year_labels)
# Extrapolation-grid locations included in the model domain
spat_data<- vast_fit$extrapolation_list
loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
# Flatten the array to grid-location rows x time-step columns.
# NOTE(review): as.data.frame() on a g x c x t array only yields one column per time
# step when n_c == 1 — confirm this holds for multi-category fits.
df_out_temp<- as.data.frame(pred_array)
colnames(df_out_temp) = paste0("Time_", times)
df_out_temp<- cbind(loc_g, df_out_temp)
# Pivot to long format: one row per (Lon, Lat, Time); the value column is named after spatial_var
df_out<- df_out_temp %>%
pivot_longer(., cols = !c("Lon", "Lat"), names_to = "Time", values_to = {{spatial_var}}) %>%
arrange(., Time, Lon, Lat)
# Save and return it
saveRDS(df_out, paste0(out_dir, "/", nice_category_names, "_", spatial_var, "_df.rds"))
return(df_out)
}
#' @title Plot VAST center of gravity
#'
#' @description Extracts the estimated center of gravity (`mean_Z_ctm`) and its standard
#' errors from a fitted VAST model, optionally averages season-level estimates within
#' years, and produces a map of COG locations over time plus longitude and latitude
#' time-series panels.
#'
#' @param vast_fit A VAST `fit_model` object.
#' @param all_times A vector of labels for every model time step; assumed to have one
#'   entry per estimated time step.
#' @param summarize Logical; if TRUE, season-level COG estimates are averaged within each year.
#' @param land_sf Land sf object
#' @param xlim A two element vector with the min and max longitudes
#' @param ylim A two element vector with the min and max latitudes
#' @param nice_category_names Species name used in the output file name
#' @param land_color Fill color for land polygons
#' @param color_pal Optional vector of colors, one per category
#' @param out_dir Output directory to save the figure
#'
#' @return A patchwork plot (COG map over lon/lat time series); also saved as a .jpg in `out_dir`.
#'
#' @export
vast_plot_cog<- function(vast_fit, all_times, summarize = TRUE, land_sf, xlim, ylim, nice_category_names, land_color = "#d9d9d9", color_pal = NULL, out_dir){
# Interactive debugging scaffold; never executed (guarded by if(FALSE))
if(FALSE){
tar_load(vast_fit)
all_times = levels(vast_seasonal_data$VAST_YEAR_SEASON)
tar_load(land_sf)
land_sf = land_sf
xlim = c(-80, -55)
ylim = c(35, 50)
nice_category_names<- nice_category_names
land_color = "#d9d9d9"
out_dir = paste0(res_root, "plots_maps")
vast_fit = vast_fitted
all_times = unique(vast_sample_data$Year)
summarize = TRUE
land_sf = land_use
xlim = xlim_use
ylim = ylim_use
nice_category_names = "Atlantic_halibut"
land_color = "#d9d9d9"
color_pal = NULL
out_dir = here::here("", "results/plots_maps")
}
# TMB data (dimensions) and the sdreport (estimates + standard errors)
TmbData<- vast_fit$data_list
Sdreport<- vast_fit$parameter_estimates$SD
# Time series steps
time_ind<- 1:TmbData$n_t
time_labels<- sort(unique(vast_fit$data_frame$t_i)[time_ind])
# Categories
categories_ind<- 1:TmbData$n_c
# Get the index information from the sdreport (matrix of Estimate / Std. Error rows)
SD<- TMB::summary.sdreport(Sdreport)
SD_stderr<- TMB:::as.list.sdreport(Sdreport, what = "Std. Error", report = TRUE)
SD_estimate<- TMB:::as.list.sdreport(Sdreport, what = "Estimate", report = TRUE)
# Bias-corrected estimates are only available when the model was run with bias correction
if(vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)){
SD_estimate_biascorrect<- TMB:::as.list.sdreport(Sdreport, what = "Std. (bias.correct)", report = TRUE)
}
# Populate a (category x time x Lon/Lat x Estimate/SE) array with the COG values.
# NOTE(review): the flat assignment below assumes the "mean_Z_ctm" rows of SD are
# ordered to match the array's dimension order — confirm against the sdreport layout.
mean_Z_ctm = array(NA, dim = c(unlist(TmbData[c('n_c','n_t')]), 2, 2), dimnames = list(categories_ind, time_labels, c('Lon', 'Lat'), c('Estimate','Std. Error')))
mean_Z_ctm[] = SD[which(rownames(SD) == "mean_Z_ctm"), c('Estimate','Std. Error')]
index_res_array = mean_Z_ctm
# Data manipulation to get out of the array and to something more "plottable":
# one long dataframe with Lon/Lat estimates and +/- 1 SD bounds per category and time.
for(i in seq_along(categories_ind)){
# NOTE(review): this subset drops the category dimension; behavior assumes the
# remaining slices index as [time, Lon/Lat, Estimate/SE] — confirm when n_c > 1.
index_array_temp<- index_res_array[i, , , ]
index_res_temp_est<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,1])
index_res_temp_sd<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,2])
names(index_res_temp_sd)[3:4]<- c("Lon_SD", "Lat_SD")
index_res_temp_out<- index_res_temp_est %>%
left_join(., index_res_temp_sd) %>%
mutate(., "Lon_Min" = Lon - Lon_SD,
"Lon_Max" = Lon + Lon_SD,
"Lat_Min" = Lat - Lat_SD,
"Lat_Max" = Lat + Lat_SD)
if(i == 1){
index_res_out<- index_res_temp_out
} else {
index_res_out<- bind_rows(index_res_out, index_res_temp_out)
}
}
# Get date info instead of time..
# if(!is.null(vast_fit$covariate_data)){
# year_start<- min(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))
#
# if(any(grepl("Season", vast_fit$X1_formula))){
# seasons<- nlevels(unique(vast_fit$covariate_data$Season))
# if(seasons == 3){
# time_labels_use<- paste(rep(seq(from = year_start, to = max(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))), each = 3), rep(c("SPRING", "SUMMER", "FALL")), sep = "-")
# }
# } else {
# time_labels_use<- paste(rep(seq(from = year_start, to = max(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))), each = 1), rep(c("FALL")), sep = "-")
# }
#
# index_res_out$Date<- factor(all_times, levels = time_labels_use)
#
# } else {
# # Just basic years...
# time_labels_use<- seq(from = min(vast_fit$year_labels), to = max(vast_fit$year_labels))
# index_res_out$Date<- factor(time_labels_use, levels = time_labels_use)
# }
#
# Attach the supplied time labels as an ordered factor.
# NOTE(review): assumes length(all_times) equals nrow(index_res_out) — confirm for
# multi-category fits where rows are stacked per category.
index_res_out$Date<- factor(all_times, levels = all_times)
# Extract the leading year from each label (e.g. "1985_SPRING" -> 1985)
index_res_out<- index_res_out %>%
mutate(., Year = as.numeric(gsub("([0-9]+).*$", "\\1", Date)))
# Season labels present -> mid-season dates; otherwise use mid-year for annual models
if(any(str_detect(as.character(index_res_out$Date), LETTERS))){
index_res_out$Date<- as.Date(paste(index_res_out$Year, ifelse(grepl("SPRING", index_res_out$Date), "-04-15",
ifelse(grepl("SUMMER", index_res_out$Date), "-07-15", "-10-15")), sep = ""))
} else {
index_res_out$Date<- as.Date(paste(index_res_out$Year, "-06-15", sep = ""))
}
# Optionally average season-level estimates within each year (per category)
if(summarize){
index_res_out<- index_res_out %>%
group_by(., Year, Category, .drop = FALSE) %>%
summarize_at(., vars(c("Lon", "Lat", "Lon_Min", "Lon_Max", "Lat_Min", "Lat_Max")), mean, na.rm = TRUE)
}
# Making our plots...
# First, the map: COG points in the model's projected CRS...
cog_sf<- st_as_sf(index_res_out, coords = c("Lon", "Lat"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS)
# ...transformed to match the land layer's CRS (WGS84)
cog_sf_wgs84<- st_transform(cog_sf, st_crs(land_sf))
# Base map: COG points colored by year over land
cog_plot<- ggplot() +
geom_sf(data = cog_sf_wgs84, aes(fill = Year), size = 2, shape = 21) +
scale_fill_viridis_c(name = "Year", limits = c(min(cog_sf_wgs84$Year), max(cog_sf_wgs84$Year))) +
geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
# Now, the lon/lat time series: reproject the estimate and the +/- 1 SD bounds to WGS84
lon_lat_df<- cog_sf_wgs84 %>%
data.frame(st_coordinates(.))
lon_lat_min<- st_as_sf(index_res_out, coords = c("Lon_Min", "Lat_Min"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS) %>%
st_transform(., st_crs(land_sf)) %>%
data.frame(st_coordinates(.)) %>%
dplyr::select(c("X", "Y"))
names(lon_lat_min)<- c("Lon_Min_WGS", "Lat_Min_WGS")
lon_lat_max<- st_as_sf(index_res_out, coords = c("Lon_Max", "Lat_Max"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS) %>%
st_transform(., st_crs(land_sf)) %>%
data.frame(st_coordinates(.)) %>%
dplyr::select(c("X", "Y"))
names(lon_lat_max)<- c("Lon_Max_WGS", "Lat_Max_WGS")
lon_lat_df<- cbind(lon_lat_df, lon_lat_min, lon_lat_max)
# NOTE(review): positional rename — columns 8:9 are assumed to be the X/Y coordinates
# from st_coordinates(); confirm if the columns of index_res_out ever change.
names(lon_lat_df)[8:9]<- c("Lon", "Lat")
lon_lat_df$Date<- as.Date(paste0(lon_lat_df$Year, "-06-15"))
# Line colors: user palette, or a default palette trimmed to the number of categories
if(!is.null(color_pal)){
colors_use<- color_pal
} else {
color_pal<- c('#66c2a5','#fc8d62','#8da0cb','#e78ac3','#a6d854')
colors_use<- color_pal[1:length(unique(lon_lat_df$Category))]
}
# Longitude time series with the +/- 1 SD ribbon
lon_ts<- ggplot() +
geom_ribbon(data = lon_lat_df, aes(x= Date, ymin = Lon_Min_WGS, ymax = Lon_Max_WGS), fill = '#66c2a5', alpha = 0.3) +
geom_line(data = lon_lat_df, aes(x = Date, y = Lon), color = '#66c2a5', lwd = 2) +
#scale_fill_manual(name = "Category", values = '#66c2a5') +
scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
ylab("Center of longitude") +
xlab("Date") +
theme_bw() +
theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# Latitude time series with the +/- 1 SD ribbon
lat_ts<- ggplot() +
geom_ribbon(data = lon_lat_df, aes(x= Date, ymin = Lat_Min_WGS, ymax = Lat_Max_WGS), fill = '#66c2a5', alpha = 0.3) +
geom_line(data = lon_lat_df, aes(x = Date, y = Lat), color = '#66c2a5', lwd = 2) +
#scale_fill_manual(name = "Category", values = '#66c2a5') +
scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
ylab("Center of latitude") +
xlab("Date") +
theme_bw() +
theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# Stack the map over the two time-series panels (patchwork)
plot_out<- (cog_plot) / (lon_ts + lat_ts) + plot_layout(ncol = 1, nrow = 2, widths = c(0.75, 1), heights = c(0.75, 1))
# Save and return it
ggsave(plot_out, file = paste(out_dir, "/COG_", "_", nice_category_names, ".jpg", sep = ""))
return(plot_out)
}
|
/R/vast_functions.R
|
no_license
|
Dave-Keith/TargetsSDM
|
R
| false
| false
| 147,955
|
r
|
#### Common Resources ####
# Load the prediction-template raster grid and return its cell coordinates as a
# two-column dataframe (longitude, latitude), dropping empty cells.
pred_template_load<- function(pred_template_dir){
  # Interactive debugging scaffold; never executed
  if (FALSE) {
    tar_load(pred_template_dir)
  }
  # Read the template grid from disk
  template_rast <- raster(file.path(pred_template_dir, "mod_pred_template.grd"))
  # Flatten to a coordinate dataframe and drop cells with missing values
  template_df <- as.data.frame(template_rast, xy = TRUE)
  template_df <- template_df[complete.cases(template_df), c("x", "y")]
  row.names(template_df) <- NULL
  names(template_df) <- c("longitude", "latitude")
  return(template_df)
}
# Load the high-resolution raster template stored in `high_res_dir`.
high_res_load <- function(high_res_dir) {
  template_path <- file.path(high_res_dir, "HighResTemplate.grd")
  raster(template_path)
}
#### Functions ####
####
#' @title Make VAST prediction dataframe
#'
#' @description This function creates a dataframe of prediction covariates to combine
#' with the other VAST data: dynamic covariate raster stacks are loaded, masked,
#' reshaped to a long dataframe, augmented with dummy sample columns, optionally joined
#' with extra (static) covariates, filtered by depth, and rescaled.
#'
#' @param predict_covariates_stack_agg The directory holding processed covariate raster stacks
#' @param extra_covariates_stack Either NULL or a stack of additional (static) covariates to extract at the prediction points
#' @param covs_rescale Character vector of covariate names to rescale using `rescale_params`
#' @param rescale_params Either NULL or a named vector containing "<cov>_Mean"/"<cov>_SD" entries used to center and scale covariates
#' @param depth_cut Depth threshold; prediction points deeper than this are dropped
#' @param mask Shapefile mask
#' @param summarize Currently, either "annual" or "seasonal" to indicate whether each dynamic raster stack should be summarized to an annual or seasonal time scale
#' @param ensemble_stat Either the climate model ensemble statistic to use when working with climate model projections, or NULL. Used to select input files and name the output file
#' @param fit_seasons Character vector of seasons retained for prediction
#' @param fit_year_min Minimum fitted year (start of the year-season factor levels)
#' @param fit_year_max Maximum fitted year; only prediction rows for years after this are kept
#' @param pred_years Years to predict to; rows are kept through max(pred_years)
#' @param out_dir Directory to save the prediction dataframe
#'
#' @return A dataframe with prediction information. This file is also saved in out_dir.
#'
#' @export
make_vast_predict_df<- function(predict_covariates_stack_agg, extra_covariates_stack, covs_rescale = c("Depth", "BS_seasonal", "BT_seasonal", "SS_seasonal", "SST_seasonal"), rescale_params, depth_cut, mask, summarize, ensemble_stat, fit_seasons, fit_year_min, fit_year_max, pred_years, out_dir){
  # Interactive debugging scaffold; never executed (guarded by if(FALSE))
  if (FALSE) {
    tar_load(predict_covariates_stack_agg_out)
    predict_covariates_stack_agg <- predict_covariates_stack_agg_out
    tar_load(static_covariates_stack)
    extra_covariates_stack = static_covariates_stack
    tar_load(rescale_params)
    tar_load(region_shapefile)
    mask = region_shapefile
    summarize <- "seasonal"
    ensemble_stat <- "mean"
    fit_year_min = fit_year_min
    fit_year_max = fit_year_max
    pred_years = pred_years
    out_dir = here::here("scratch/aja/TargetsSDM/data/predict")
    covs_rescale = c("Depth", "BS_seasonal", "BT_seasonal", "SS_seasonal", "SST_seasonal")
  }
  ####
  ## Need to figure out what to do about depth here!!!
  # Get raster stack covariate files matching the time scale and ensemble statistic
  rast_files_load <- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd$"), full.names = TRUE)
  # Get variable names from the file names
  cov_names_full <- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd$"), full.names = FALSE)
  predict_covs_names <- gsub(paste("_", ensemble_stat, ".grd$", sep = ""), "", gsub("predict_stack_", "", cov_names_full))
  # PERF FIX: load each covariate stack ONCE (previously every stack was re-read from
  # disk on every loop iteration)
  cov_stacks <- lapply(rast_files_load, raster::stack)
  n_times <- raster::nlayers(cov_stacks[[1]])
  # Preallocate and bind once after the loop instead of growing with bind_rows()
  pred_covs_list <- vector("list", n_times)
  # Looping through prediction stack time steps
  for (i in seq_len(n_times)) {
    # Get the time index
    time_ind <- i
    # Stack this time step's layer from each covariate; rotate() shifts 0-360 longitudes to -180-180
    pred_covs_stack_temp <- rotate(raster::stack(cov_stacks[[1]][[time_ind]], cov_stacks[[2]][[time_ind]], cov_stacks[[3]][[time_ind]], cov_stacks[[4]][[time_ind]]))
    # Mask out values outside area of interest
    pred_covs_stack_temp <- raster::mask(pred_covs_stack_temp, mask = mask)
    # Some processing to keep observations within our area of interest and get things in
    # a "tidy-er" prediction dataframe. Layer names carry "<year>.<season>" prefixes.
    time_name <- sub('.[^.]*$', '', names(pred_covs_stack_temp))
    names(pred_covs_stack_temp) <- paste(time_name, predict_covs_names, sep = "_")
    pred_covs_df_temp <- as.data.frame(pred_covs_stack_temp, xy = TRUE) %>%
      drop_na()
    # Clean raster-derived names ("X1985.Spring_..." -> "1985_Spring_...")
    colnames(pred_covs_df_temp)[2:ncol(pred_covs_df_temp)] <- gsub("X", "", gsub("[.]", "_", colnames(pred_covs_df_temp)[2:ncol(pred_covs_df_temp)]))
    colnames(pred_covs_df_temp)[1:2] <- c("DECDEG_BEGLON", "DECDEG_BEGLAT")
    # Long format, split the "<year>_<season>_<variable>" names, then one column per variable
    pred_covs_df_out_temp <- pred_covs_df_temp %>%
      pivot_longer(., -c(DECDEG_BEGLON, DECDEG_BEGLAT), names_to = c("variable"), values_to = "value") %>%
      separate(., variable, into = c("EST_YEAR", "SEASON", "variable"), sep = "_", extra = "merge") %>%
      pivot_wider(., names_from = variable, values_from = value)
    # Adding in some other columns we will want to match up easily with 'vast_data_out';
    # dummy sample values flag these rows as prediction-only (PredTF = TRUE)
    pred_covs_df_out_temp <- pred_covs_df_out_temp %>%
      mutate(., EST_YEAR = as.numeric(EST_YEAR),
        DATE = paste(EST_YEAR, case_when(
          SEASON == "Winter" ~ "12-16",
          SEASON == "Spring" ~ "03-16",
          SEASON == "Summer" ~ "07-16",
          SEASON == "Fall" ~ "09-16"), sep = "-"),
        SURVEY = "DUMMY",
        SVVESSEL = "DUMMY",
        NMFS_SVSPP = "DUMMY",
        DFO_SPEC = "DUMMY",
        PRESENCE = 1,
        BIOMASS = 1,
        ABUNDANCE = 1,
        ID = paste("DUMMY", DATE, sep = ""),
        PredTF = TRUE)
    pred_covs_list[[i]] <- pred_covs_df_out_temp
  }
  pred_covs_out <- bind_rows(pred_covs_list)
  # Only going to keep information from fit_year_max through pred_years...
  pred_covs_out_final <- pred_covs_out %>%
    dplyr::filter(., EST_YEAR > fit_year_max & EST_YEAR <= max(pred_years))
  # VAST factor columns: year covariate is capped at fit_year_max; seasons are upper-cased
  pred_covs_out_final <- pred_covs_out_final %>%
    mutate(., #VAST_YEAR_COV = EST_YEAR,
      VAST_YEAR_COV = ifelse(EST_YEAR > fit_year_max, fit_year_max, EST_YEAR),
      VAST_SEASON = case_when(
        SEASON == "Spring" ~ "SPRING",
        SEASON == "Summer" ~ "SUMMER",
        SEASON == "Fall" ~ "FALL"
      ),
      "VAST_YEAR_SEASON" = paste(EST_YEAR, VAST_SEASON, sep = "_"))
  # Subset to only seasons of interest...
  pred_covs_out_final <- pred_covs_out_final %>%
    filter(., VAST_SEASON %in% fit_seasons)
  # Need to account for new levels in year season: build the complete, ordered set of
  # year-season levels spanning both the fitted and predicted years
  all_years <- seq(from = fit_year_min, to = max(pred_years), by = 1)
  all_seasons <- fit_seasons
  year_season_set <- expand.grid("SEASON" = all_seasons, "EST_YEAR" = all_years)
  all_year_season_levels <- apply(year_season_set[, 2:1], MARGIN = 1, FUN = paste, collapse = "_")
  pred_covs_out_final <- pred_covs_out_final %>%
    mutate(., "VAST_YEAR_SEASON" = factor(VAST_YEAR_SEASON, levels = all_year_season_levels),
      "VAST_SEASON" = factor(VAST_SEASON, levels = all_seasons))
  # Name rearrangement! Keep only what we need, ID/metadata columns first, covariates after
  cov_names <- names(pred_covs_out_final)[-which(names(pred_covs_out_final) %in% c("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON"))]
  pred_covs_out_final <- pred_covs_out_final %>%
    dplyr::select(., "ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON", {{cov_names}})
  # Any extra covariates will likely be static...
  if (!is.null(extra_covariates_stack)) {
    pred_covs_sf <- points_to_sf(pred_covs_out_final)
    # BUG FIX: previously passed the global `static_covariates_stack` (only defined in
    # the interactive debug block) instead of the `extra_covariates_stack` argument,
    # which failed outside interactive use.
    pred_covs_out_final <- static_extract_wrapper(static_covariates_list = extra_covariates_stack, sf_points = pred_covs_sf, date_col_name = "DATE", df_sf = "df", out_dir = NULL)
  }
  # Apply depth cut (deeper points become NA) and drop NAs
  pred_covs_out_final <- pred_covs_out_final %>%
    mutate(., "Depth" = ifelse(Depth > depth_cut, NA, Depth),
      "Summarized" = summarize,
      "Ensemble_Stat" = ensemble_stat) %>%
    drop_na()
  # Rescale each requested covariate using the stored mean/SD from the fitting data
  if (!is.null(rescale_params)) {
    for (i in seq_along(covs_rescale)) {
      match_mean <- rescale_params[which(names(rescale_params) == paste(covs_rescale[i], "Mean", sep = "_"))]
      match_sd <- rescale_params[which(names(rescale_params) == paste(covs_rescale[i], "SD", sep = "_"))]
      pred_covs_out_final <- pred_covs_out_final %>%
        mutate_at(., covs_rescale[i], .funs = covariate_rescale_func, type = "AJA", center = match_mean, scale = match_sd)
    }
  }
  # Save and return
  saveRDS(pred_covs_out_final, file = paste(out_dir, "/VAST_pred_df_", summarize, "_", ensemble_stat, ".rds", sep = "" ))
  return(pred_covs_out_final)
}
#' @title Make VAST seasonal dataset
#'
#' @description This function reads in a tidy model dataset and does some cleaning and processing to generate a new dataset to accommodate fitting a VAST seasonal (or other intra annual) model. These cleaning and processing steps boil down to creating an ordered, continuous, season-year vector, such that the model can then estimate density even in season-years not surveyed.
#'
#' @param tidy_mod_data = A tidy model datafame with all the information (tows, habitat covariates, species occurrences) needed to fit a species distribution model.
#' @param nmfs_species_code = Numeric NMFS species code
#' @param fit_year_min = Minimum year to keep
#' @param fit_year_max = Maximum year to keep
#' @param pred_df = Either NULL or a dataframe with prediction information as created by `make_vast_predict_df`
#' @param out_dir = Directory to save the tidy model dataframe as an .rds file
#'
#' @return A VAST seasonal dataset, ready to be split into a `sample data` dataframe and a `covariate data` dataframe. This file is also saved in out_dir.
#'
#' @export
make_vast_seasonal_data<- function(tidy_mod_data, fit_seasons, nmfs_species_code, fit_year_min, fit_year_max, pred_years, pred_df, out_dir){
# For debugging
if(FALSE){
tar_load(tidy_mod_data)
nmfs_species_code = nmfs_species_code
fit_year_min = fit_year_min
fit_year_max = fit_year_max
fit_seasons = fit_seasons
pred_years = pred_years
tar_load(vast_predict_df)
pred_df = vast_predict_df
out_dir = here::here("scratch/aja/targets_flow/data/combined/")
tar_load(tidy_mod_data)
fit_seasons
}
# Some work on the time span and seasons
# Previous implementation before trying to include both surveys within a given season
# data_temp<- tidy_mod_data %>%
# filter(., NMFS_SVSPP == nmfs_species_code) %>%
# filter(., EST_YEAR >= fit_year_min & EST_YEAR <= fit_year_max) %>%
# mutate(., "VAST_SEASON" = case_when(
# SURVEY == "DFO" & SEASON == "SPRING" ~ "DFO",
# SURVEY == "NMFS" & SEASON == "SPRING" ~ "SPRING",
# SURVEY == "DFO" & SEASON == "SUMMER" ~ "SUMMER",
# SURVEY == "NMFS" & SEASON == "FALL" ~ "FALL")) %>%
# drop_na(VAST_SEASON)
# New implementatiom...
data_temp<- tidy_mod_data %>%
filter(., NMFS_SVSPP == nmfs_species_code) %>%
filter(., EST_YEAR >= fit_year_min & EST_YEAR <= fit_year_max) %>%
mutate(., "VAST_SEASON" = case_when(
SURVEY == "DFO" & SEASON == "SPRING" ~ "SPRING",
SURVEY == "NMFS" & SEASON == "SPRING" ~ "SPRING",
SURVEY == "DFO" & SEASON == "SUMMER" ~ "SUMMER",
SURVEY == "NMFS" & SEASON == "FALL" ~ "FALL",
SURVEY == "DFO" & SEASON == "FALL" ~ as.character("NA"))) %>%
drop_na(VAST_SEASON)
data_temp<- data_temp %>%
filter(., VAST_SEASON %in% fit_seasons)
# Set of years and seasons. The DFO spring survey usually occurs before the NOAA NEFSC spring survey, so ordering accordingly. Pred year max or fit year max??
all_years<- seq(from = fit_year_min, to = fit_year_max, by = 1)
#all_years<- seq(from = fit_year_min, to = pred_years, by = 1)
all_seasons<- fit_seasons
yearseason_set<- expand.grid("SEASON" = all_seasons, "EST_YEAR" = all_years)
all_yearseason_levels<- apply(yearseason_set[,2:1], MARGIN = 1, FUN = paste, collapse = "_")
# year_set<- sort(unique(data_temp$EST_YEAR))
# season_set<- c("DFO", "SPRING", "FALL")
#
# # Create a grid with all unique combinations of seasons and years and then combine these into one "year_season" variable
# yearseason_grid<- expand.grid("SEASON" = season_set, "EST_YEAR" = year_set)
# yearseason_levels<- apply(yearseason_grid[, 2:1], MARGIN = 1, FUN = paste, collapse = "_")
# yearseason_labels<- round(yearseason_grid$EST_YEAR + (as.numeric(factor(yearseason_grid$VAST_SEASON, levels = season_set))-1)/length(season_set), digits = 1)
#
# Similar process, but for the observations
yearseason_i<- apply(data_temp[, c("EST_YEAR", "VAST_SEASON")], MARGIN = 1, FUN = paste, collapse = "_")
yearseason_i<- factor(yearseason_i, levels = all_yearseason_levels)
# Add the year_season factor column to our sampling_data data set
data_temp$VAST_YEAR_SEASON<- yearseason_i
data_temp$VAST_SEASON = factor(data_temp$VAST_SEASON, levels = all_seasons)
# VAST year
data_temp$VAST_YEAR_COV<- ifelse(data_temp$EST_YEAR > fit_year_max, fit_year_max, data_temp$EST_YEAR)
#data_temp$VAST_YEAR_COV<- data_temp$EST_YEAR
data_temp$PredTF<- FALSE
# Ordering...
cov_names<- names(data_temp)[-which(names(data_temp) %in% c("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON"))]
cov_names<- cov_names[-which(cov_names == "Season_Match")]
data_temp<- data_temp %>%
dplyr::select("ID", "DATE", "EST_YEAR", "SEASON", "SURVEY", "SVVESSEL", "DECDEG_BEGLAT", "DECDEG_BEGLON", "NMFS_SVSPP", "DFO_SPEC", "PRESENCE", "BIOMASS", "ABUNDANCE", "PredTF", "VAST_YEAR_COV", "VAST_SEASON", "VAST_YEAR_SEASON", {{cov_names}})
# Make dummy data for all year_seasons to estimate gaps in sampling if needed
dummy_data<- data.frame("ID" = sample(data_temp$ID, size = 1), "DATE" = mean(data_temp$DATE, na.rm = TRUE), "EST_YEAR" = yearseason_set[,'EST_YEAR'], "SEASON" = yearseason_set[,'SEASON'], "SURVEY" = "DUMMY", "SVVESSEL" = "DUMMY", "DECDEG_BEGLAT" = mean(data_temp$DECDEG_BEGLAT, na.rm = TRUE), "DECDEG_BEGLON" = mean(data_temp$DECDEG_BEGLON, na.rm = TRUE), "NMFS_SVSPP" = "DUMMY", "DFO_SPEC" = "DUMMY", "PRESENCE" = 1, "BIOMASS" = 1, "ABUNDANCE" = 1, "PredTF" = TRUE, "VAST_YEAR_COV" = yearseason_set[,'EST_YEAR'], "VAST_SEASON" = yearseason_set[,'SEASON'], "VAST_YEAR_SEASON" = all_yearseason_levels)
# Add in "covariates"
col_ind<- ncol(dummy_data)
for(i in seq_along(cov_names)){
col_ind<- col_ind+1
cov_vec<- unlist(data_temp[,{{cov_names}}[i]])
dummy_data[,col_ind]<- mean(cov_vec, na.rm = TRUE)
names(dummy_data)[col_ind]<- {{cov_names}}[i]
}
# Combine with original dataset
vast_data_out<- rbind(data_temp, dummy_data)
vast_data_out$VAST_YEAR_COV<- factor(vast_data_out$VAST_YEAR_COV, levels = seq(from = fit_year_min, to = fit_year_max, by = 1))
#vast_data_out$VAST_YEAR_COV<- factor(vast_data_out$VAST_YEAR_COV, levels = seq(from = fit_year_min, to = pred_years, by = 1))
# If we have additional years that we want to predict to and NOT Fit too, we aren't quite done just yet...
if(!is.null(pred_df)){
# Name work...
pred_df<- pred_df %>%
dplyr::select(., -Summarized, -Ensemble_Stat)
# Add those -- check names first
check_names<- all(colnames(pred_df) %in% colnames(vast_data_out)) & all(colnames(vast_data_out) %in% colnames(pred_df))
if(!check_names){
print("Check data and prediction column names, they don't match")
stop()
} else {
pred_df_bind<- pred_df %>%
dplyr::select(., colnames(vast_data_out))
# # We only need one observation for each of the times...
pred_df_bind<- pred_df %>%
dplyr::select(., colnames(vast_data_out)) %>%
distinct(., ID, .keep_all = TRUE)
vast_data_out<- rbind(vast_data_out, pred_df_bind)
}
}
# Save and return it
saveRDS(vast_data_out, file = paste(out_dir, "vast_data.rds", sep = "/"))
return(vast_data_out)
}
#' @title Make VAST sample dataset
#'
#' @description This function creates a VAST sample dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = A data frame of seasonal survey observations. Must contain the columns `VAST_YEAR_SEASON` (factor), `DECDEG_BEGLAT`, `DECDEG_BEGLON`, `BIOMASS`, `SURVEY` and `PredTF`.
#' @param fit_seasons = Currently unused by this function; retained (with a NULL default) so existing pipeline calls that pass it positionally keep working.
#' @param out_dir = Directory in which "vast_sample_data.rds" is saved.
#'
#' @return A sample dataframe that includes all of the "sample" or species occurrence information. This file is also saved in out_dir.
#'
#' @export
make_vast_sample_data<- function(vast_seasonal_data, fit_seasons = NULL, out_dir){
  # For debugging
  if(FALSE){
    tar_load(vast_seasonal_data)
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # Select columns we want from the "full" vast_seasonal_data dataset.
  # "Year" is the 0-based index of the year-season factor level, since VAST
  # expects contiguous integer time steps starting at 0.
  # "Swept" is the assumed area swept per tow: 0.0384 for NMFS tows and 0.0404
  # for DFO tows (per "Marine fish diversity on the Scotian Shelf, Canada") --
  # TODO confirm these constants and their units against survey documentation.
  vast_samp_dat<- data.frame(
    "Year" = as.numeric(vast_seasonal_data$VAST_YEAR_SEASON)-1,
    "Lat" = vast_seasonal_data$DECDEG_BEGLAT,
    "Lon" = vast_seasonal_data$DECDEG_BEGLON,
    "Biomass" = vast_seasonal_data$BIOMASS,
    "Swept" = ifelse(vast_seasonal_data$SURVEY == "NMFS", 0.0384, 0.0404),
    "Pred_TF" = vast_seasonal_data$PredTF
  )
  # Save a copy to disk, then return the dataframe
  saveRDS(vast_samp_dat, file = file.path(out_dir, "vast_sample_data.rds"))
  return(vast_samp_dat)
}
#' @title Make VAST covariate dataset
#'
#' @description This function creates a VAST covariate dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = A data frame of seasonal survey observations with the covariate columns `Depth`, `SST_seasonal`, `BT_seasonal`, `BS_seasonal` and `SS_seasonal`, plus `VAST_YEAR_SEASON`, `VAST_YEAR_COV`, `VAST_SEASON`, `DECDEG_BEGLAT` and `DECDEG_BEGLON`.
#' @param out_dir = Directory in which "vast_covariate_data.rds" is saved.
#'
#' @return A dataframe that includes the covariate information at each unique sample. This file is also saved in out_dir.
#'
#' @export
make_vast_covariate_data<- function(vast_seasonal_data, out_dir){
  # For debugging
  if(FALSE){
    tar_load(vast_seasonal_data)
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # NOTE(review): an earlier comment said covariates for "DUMMY" observations
  # are prevented from informing the knots here, but no such masking happens --
  # the data are copied through unchanged. Confirm whether masking was intended.
  vast_seasonal_data_temp<- vast_seasonal_data
  # Select columns we want from the "full" vast_seasonal_data dataset.
  # "Year" is the 0-based index of the year-season factor level.
  vast_cov_dat<- data.frame(
    "Year" = as.numeric(vast_seasonal_data_temp$VAST_YEAR_SEASON)-1,
    "Year_Cov" = vast_seasonal_data_temp$VAST_YEAR_COV,
    "Season" = vast_seasonal_data_temp$VAST_SEASON,
    "Depth" = vast_seasonal_data_temp$Depth,
    "SST_seasonal" = vast_seasonal_data_temp$SST_seasonal,
    "BT_seasonal" = vast_seasonal_data_temp$BT_seasonal,
    "BS_seasonal" = vast_seasonal_data_temp$BS_seasonal,
    "SS_seasonal" = vast_seasonal_data_temp$SS_seasonal,
    "Lat" = vast_seasonal_data_temp$DECDEG_BEGLAT,
    "Lon" = vast_seasonal_data_temp$DECDEG_BEGLON
  )
  # Save a copy to disk, then return the dataframe
  saveRDS(vast_cov_dat, file = file.path(out_dir, "vast_covariate_data.rds"))
  return(vast_cov_dat)
}
#' @title Make VAST catchability dataset
#'
#' @description This function creates a VAST catchability dataset to pass into calls to `VAST::fit_model`.
#'
#' @param vast_seasonal_data = A data frame of seasonal survey observations containing `VAST_YEAR_SEASON`, `VAST_YEAR_COV`, `VAST_SEASON`, `DECDEG_BEGLAT`, `DECDEG_BEGLON` and `SURVEY`.
#' @param out_dir = Directory in which "vast_catchability_data.rds" is saved.
#'
#' @return A dataframe with one row per sample holding the catchability covariate information. This file is also saved in out_dir.
#'
#' @export
make_vast_catchability_data<- function(vast_seasonal_data, out_dir){
  # For debugging
  if(FALSE){
    vast_seasonal_data
    out_dir = here::here("scratch/aja/targets_flow/data/dfo/combined")
  }
  # Pull just the columns the catchability model needs. "Year" is the 0-based
  # year-season time index; "Survey" becomes a factor with an explicit "DUMMY"
  # level so the fill-in observations keep their own coding.
  dat<- vast_seasonal_data
  vast_catch_dat<- data.frame(
    "Year" = as.numeric(dat$VAST_YEAR_SEASON)-1,
    "Year_Cov" = dat$VAST_YEAR_COV,
    "Season" = dat$VAST_SEASON,
    "Lat" = dat$DECDEG_BEGLAT,
    "Lon" = dat$DECDEG_BEGLON,
    "Survey" = factor(dat$SURVEY, levels = c("NMFS", "DFO", "DUMMY"))
  )
  # Write a copy to disk, then hand the dataframe back
  saveRDS(vast_catch_dat, file = paste(out_dir, "vast_catchability_data.rds", sep = "/"))
  return(vast_catch_dat)
}
#' @title Read in shapefile
#'
#' @description A short convenience wrapper that reads a polygon shapefile from a file path.
#'
#' @param polyshape_path = File path to a geospatial vector polygon file (.shp extension) specifying the location and shape of the area of interest.
#'
#' @return SF polygon
#'
#' @export
read_polyshape<- function(polyshape_path){
  # For debugging
  if(FALSE){
    polyshape_path = "~/Box/RES_Data/Shapefiles/NELME_regions/NELME_sf.shp"
  }
  # Read the polygon layer with sf::st_read and return it directly
  return(st_read(polyshape_path))
}
####
#' @title Make VAST extrapolation grid settings from a shapefile
#'
#' @description Create a list of with information defining the extrapolation grid and used by subsequent VAST functions, leveraging code here: https://github.com/James-Thorson-NOAA/VAST/wiki/Creating-an-extrapolation-grid.
#'
#' @param region_shapefile = A geospatial vector sf polygon file, specifying the location and shape of the area of of spatial domain
#' @param index_shapes = A multipolygon geospatial vector sf polygon file, specifying sub regions of interest. Grid locations are assigned to their subregion within the total spatial domain.
#' @param strata.limits = NOTE(review): currently unused inside this function; retained so the signature matches companion functions. Confirm whether it should be applied here.
#' @param cell_size = The size of grid in meters (since working in UTM). This will control the resolution of the extrapolation grid.
#'
#' @return Tagged list containing extrapolation grid settings needed to fit a VAST model of species occurrence.
#'
#' @export
vast_make_extrap_grid<- function(region_shapefile, index_shapes, strata.limits, cell_size){
  # For debugging
  if(FALSE){
    tar_load(index_shapefiles)
    index_shapes = index_shapefiles
    strata.limits = strata_use
    cell_size = 25000
  }
  # Transform crs of shapefile to common WGS84 lon/lat format.
  region_wgs84<- st_transform(region_shapefile, crs = "+proj=longlat +lat_0=90 +lon_0=180 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0 ")
  # Get UTM zone from the longitudinal midpoint of the region's bounding box
  lon<- sum(st_bbox(region_wgs84)[c(1,3)])/2
  utm_zone<- floor((lon + 180)/6)+1
  # Transform to the UTM zone
  crs_utm<- st_crs(paste0("+proj=utm +zone=", utm_zone, " +ellps=WGS84 +datum=WGS84 +units=m +no_defs "))
  region_utm<- st_transform(region_wgs84, crs = crs_utm)
  # Make extrapolation grid with sf: regular grid of cell-center points
  region_grid<- st_as_sf(st_make_grid(region_utm, cellsize = cell_size, what = "centers"), crs = crs_utm)
  # Now keep only the points that fall within the shape polygon.
  # (A redundant second st_intersects() call that built an unused "points_keep"
  # dataframe has been removed -- it doubled the most expensive step.)
  region_grid<- region_grid %>%
    mutate(., "in_poly" = st_intersects(region_grid, region_utm, sparse = FALSE)) %>%
    filter(., in_poly == TRUE)
  # Convert back to WGS84 lon/lat, as that is what VAST expects; tag each grid
  # point with the index sub-region it falls within and its cell area in km^2.
  extrap_grid<- region_grid %>%
    st_transform(., crs = "+proj=longlat +lat_0=90 +lon_0=180 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0 ") %>%
    st_join(., index_shapes, join = st_within) %>%
    mutate(., "Lon" = as.numeric(st_coordinates(.)[,1]),
           "Lat" = as.numeric(st_coordinates(.)[,2])) %>%
    st_drop_geometry() %>%
    dplyr::select(., Lon, Lat, Region) %>%
    mutate(., Area_km2=((cell_size/1000)^2),
           STRATA = factor(Region, levels = index_shapes$Region, labels = index_shapes$Region))
  # Return it
  return(extrap_grid)
}
####
#' @title Make VAST model settings
#'
#' @description Create a list of model settings needed to fit a VAST model for species occurrence, largely copied from VAST::make_settings
#'
#' @param extrap_grid = User created extrapolation grid from vast_make_extrap_grid. NOTE(review): currently unused inside this function -- confirm whether it can be dropped from callers.
#' @param n_knots = Number of knots (passed to make_settings as n_x).
#' @param FieldConfig = A vector defining the number of spatial (Omega) and spatio-temporal (Epsilon) factors to include in the model for each of the linear predictors. For each factor, possible values range from 0 (which effectively turns off a given factor), to the number of categories being modeled. If FieldConfig < number of categories, VAST estimates common factors and then loading matrices.
#' @param RhoConfig = A vector defining the temporal structure of intercepts (Beta) and spatio-temporal (Epsilon) variation for each of the linear predictors. See `VAST::make_data` for options.
#' @param OverdispersionConfig = Vector controlling vessel/tow-level overdispersion; see `VAST::make_data`.
#' @param bias.correct = Logical boolean determining if Epsilon bias-correction should be done.
#' @param knot_method = Knot placement method ("samples" or "grid"), passed through to make_settings.
#' @param inla_method = Mesh construction Method (e.g. "Mesh" or "Barrier") written into the returned settings.
#' @param Options = Tagged vector to turn on or off specific options (e.g., SD_site_logdensity, Effective area, etc)
#' @param strata.limits = Strata definitions passed through to make_settings.
#'
#' @return Tagged list containing settings needed to fit a VAST model of species occurrence.
#'
#' @export
vast_make_settings <- function(extrap_grid, n_knots, FieldConfig, RhoConfig, OverdispersionConfig, bias.correct, knot_method, inla_method, Options, strata.limits){
  # For debugging
  if(FALSE){
    tar_load(vast_extrap_grid)
    extrap_grid = vast_extrap_grid
    FieldConfig = c("Omega1" = 1, "Epsilon1" = 1, "Omega2" = 1, "Epsilon2" = 1)
    RhoConfig = c("Beta1" = 3, "Beta2" = 3, "Epsilon1" = 2, "Epsilon2" = 2)
    OverdispersionConfig = c(0, 0)
    bias.correct = FALSE
    Options = c("Calculate_Range"=TRUE)
    strata.limits = strata_use
    n_knots = 400
    knot_method = "samples"
    inla_method = "Barrier"
  }
  # Run FishStatsUtils::make_settings. ObsModel = c(2, 1) is hard-coded here.
  settings_out<- make_settings(n_x = n_knots, Region = "User", purpose = "index2", FieldConfig = FieldConfig, RhoConfig = RhoConfig, ObsModel = c(2, 1), OverdispersionConfig = OverdispersionConfig, bias.correct = bias.correct, knot_method = knot_method, treat_nonencounter_as_zero = FALSE, strata.limits = strata.limits)
  settings_out$Method<- inla_method
  # Overlay any user-supplied Options on the defaults, then rebuild settings
  # so the merged Options are carried through make_settings' own processing.
  options_new<- settings_out$Options
  if(!is.null(Options)){
    for(i in seq_along(Options)){
      options_adjust_i<- Options[i]
      # Assign by name so options absent from the defaults are added instead
      # of triggering a subscript-out-of-bounds error from which() = integer(0).
      options_new[[names(options_adjust_i)]]<- options_adjust_i
    }
    # Bug fix: this rebuild previously used ObsModel = c(1, 1), which silently
    # switched the observation model whenever Options were supplied. Keep it
    # consistent with the first call above (c(2, 1)).
    settings_out<- make_settings(n_x = n_knots, Region = "User", purpose = "index2", FieldConfig = FieldConfig, RhoConfig = RhoConfig, ObsModel = c(2, 1), OverdispersionConfig = OverdispersionConfig, bias.correct = bias.correct, knot_method = knot_method, treat_nonencounter_as_zero = FALSE, strata.limits = strata.limits, Options = options_new)
    settings_out$Method<- inla_method
  }
  # Return it
  return(settings_out)
}
####
#' @title Make VAST spatial info
#'
#' @description Create a tagged list with the extrapolation-grid and spatial (mesh/knot) information VAST needs.
#'
#' @param extrap_grid = User created extrapolation grid from vast_make_extrap_grid.
#' @param vast_settings = Settings list from vast_make_settings.
#' @param tidy_mod_data = Data frame of observations with DECDEG_BEGLON and DECDEG_BEGLAT columns.
#' @param out_dir = Directory passed to the FishStatsUtils helpers as DirPath for saved intermediates.
#'
#' @return Returns a tagged list with extrapolation ("Extrapolation_List") and spatial ("Spatial_List") info in different slots
#'
#' @export
vast_make_spatial_lists<- function(extrap_grid, vast_settings, tidy_mod_data, out_dir){
  # For debugging
  if(FALSE){
    tar_load(vast_extrap_grid)
    extrap_grid = vast_extrap_grid
    tar_load(vast_settings)
    tar_load(tidy_mod_data)
    inla_method = "Barrier"
    out_dir = here::here()
  }
  # Run FishStatsUtils::make_extrapolation_info
  extrap_info<- make_extrapolation_info(Region = vast_settings$Region, strata.limits = vast_settings$strata.limits, input_grid = extrap_grid, DirPath = out_dir)
  # Run FishStatsUtils::make_spatial_info, using observation locations for knots
  spatial_info<- make_spatial_info(n_x = vast_settings$n_x, Lon_i = tidy_mod_data$DECDEG_BEGLON, Lat_i = tidy_mod_data$DECDEG_BEGLAT, Extrapolation_List = extrap_info, knot_method = vast_settings$knot_method, Method = vast_settings$Method, grid_size_km = vast_settings$grid_size_km, fine_scale = vast_settings$fine_scale, DirPath = out_dir, Save_Results = TRUE)
  # Bundle both pieces into one named list and return it
  return(list("Extrapolation_List" = extrap_info, "Spatial_List" = spatial_info))
}
####
#' @title Reduce VAST prediction dataframe from regular grid to knot locations
#'
#' @description Reduce a VAST prediction dataframe from a regular grid of locations down to one representative row per nearest-knot location.
#'
#' @param vast_predict_df = Prediction dataframe with DECDEG_BEGLON/DECDEG_BEGLAT (and ID/DATE) columns.
#' @param vast_spatial_lists = Tagged list from vast_make_spatial_lists; the "Spatial_List" slot supplies the knot locations.
#' @param out_dir = Output directory. NOTE(review): currently unused by this function -- confirm whether results were meant to be saved here.
#'
#' @return The prediction dataframe reduced to one row per unique ID/DATE/nearest-knot combination.
#'
#' @export
reduce_vast_predict_df<- function(vast_predict_df, vast_spatial_lists, out_dir = here::here("data/predict")){
  # Bug fix: the defaults were previously self-referential
  # (vast_predict_df = vast_predict_df), which raises "promise already under
  # evaluation" if an argument is omitted; they have been removed.
  # For debugging
  if(FALSE){
    tar_load(vast_predict_df)
    tar_load(vast_spatial_lists)
  }
  # Knot locations as an sf object, in the projection VAST used for the mesh
  knots_info<- vast_spatial_lists$Spatial_List
  knots_sf<- st_as_sf(data.frame(knots_info$loc_x), coords = c("E_km", "N_km"), crs = attributes(knots_info$loc_i)$projCRS)
  # Get unique prediction locations and assign each to its nearest knot
  pred_df_temp<- vast_predict_df %>%
    distinct(., DECDEG_BEGLON, DECDEG_BEGLAT)
  pred_sf<- points_to_sf(pred_df_temp) %>%
    st_transform(., crs = st_crs(knots_sf))
  pred_nearest_knot<- pred_sf %>%
    mutate(., "Nearest_knot" = st_nearest_feature(x = ., y = knots_sf)) %>%
    st_drop_geometry()
  # Merge this with full prediction dataset
  pred_df_out<- vast_predict_df %>%
    left_join(., pred_nearest_knot)
  # Keep one representative row per ID/DATE/knot. NOTE(review): covariate
  # values are NOT averaged here (despite the original comment) -- distinct()
  # keeps the first row in each group. Confirm that is the intent.
  pred_df_out<- pred_df_out %>%
    distinct(., ID, DATE, Nearest_knot, .keep_all = TRUE) %>%
    dplyr::select(-Nearest_knot)
  return(pred_df_out)
}
####
#' @title Make VAST covariate effect objects
#'
#' @description Create covariate effects for both linear predictors
#'
#' @param X1_coveff_vec = A vector specifying the habitat covariate effects for first linear predictor.
#' @param X2_coveff_vec = A vector specifying the habitat covariate effects for second linear predictor.
#' @param Q1_coveff_vec = A vector specifying the catchability covariate effects for first linear predictor, or NULL when there are no catchability covariates.
#' @param Q2_coveff_vec = A vector specifying the catchability covariate effects for second linear predictor, or NULL.
#'
#' @return A list with covariate effects for the habitat covariates and first linear predictor (first list slot), habitat covariates and second linear predictor (second list slot), catchability covariates and first linear predictor (third slot) and catchability covariates and second linear predictor (fourth slot). The catchability slots are NULL when either Q vector is NULL.
#'
#' @export
vast_make_coveff<- function(X1_coveff_vec, X2_coveff_vec, Q1_coveff_vec, Q2_coveff_vec){
  # For debugging
  if(FALSE){
    X1_coveff_vec = c(2, 3, 3, 2, rep(3, 32))
    X2_coveff_vec = c(2, 3, 3, 2, rep(3, 32))
    Q1_coveff_vec = NULL
    Q2_coveff_vec = NULL
  }
  # VAST expects the config vectors as 1 x k matrices (one row per category).
  # Use the scalar short-circuit operator (||) since this is a single-value
  # condition, per R idiom for if() tests.
  if(is.null(Q1_coveff_vec) || is.null(Q2_coveff_vec)){
    coveff_out<- list("X1config_cp" = matrix(X1_coveff_vec, nrow = 1), "X2config_cp" = matrix(X2_coveff_vec, nrow = 1), "Q1config_k" = NULL, "Q2config_k" = NULL)
  } else {
    coveff_out<- list("X1config_cp" = matrix(X1_coveff_vec, nrow = 1), "X2config_cp" = matrix(X2_coveff_vec, nrow = 1), "Q1config_k" = matrix(Q1_coveff_vec, nrow = 1), "Q2config_k" = matrix(Q2_coveff_vec, nrow = 1))
  }
  # Return it
  return(coveff_out)
}
####
#' @title Build VAST SDM
#'
#' @description Build VAST species distribution model, without running it. This can be helpful to check settings before running `vast_fit_sdm`. Additionally, it can be helpful for making subsequent modifications, particularly to mapping.
#'
#' @param settings = A tagged list with the settings for the model, created with `vast_make_settings`.
#' @param extrap_grid = An extrapolation grid, created with `vast_make_extrap_grid`.
#' @param sample_data = A data frame with the biomass sample data for each species at each tow (columns Lat, Lon, Year, Biomass, Swept, Pred_TF).
#' @param covariate_data = A data frame with the habitat covariate data for each tow, or NULL.
#' @param X1_formula = A formula for the habitat covariates and first linear predictor.
#' @param X2_formula = A formula for the habitat covariates and second linear predictor.
#' @param X_contrasts = A tagged list specifying the contrasts to use for factor covariates in the model.
#' @param Xconfig_list = A tagged list specifying the habitat and catchability covariate effects for first and second linear predictors (slots X1config_cp, X2config_cp, Q1config_k, Q2config_k).
#' @param catchability_data = A data frame with the catchability data for every sample.
#' @param Q1_formula = A formula for the catchability covariates and first linear predictor.
#' @param Q2_formula = A formula for the catchability covariates and second linear predictor.
#' @param index_shapes = A sf object with rows for each of the regions of interest.
#' @param spatial_info_dir = Directory passed through to fit_model_aja as DirPath.
#'
#' @return A VAST `fit_model` object, with the inputs and built TMB object components.
#'
#' @export
vast_build_sdm <- function(settings, extrap_grid, sample_data, covariate_data, X1_formula, X2_formula, X_contrasts, Xconfig_list, catchability_data, Q1_formula, Q2_formula, index_shapes, spatial_info_dir){
  # For debugging
  if(FALSE){
    library(VAST)
    library(tidyverse)
    library(stringr)
    # Seasonal
    tar_load(vast_settings)
    settings = vast_settings
    tar_load(vast_extrap_grid)
    extrap_grid = vast_extrap_grid
    tar_load(vast_sample_data)
    sample_data = vast_sample_data
    tar_load(vast_covariate_data)
    covariate_data = vast_covariate_data
    X1_formula = hab_formula
    X2_formula = hab_formula
    hab_env_coeffs_n = hab_env_coeffs_n
    tar_load(vast_catchability_data)
    catchability_data = vast_catchability_data
    catch_formula<- ~ Survey
    Q1_formula = catch_formula
    Q2_formula = catch_formula
    X_contrasts = list(Season = contrasts(vast_covariate_data$Season, contrasts = FALSE), Year_Cov = contrasts(vast_covariate_data$Year_Cov, contrasts = FALSE))
    tar_load(vast_coveff)
    Xconfig_list = vast_coveff
    tar_load(index_shapefiles)
    index_shapes = index_shapefiles
    spatial_info_dir = here::here("")
    # Annual
    tar_load(vast_settings)
    settings = vast_settings
    tar_load(vast_extrap_grid)
    extrap_grid = vast_extrap_grid
    tar_load(vast_sample_data)
    sample_data = vast_sample_data
    tar_load(vast_covariate_data)
    covariate_data = vast_covariate_data
    X1_formula = hab_formula
    X2_formula = hab_formula
    hab_env_coeffs_n = hab_env_coeffs_n
    tar_load(vast_catchability_data)
    catchability_data = vast_catchability_data
    catch_formula<- ~ 0
    Q1_formula = catch_formula
    Q2_formula = catch_formula
    X_contrasts = list(Year_Cov = contrasts(vast_covariate_data$Year_Cov, contrasts = FALSE))
    tar_load(vast_coveff)
    Xconfig_list = vast_coveff
    tar_load(index_shapefiles)
    index_shapes<- index_shapefiles
  }
  # Check that the sample data has every column the model build requires
  samp_dat_names<- c("Lat", "Lon", "Year", "Biomass", "Swept", "Pred_TF")
  if(!(all(samp_dat_names %in% names(sample_data)))){
    stop(paste("Check names in sample data. Must include:", paste0(samp_dat_names, collapse = ","), sep = " "))
  }
  # Covariate data frame names: pull the variable names out of the habitat
  # formulas and confirm they all exist in covariate_data.
  if(!is.null(covariate_data)){
    cov_dat_names1<- unlist(str_extract_all(X1_formula, boundary("word"))[[2]])
    # Tokens that belong to spline syntax (bs(), degree =, knots, etc.) rather
    # than naming covariates.
    spline_words<- c("bs", "degree", "TRUE", "intercept", unique(as.numeric(unlist(str_extract_all(X1_formula, pattern = "[0-9]+", simplify = TRUE)))), "FALSE")
    # Bug fix: the previous x[-which(x %in% w)] form returned an EMPTY vector
    # whenever no spline tokens were present (x[-integer(0)] drops everything),
    # which made the name check below pass vacuously. Logical negation of %in%
    # is safe for the no-match case.
    cov_dat_names1<- cov_dat_names1[!cov_dat_names1 %in% spline_words]
    cov_dat_names2<- unlist(str_extract_all(X2_formula, boundary("word"))[[2]])
    cov_dat_names2<- cov_dat_names2[!cov_dat_names2 %in% spline_words]
    cov_dat_names_all<- unique(c(cov_dat_names1, cov_dat_names2))
    if(!(all(cov_dat_names_all %in% names(covariate_data)))){
      print(names(covariate_data))
      # Bug fix: print the required names themselves; the vector is unnamed,
      # so names(cov_dat_names_all) was always NULL.
      print(cov_dat_names_all)
      stop(paste("Check names in covariate data. Must include", paste0(cov_dat_names_all, collapse = ","), sep = " "))
    }
  }
  # The covariate-effect list must carry all four config slots
  if(!(all(c("X1config_cp", "X2config_cp", "Q1config_k", "Q2config_k") %in% names(Xconfig_list)))){
    stop(paste("Check names of Xconfig_list. Must be", paste0(c("X1config_cp", "X2config_cp", "Q1config_k", "Q2config_k"), collapse = ","), sep = " "))
  }
  # Run VAST::fit_model with correct info and settings. run_model = FALSE, so
  # the TMB object is constructed and returned without optimization.
  vast_build_out<- fit_model_aja("settings" = settings, "Method" = settings$Method, "input_grid" = extrap_grid, "Lat_i" = sample_data[, 'Lat'], "Lon_i" = sample_data[, 'Lon'], "t_i" = sample_data[, 'Year'], "c_i" = rep(0, nrow(sample_data)), "b_i" = sample_data[, 'Biomass'], "a_i" = sample_data[, 'Swept'], "PredTF_i" = sample_data[, 'Pred_TF'], "X1config_cp" = Xconfig_list[['X1config_cp']], "X2config_cp" = Xconfig_list[['X2config_cp']], "covariate_data" = covariate_data, "X1_formula" = X1_formula, "X2_formula" = X2_formula, "X_contrasts" = X_contrasts, "catchability_data" = catchability_data, "Q1_formula" = Q1_formula, "Q2_formula" = Q2_formula, "Q1config_k" = Xconfig_list[['Q1config_k']], "Q2config_k" = Xconfig_list[['Q2config_k']], "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE, "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = TRUE, "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
  # Return it
  return(vast_build_out)
}
####
#' @title Adjust VAST SDM
#'
#' @description Make adjustments to VAST SDM and the model returned in `vast_build_sdm`. This can either be the exact same as the one built using `vast_build_sdm`, or it can update that model with adjustments provided in a tagged list. The model is re-built (run_model = FALSE) but not optimized.
#'
#' @param vast_build = A VAST `fit_model` object.
#' @param index_shapes = A sf object with rows for each of the regions of interest.
#' @param spatial_info_dir = Directory passed through to fit_model_aja as DirPath.
#' @param adjustments = Either NULL (default) or a tagged list identifying adjustments that should be made to the vast_build `fit_model` object. If NULL, the identical model defined by the `vast_build` is run and fitted.
#'
#' @return A VAST fit_model object, with the inputs and built TMB object components.
#'
#' @export
vast_make_adjustments <- function(vast_build, index_shapes, spatial_info_dir, adjustments = NULL){
# For debugging
if(FALSE){
tar_load(vast_build0)
vast_build = vast_build0
tar_load(vast_covariate_data)
adjustments = list("log_sigmaXi1_cp" = factor(c(rep(1, length(unique(fit_seasons))), rep(4, nlevels(vast_covariate_data$Year_Cov)), rep(NA, gam_degree*hab_env_coeffs_n))), "log_sigmaXi2_cp" = factor(c(rep(1, length(unique(fit_seasons))), rep(4, nlevels(vast_covariate_data$Year_Cov)), rep(NA, gam_degree*hab_env_coeffs_n))), "lambda1_k" = factor(c(1, NA)), "lambda2_k" = factor(c(1, NA)))
tar_load(index_shapefiles)
index_shapes<- index_shapefiles
}
# If no adjustments are needed, just need to pull information from vast_build and then set "run_model" to TRUE
# NOTE(review): in the call below, the "Q1config_k" argument is populated from
# data_args_input[['Q1config_cp']] -- every other reference to this slot in the
# file uses the key 'Q1config_k', so this lookup likely returns NULL. Confirm
# against fit_model_aja and the stored input_args before relying on this path.
if(is.null(adjustments)){
vast_build_adjust_out<- fit_model_aja("settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid, "Method" = vast_build$settings$Method, "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'], "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'], "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'], "PredTF_i" = vast_build$data_list[['PredTF_i']], "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']], "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']], "covariate_data" = vast_build$input_args$data_args_input$covariate_data, "X1_formula" = vast_build$input_args$data_args_input$X1_formula, "X2_formula" = vast_build$input_args$data_args_input$X2_formula, "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts, "catchability_data" = vast_build$input_args$data_args_input$catchability_data, "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula, "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula, "Q1config_k" = vast_build$input_args$data_args_input[['Q1config_cp']], "Q2config_k" = vast_build$input_args$data_args_input[['Q2config_k']], "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE, "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = vast_build$input_args$extra_args$getJointPrecision, "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
}
# If there are adjustments, need to make those and then re run model.
if(!is.null(adjustments)){
# Check names -- trying to think of what the possible adjustment flags would be in the named list
adjust_names<- c("FieldConfig", "RhoConfig", "X1_formula", "X2_formula", "X1config_cp", "X2config_cp", "X_contrasts", "log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k", "Q1_formula", "Q2_formula", "Q1config_k", "Q2config_k")
if(!(all(names(adjustments) %in% adjust_names))){
stop(paste("Check names in adjustment list. Must be one of", paste0(adjust_names, collapse = ","), sep = " "))
}
# First options are going to be in the settings bit..
# Note: the {{ }} below are plain nested braces in base R (not tidy-eval);
# vast_build$settings[[{{x}}]] evaluates identically to vast_build$settings[[x]].
if(any(names(adjustments) %in% c("FieldConfig", "RhoConfig"))){
# Get just the settings adjustments
settings_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$settings))]
for(i in seq_along(settings_adjusts)){
setting_adjust_i<- settings_adjusts[i]
vast_build$settings[[{{setting_adjust_i}}]]<- adjustments[[{{setting_adjust_i}}]]
}
}
# A lot of stuff is going to be in the `vast_build$input_args$data_args_input` object
if(any(names(adjustments) %in% names(vast_build$input_args$data_args_input))){
# Get just the data args adjustments
data_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$input_args$data_args_input))]
for(i in seq_along(data_adjusts)){
data_adjust_i<- data_adjusts[i]
vast_build$input_args$data_args_input[[{{data_adjust_i}}]]<- adjustments[[{{data_adjust_i}}]]
}
}
# Only other adjustment (for now) is Map.
if(any(names(adjustments) %in% c("log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k"))){
# Get the original, which we can then edit...
map_adjust_out<- vast_build$tmb_list$Map
# Get just the map adjustment names
map_adjusts<- names(adjustments)[which(names(adjustments) %in% names(vast_build$tmb_list$Map))]
# Loop over them
for(i in seq_along(map_adjusts)){
map_adjust_i<- map_adjusts[i]
map_adjust_out[[{{map_adjust_i}}]]<- adjustments[[{{map_adjust_i}}]]
}
}
# Now, re-build and fit model. This is slightly different if we have changed map or not...
# (map_adjust_out is only defined when the Map branch above ran; the condition
# here is the same, so that is safe.)
if(any(names(adjustments) %in% c("log_sigmaXi1_cp", "log_sigmaXi2_cp", "lambda1_k", "lambda2_k"))){
# Adding Map argument
vast_build_adjust_out<- fit_model_aja("settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid, "Method" = vast_build$settings$Method, "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'], "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'], "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'], "PredTF_i" = vast_build$data_list[['PredTF_i']], "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']], "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']], "covariate_data" = vast_build$input_args$data_args_input$covariate_data, "X1_formula" = vast_build$input_args$data_args_input$X1_formula, "X2_formula" = vast_build$input_args$data_args_input$X2_formula, "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts, "catchability_data" = vast_build$input_args$data_args_input$catchability_data, "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula, "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula, "Q1config_k" = vast_build$input_args$data_args_input[['Q1config_k']], "Q2config_k" = vast_build$input_args$data_args_input[['Q2config_k']], "Map" = map_adjust_out, "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE, "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = FALSE, "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
} else {
# No need for Map argument, just build and fit
# NOTE(review): this call passes "Q1config_cp"/"Q2config_cp" where the Map
# branch above passes "Q1config_k"/"Q2config_k" -- confirm which argument
# names fit_model_aja actually accepts; the inconsistency looks accidental.
vast_build_adjust_out<- fit_model_aja("settings" = vast_build$settings, "input_grid" = vast_build$input_args$data_args_input$input_grid, "Method" = vast_build$settings$Method, "Lat_i" = vast_build$data_frame[, 'Lat_i'], "Lon_i" = vast_build$data_frame[, 'Lon_i'], "t_i" = vast_build$data_frame[, 't_i'], "c_iz" = vast_build$data_frame[, 'c_iz'], "b_i" = vast_build$data_frame[, 'b_i'], "a_i" = vast_build$data_frame[, 'a_i'], "PredTF_i" = vast_build$data_list[['PredTF_i']], "X1config_cp" = vast_build$input_args$data_args_input[['X1config_cp']], "X2config_cp" = vast_build$input_args$data_args_input[['X2config_cp']], "covariate_data" = vast_build$input_args$data_args_input$covariate_data, "X1_formula" = vast_build$input_args$data_args_input$X1_formula, "X2_formula" = vast_build$input_args$data_args_input$X2_formula, "X_contrasts" = vast_build$input_args$data_args_input$X_contrasts, "catchability_data" = vast_build$input_args$data_args_input$catchability_data, "Q1_formula" = vast_build$input_args$data_args_input$Q1_formula, "Q2_formula" = vast_build$input_args$data_args_input$Q2_formula, "Q1config_cp" = vast_build$input_args$data_args_input[['Q1config_cp']], "Q2config_cp" = vast_build$input_args$data_args_input[['Q2config_cp']], "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = FALSE, "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = FALSE, "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
}
}
# Return it
return(vast_build_adjust_out)
}
#' @title Fit VAST SDM
#'
#' @description Fit VAST species distribution model
#'
#' @param vast_build_adjust = A VAST `fit_model` object, as returned by `vast_make_adjustments`.
#' @param nice_category_names = Human-readable species/category name used when labeling outputs.
#' @param index_shapes = A sf object with rows for each of the regions of interest
#' @param spatial_info_dir = Directory holding previously saved spatial information, passed through as DirPath.
#' @param out_dir = Directory where the fitted model results are saved.
#'
#' @return A VAST fit_model object, with the inputs and outputs, including parameter estimates, extrapolation grid info, spatial list info, data info, and TMB info.
#'
#' @export
vast_fit_sdm <- function(vast_build_adjust, nice_category_names, index_shapes, spatial_info_dir, out_dir){
  # For debugging: manually load targets objects to step through the function
  # interactively. This block never executes (guarded by if(FALSE)).
  if(FALSE){
    tar_load(vast_adjust)
    vast_build_adjust = vast_adjust
    nice_category_names = nice_category_names
    out_dir = here::here("results/mod_fits")
    tar_load(index_shapefiles)
    index_shapes = index_shapefiles
    spatial_info_dir = here::here("")
  }
  # Build and fit model. Every data/settings argument is pulled back out of the
  # adjusted build object so the fitted model has exactly the same structure as
  # the build step. Differences from the build call: run_model = TRUE (so
  # parameters are actually estimated) and the `Map` from the adjusted build's
  # tmb_list is reused, keeping any parameter fixing/mirroring consistent.
  vast_fit_out<- fit_model_aja("settings" = vast_build_adjust$settings, "input_grid" = vast_build_adjust$input_args$data_args_input$input_grid, "Method" = vast_build_adjust$settings$Method, "Lat_i" = vast_build_adjust$data_frame[, 'Lat_i'], "Lon_i" = vast_build_adjust$data_frame[, 'Lon_i'], "t_i" = vast_build_adjust$data_frame[, 't_i'], "c_iz" = vast_build_adjust$data_frame[, 'c_iz'], "b_i" = vast_build_adjust$data_frame[, 'b_i'], "a_i" = vast_build_adjust$data_frame[, 'a_i'], "PredTF_i" = vast_build_adjust$data_list[['PredTF_i']], "X1config_cp" = vast_build_adjust$input_args$data_args_input[['X1config_cp']], "X2config_cp" = vast_build_adjust$input_args$data_args_input[['X2config_cp']], "covariate_data" = vast_build_adjust$input_args$data_args_input$covariate_data, "X1_formula" = vast_build_adjust$input_args$data_args_input$X1_formula, "X2_formula" = vast_build_adjust$input_args$data_args_input$X2_formula, "X_contrasts" = vast_build_adjust$input_args$data_args_input$X_contrasts, "catchability_data" = vast_build_adjust$input_args$data_args_input$catchability_data, "Q1_formula" = vast_build_adjust$input_args$data_args_input$Q1_formula, "Q2_formula" = vast_build_adjust$input_args$data_args_input$Q2_formula, "Q1config_cp" = vast_build_adjust$input_args$data_args_input[['Q1config_cp']], "Q2config_cp" = vast_build_adjust$input_args$data_args_input[['Q2config_cp']], "Map" = vast_build_adjust$tmb_list$Map, "newtonsteps" = 1, "getsd" = TRUE, "getReportCovariance" = TRUE, "run_model" = TRUE, "test_fit" = FALSE, "Use_REML" = FALSE, "getJointPrecision" = vast_build_adjust$input_args$extra_args$getJointPrecision, "index_shapes" = index_shapes, "DirPath" = spatial_info_dir)
  # Save the fitted model as "<out_dir>/<nice_category_names>_fitted_vast.rds",
  # then return the fit_model object to the caller.
  saveRDS(vast_fit_out, file = paste(out_dir, "/", nice_category_names, "_", "fitted_vast.rds", sep = "" ))
  return(vast_fit_out)
}
#' @title Predict fitted VAST model
#'
#' @description This function makes predictions from a fitted VAST SDM to new locations using VAST::predict.fit_model. Importantly, to use this feature for new times, at least one location for each time of interest needs to be included during the model fitting process. This dummy observation should have a PredTF value of 1 so that the observation is only used in the predicted probability and NOT estimating the likelihood.
#'
#' @param vast_fitted_sdm = A fitted VAST SDM object, as returned with `vast_fit_sdm`
#' @param nice_category_names = A character string used to label output files
#' @param predict_variable = Which variable should be predicted, default is density (D_i)
#' @param predict_category = Which category (species/age/size) should be predicted, default is 0
#' @param predict_vessel = Which sampling category should be predicted, default is 0
#' @param predict_covariates_df_all = A long data frame with all of the prediction covariates. To save memory, predictions are made only at knot locations: each prediction row is assigned to its nearest knot and covariates are averaged per knot and time step.
#' @param cov_names = Character vector naming the covariate columns in `predict_covariates_df_all` to average at each knot
#' @param time_col = Name of the time column in `predict_covariates_df_all`
#' @param out_dir = Output directory where the prediction data frame .rds file is saved
#'
#' @return A data frame with Lat, Lon, Time and Pred columns
#'
#' @export
predict_vast<- function(vast_fitted_sdm, nice_category_names, predict_variable = "D_i", predict_category = 0, predict_vessel = 0, predict_covariates_df_all, cov_names, time_col, out_dir){
  # Predicts `predict_variable` from a fitted VAST model at knot locations.
  # To limit memory use, the prediction covariates are first aggregated to the
  # model's spatial knots (nearest-knot assignment, then per-knot/per-time
  # means) before being handed to predict.fit_model_aja().
  # For debugging: never executes (guarded by if(FALSE)).
  if(FALSE){
    # Targets
    tar_load(vast_fit)
    vast_fitted_sdm = vast_fit
    nmfs_species_code = 101
    predict_variable = "Index_gctl"
    predict_category = 0
    predict_vessel = 0
    tar_load(vast_predict_df)
    predict_covariates_df_all = vast_predict_df
    # Basic example...
    vast_fitted_sdm = readRDS(here::here("", "results/mod_fits/1011_fitted_vast.rds"))
    nmfs_species_code = 101
    predict_variable = "Index_gctl"
    predict_category = 0
    predict_vessel = 0
    predict_covariates_df_all<- pred_df
    time_col = "Year"
    cov_names = c("Depth", "SST_seasonal", "BT_seasonal")
  }
  #### Not the biggest fan of this, but for now, building in a work around to resolve some of the memory issues that we were running into by supplying a 0.25 degree grid and trying to predict/project for each season-year from 1980-2100. To overcome this issue, going to try to just make the projections to knots and do the smoothing later.
  # First, need to get the knot locations from the fitted model's spatial list
  knot_locs<- data.frame(vast_fitted_sdm$spatial_list$latlon_g) %>%
    st_as_sf(., coords = c("Lon", "Lat"), remove = FALSE) %>%
    mutate(., "Pt_Id" = 1:nrow(.))
  # Assign each prediction location to its nearest knot
  pred_sf<- predict_covariates_df_all %>%
    st_as_sf(., coords = c("Lon", "Lat"), remove = FALSE)
  pred_sf<- pred_sf %>%
    mutate(., "Nearest_Knot" = st_nearest_feature(., knot_locs))
  # Average the covariates within each knot/time combination
  pred_df_knots<- pred_sf %>%
    st_drop_geometry()
  # `time_col` is a plain character string, so base c() is all that is needed here
  group_by_vec<- c(time_col, "Nearest_Knot")
  pred_df_knots<- pred_df_knots %>%
    group_by_at(.vars = group_by_vec) %>%
    summarize_at(all_of(cov_names), mean, na.rm = TRUE) %>%
    left_join(., st_drop_geometry(knot_locs), by = c("Nearest_Knot" = "Pt_Id")) %>%
    ungroup()
  # Collecting necessary bits from the prediction covariates -- lat, lon, time
  pred_lats<- pred_df_knots$Lat
  pred_lons<- pred_df_knots$Lon
  pred_times<- as.numeric(unlist(pred_df_knots[time_col]))
  # Catch stuff: unit sampled area and constant category/vessel per location
  pred_sampled_areas<- rep(1, length(pred_lats))
  pred_category<- rep(predict_category, length(pred_lats))
  pred_vessel<- rep(predict_vessel, length(pred_lats))
  # Keep only the covariate columns that the fitted model knows about
  pred_cov_dat_name_order<- which(names(pred_df_knots) %in% names(vast_fitted_sdm$covariate_data))
  pred_cov_dat_use<- pred_df_knots[,pred_cov_dat_name_order]
  # Catchability data? Only needed when the fitted model used it.
  if(!is.null(vast_fitted_sdm$catchability_data)){
    pred_catch_dat_use<- pred_cov_dat_use %>%
      dplyr::select(., c(Year, Year_Cov, Season, Lat, Lon, Survey)
      )
    # All predictions are referenced to the NMFS survey catchability level
    pred_catch_dat_use$Survey<- rep("NMFS", nrow(pred_catch_dat_use))
    pred_catch_dat_use$Survey<- factor(pred_catch_dat_use$Survey, levels = c("NMFS", "DFO", "DUMMY"))
  } else {
    pred_catch_dat_use<- NULL
  }
  # Make the predictions.
  # BUG FIX: previously a bare positional NULL occupied the `v_i` slot, so the
  # computed `pred_vessel` vector was never used and `v_i` was NULL inside
  # predict.fit_model_aja (shortening v_i relative to the other input vectors).
  preds_out<- predict.fit_model_aja(x = vast_fitted_sdm, what = predict_variable, Lat_i = pred_lats, Lon_i = pred_lons, t_i = pred_times, a_i = pred_sampled_areas, c_iz = pred_category, v_i = pred_vessel, new_covariate_data = pred_cov_dat_use, new_catchability_data = pred_catch_dat_use, do_checks = FALSE)
  # Get everything as a dataframe to make plotting easier...
  pred_df_out<- data.frame("Lat" = pred_lats, "Lon" = pred_lons, "Time" = pred_cov_dat_use[,time_col], "Pred" = preds_out)
  # Save as "<out_dir>/pred_<variable>_<name>.rds" and return
  saveRDS(pred_df_out, file = paste(out_dir, "/pred_", predict_variable, "_", nice_category_names, ".rds", sep = "" ))
  return(pred_df_out)
}
#' @title Prediction spatial summary
#'
#' @description Calculates average "availability" of fish biomass from SDM predictions within spatial area of interest. NOTE(review): the current function body plots density surfaces from global objects and never uses these parameters -- it appears to be a stale copy of `vast_fit_plot_density`; confirm intended behavior before use.
#'
#' @param pred_df = A dataframe with Lat, Lon, Time and Pred columns (currently unused by the body)
#' @param spatial_areas = An sf object defining the spatial areas of interest (currently unused by the body)
#' @return Nothing useful in its current state; the body saves a plot or gif to disk via undefined globals (see NOTE above).
#'
#' @export
# NOTE(review): this function's body does not match its signature. Neither
# `pred_df` nor `spatial_areas` is used; instead the body references many
# objects that are not defined in this scope (vast_fit, all_times, plot_times,
# template, mask, land_sf, xlim, ylim, land_color, panel_or_gif, panel_cols,
# panel_rows, out_dir, file_name, working_dir, nmfs_species_code). It appears
# to be an incomplete copy of `vast_fit_plot_density` -- confirm and rewrite.
pred_spatial_summary<- function(pred_df, spatial_areas){
  # Debugging scaffold: never executes (guarded by if(FALSE)).
  if(FALSE){
    tar_load(vast_fit)
    template = raster("~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/HighResTemplate.grd")
    tar_load(vast_seasonal_data)
    all_times = as.character(levels(vast_seasonal_data$YEAR_SEASON))
    plot_times = NULL
    tar_load(land_sf)
    tar_load(shapefile)
    mask = shapefile
    land_color = "#d9d9d9"
    res_data_path = "~/Box/RES_Data/"
    xlim = c(-85, -55)
    ylim = c(30, 50)
    panel_or_gif = "gif"
    panel_cols = NULL
    panel_rows = NULL
  }
  # Plotting at spatial knots...
  # Getting prediction array: log(density + 1) at each knot/category/time.
  # NOTE(review): `vast_fit` is not an argument of this function.
  pred_array<- log(vast_fit$Report$D_gct+1)
  # Getting time info
  # NOTE(review): `which(all_times)` errors because which() requires a logical
  # argument; this was presumably meant to be `all_times[all_times %in% plot_times]`.
  if(!is.null(plot_times)){
    plot_times<- all_times[which(all_times) %in% plot_times]
  } else {
    plot_times<- all_times
  }
  # Getting spatial information: extrapolation-grid locations flagged Include > 0
  spat_data<- vast_fit$extrapolation_list
  loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
  CRS_orig<- sp::CRS("+proj=longlat")
  CRS_proj<- sp::CRS(spat_data$projargs)
  land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # Looping through time steps; one plot per step with a shared color scale
  rasts_out<- vector("list", dim(pred_array)[3])
  rasts_range<- pred_array
  rast_lims<- c(round(min(rasts_range)-0.000001, 2), round(max(rasts_range) + 0.0000001, 2))
  if(dim(pred_array)[3] == 1){
    # NOTE(review): `df` is assigned but `data_df` is used on the next line --
    # one of the two names is wrong.
    df<- data.frame(loc_g, z = pred_array[,1,])
    points_ll = st_as_sf(data_df, coords = c("Lon", "Lat"), crs = CRS_orig)
    points_proj = points_ll %>%
      st_transform(., crs = CRS_proj)
    points_bbox<- st_bbox(points_proj)
    raster_proj<- st_rasterize(points_proj)
    raster_proj<- resample(raster_proj, raster(template))
    # NOTE(review): the chain below ends at coord_sf() without a trailing `+`,
    # so the theme() call on the following line is a no-op standalone statement.
    # `land_sf_proj` is also undefined (land_sf was cropped above, not projected).
    plot_out<- ggplot() +
      geom_stars(data = raster_proj, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = "Density", option = "viridis", na.value = "transparent", limits = rast_lims) +
      geom_sf(data = land_sf_proj, fill = land_color, lwd = 0.2) +
      coord_sf(xlim = points_bbox[c(1,3)], ylim = points_bbox[c(2,4)], expand = FALSE, datum = sf::st_crs(CRS_proj))
    theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05))
    # NOTE(review): `file_name` is undefined in this scope.
    ggsave(filename = paste(out_dir, file_name, ".png", sep = ""), plot_out, width = 11, height = 8, units = "in")
  } else {
    for (tI in 1:dim(pred_array)[3]) {
      # Only category 1 of the array is plotted here.
      data_df<- data.frame(loc_g, z = pred_array[,1,tI])
      # Interpolation of knot values onto a fixed lon/lat grid
      pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
      pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
                  xo=seq(-87.99457, -57.4307, length = 115),
                  yo=seq(22.27352, 48.11657, length = 133))
      pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
      # Clip interpolated points to the mask polygon
      pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
      pred_df_temp<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
      coords_keep<- as.data.frame(st_coordinates(pred_df_temp))
      row.names(coords_keep)<- NULL
      pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
      names(pred_df_use)<- c("x", "y", "z")
      # raster_proj<- raster::rasterize(as_Spatial(points_ll), template, field = "z", fun = mean)
      # raster_proj<- as.data.frame(raster_proj, xy = TRUE)
      #
      time_plot_use<- plot_times[tI]
      rasts_out[[tI]]<- ggplot() +
        geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
        scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
        annotate("text", x = -65, y = 37.5, label = time_plot_use) +
        geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
        coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
        theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
    }
    if(panel_or_gif == "panel"){
      # Panel plot
      # NOTE(review): `all.plot` is undefined (object is `all_plot`) and
      # `working_dir`/`file_name` are undefined in this scope.
      all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
      ggsave(filename = paste(working_dir, file_name, ".png", sep = ""), all.plot, width = 11, height = 8, units = "in")
    } else {
      # Make a gif by printing each time step's plot in sequence
      plot_loop_func<- function(plot_list){
        for (i in seq_along(plot_list)) {
          plot_use<- plot_list[[i]]
          print(plot_use)
        }
      }
      # NOTE(review): `nmfs_species_code` is undefined in this scope.
      invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, nmfs_species_code, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
    }
  }
}
#' @title Plot VAST model predicted density surfaces
#'
#' @description Creates either a panel plot or a gif of VAST model predicted density surfaces
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param nice_category_names = A character string used to label plots and output files
#' @param mask = An sf polygon used to clip the interpolated density surface
#' @param all_times = A vector of all of the unique time steps available from the VAST fitted model
#' @param plot_times = Either NULL to make a plot for each time in `all_times` or a vector of all of the times to plot, which must be a subset of `all_times`
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param panel_or_gif = A character string of either "panel" or "gif" indicating how the multiple plots across time steps should be displayed
#' @param out_dir = Output directory to save the panel plot or gif
#'
#' @return For "panel", the patchwork panel plot (also saved as .png); for "gif", NULL invisibly (gif saved to out_dir).
#'
#' @export
vast_fit_plot_density<- function(vast_fit, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
  # Plots log(density + 1) surfaces from a fitted VAST model: knot densities
  # are interpolated onto a fixed lon/lat grid, clipped to `mask`, and either
  # saved as a single png, a panel plot, or a gif across time steps.
  # NOTE(review): the default `all_times = all_times` is self-referential and
  # errors if the argument is omitted -- callers must always supply it.
  # For debugging: never executes (guarded by if(FALSE)).
  if(FALSE){
    tar_load(vast_fit)
    template = raster("~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/HighResTemplate.grd")
    tar_load(vast_seasonal_data)
    all_times = as.character(levels(vast_seasonal_data$VAST_YEAR_SEASON))
    plot_times = NULL
    tar_load(land_sf)
    tar_load(region_shapefile)
    mask = region_shapefile
    land_color = "#d9d9d9"
    res_data_path = "~/Box/RES_Data/"
    xlim = c(-85, -55)
    ylim = c(30, 50)
    panel_or_gif = "gif"
    panel_cols = NULL
    panel_rows = NULL
  }
  # Prediction array at spatial knots, log(x + 1) transformed for display
  pred_array<- log(vast_fit$Report$D_gct+1)
  # Subset the times to plot.
  # BUG FIX: original was `all_times[which(all_times) %in% plot_times]`, which
  # errors because which() requires a logical argument.
  if(!is.null(plot_times)){
    plot_times<- all_times[all_times %in% plot_times]
  } else {
    plot_times<- all_times
  }
  # Spatial information: extrapolation-grid locations flagged Include > 0
  spat_data<- vast_fit$extrapolation_list
  loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
  CRS_orig<- sp::CRS("+proj=longlat")
  CRS_proj<- sp::CRS(spat_data$projargs)
  land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # One plot per time step, all sharing the same color scale limits
  rasts_out<- vector("list", dim(pred_array)[3])
  rasts_range<- pred_array
  rast_lims<- c(0, round(max(rasts_range) + 0.0000001, 2))
  if(dim(pred_array)[3] == 1){
    # Single time step: build one map and save it directly.
    # Only the first category of the array is plotted.
    data_df<- data.frame(loc_g, z = pred_array[,1,])
    # Interpolate knot values onto a fixed lon/lat grid
    pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
    pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
                xo=seq(-87.99457, -57.4307, length = 115),
                yo=seq(22.27352, 48.11657, length = 133))
    pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
    # Clip the interpolated points to the mask polygon
    pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
    pred_df_temp<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
    coords_keep<- as.data.frame(st_coordinates(pred_df_temp))
    row.names(coords_keep)<- NULL
    pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
    names(pred_df_use)<- c("x", "y", "z")
    time_plot_use<- plot_times
    plot_out<- ggplot() +
      geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
      annotate("text", x = -65, y = 37.5, label = time_plot_use) +
      geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
      coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
      theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
    # BUG FIX: original used paste(..., sep = "/"), producing "<dir>/<name>/.png"
    ggsave(filename = paste0(out_dir, "/", nice_category_names, ".png"), plot_out, width = 11, height = 8, units = "in")
  } else {
    for (tI in 1:dim(pred_array)[3]) {
      # Only the first category of the array is plotted.
      data_df<- data.frame(loc_g, z = pred_array[,1,tI])
      # Interpolate knot values onto a fixed lon/lat grid
      pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
      pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
                  xo=seq(-87.99457, -57.4307, length = 115),
                  yo=seq(22.27352, 48.11657, length = 133))
      pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
      # Clip the interpolated points to the mask polygon
      pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
      pred_df_temp<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
      coords_keep<- as.data.frame(st_coordinates(pred_df_temp))
      row.names(coords_keep)<- NULL
      pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
      names(pred_df_use)<- c("x", "y", "z")
      time_plot_use<- plot_times[tI]
      rasts_out[[tI]]<- ggplot() +
        geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
        scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
        annotate("text", x = -65, y = 37.5, label = time_plot_use) +
        geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
        coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
        theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
    }
    if(panel_or_gif == "panel"){
      # Panel plot: save and return it
      all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
      ggsave(filename = paste0(out_dir, "/", nice_category_names, "_LogDensity.png"), all_plot, width = 11, height = 8, units = "in")
      return(all_plot)
    } else {
      # Make a gif by printing each time step's plot in sequence
      plot_loop_func<- function(plot_list){
        for (i in seq_along(plot_list)) {
          plot_use<- plot_list[[i]]
          print(plot_use)
        }
      }
      invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
    }
  }
}
#' @title Plot predicted density surfaces from data frame
#'
#' @description Creates either a panel plot or a gif of predicted density surfaces from a data frame that has location and time information
#'
#' @param pred_df = A dataframe with Lat, Lon, Time and Pred columns
#' @param nice_category_names = A character string used to label plots and output files
#' @param mask = Land mask
#' @param plot_times = Either NULL to make a plot for each time in `pred_df$Time` or a vector of all of the times to plot, which must be a subset of `pred_df$Time`
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param panel_or_gif = A character string of either "panel" or "gif" indicating how the multiple plots across time steps should be displayed
#' @param out_dir = Output directory to save the panel plot or gif
#'
#' @return NULL. Panel or gif plot is saved in out_dir.
#'
#' @export
vast_df_plot_density<- function(pred_df, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
  # Plots log(Pred + 1) surfaces from a Lat/Lon/Time/Pred data frame: values
  # are interpolated onto a fixed lon/lat grid, clipped to `mask`, and saved
  # as a panel plot or gif across time steps. `all_times` is recomputed from
  # pred_df$Time below, so the (self-referential) default is never evaluated.
  # For debugging: never executes (guarded by if(FALSE)).
  if(FALSE){
    tar_load(vast_predictions)
    pred_df = vast_predictions
    plot_times = NULL
    tar_load(land_sf)
    tar_load(region_shapefile)
    mask = region_shapefile
    land_color = "#d9d9d9"
    res_data_path = "~/Box/RES_Data/"
    xlim = c(-80, -55)
    ylim = c(35, 50)
    panel_or_gif = "gif"
    panel_cols = NULL
    panel_rows = NULL
  }
  # Time ID column for filtering.
  # NOTE(review): as.numeric(Time) gives the level index only when Time is a
  # factor with levels in chronological order -- confirm against upstream data.
  pred_df<- pred_df %>%
    mutate(., "Time_Filter" = as.numeric(Time))
  # Log transform pred_df$Pred for display
  pred_df$Pred<- log(pred_df$Pred+1)
  # Getting all unique times
  all_times<- unique(pred_df$Time)
  # Subset the times to plot.
  # BUG FIX: original was `all_times[which(all_times) %in% plot_times]`, which
  # errors because which() requires a logical argument.
  if(!is.null(plot_times)){
    plot_times<- all_times[all_times %in% plot_times]
  } else {
    plot_times<- all_times
  }
  # Crop the land layer to the plotting window
  land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
  # One plot per time step, all sharing the same color scale limits
  rasts_out<- vector("list", length(plot_times))
  rasts_range<- pred_df$Pred
  rast_lims<- c(0, round(max(rasts_range) + 0.0000001, 2))
  for (tI in seq_along(plot_times)) {
    pred_df_temp<- pred_df %>%
      dplyr::filter(., Time_Filter == tI)
    # Interpolate point values onto a fixed lon/lat grid
    pred_df_temp<- na.omit(data.frame("x" = pred_df_temp$Lon, "y" = pred_df_temp$Lat, "layer" = pred_df_temp$Pred))
    pred_df_interp<- interp(pred_df_temp[,1], pred_df_temp[,2], pred_df_temp[,3], duplicate = "mean", extrap = TRUE,
                xo=seq(-87.99457, -57.4307, length = 115),
                yo=seq(22.27352, 48.11657, length = 133))
    pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
    # Clip the interpolated points to the mask polygon
    pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = 4326)
    pred_df_temp2<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
    coords_keep<- as.data.frame(st_coordinates(pred_df_temp2))
    row.names(coords_keep)<- NULL
    pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp2$z)))
    names(pred_df_use)<- c("x", "y", "z")
    time_plot_use<- plot_times[tI]
    rasts_out[[tI]]<- ggplot() +
      geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
      scale_fill_viridis_c(name = "Log (density+1)", option = "viridis", na.value = "transparent", limits = rast_lims) +
      annotate("text", x = -65, y = 37.5, label = time_plot_use) +
      geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
      coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
      theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
  }
  if(panel_or_gif == "panel"){
    # Panel plot.
    # BUG FIX: original saved `all.plot` (undefined; the object is `all_plot`)
    # and passed an invalid `sep` argument through paste0().
    all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
    ggsave(filename = paste0(out_dir, "/", nice_category_names, "_LogDensity.png"), all_plot, width = 11, height = 8, units = "in")
  } else {
    # Make a gif by printing each time step's plot in sequence
    plot_loop_func<- function(plot_list){
      for (i in seq_along(plot_list)) {
        plot_use<- plot_list[[i]]
        print(plot_use)
      }
    }
    invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_LogDensity.gif"), delay = 0.75, progress = FALSE))
  }
}
# Predicts `what` (e.g. "D_i") from a fitted VAST model at new locations/times
# by appending the new observations to the original data with PredTF_i = 1
# (so they do not contribute to the likelihood), rebuilding the spatial info,
# data list and TMB object at the fitted parameters (ParHat), and extracting
# the report slice corresponding to the appended observations.
predict.fit_model_aja<- function(x, what = "D_i", Lat_i, Lon_i, t_i, a_i, c_iz = rep(0,length(t_i)), v_i = rep(0,length(t_i)), new_covariate_data = NULL, new_catchability_data = NULL, do_checks = TRUE, working_dir = paste0(getwd(),"/")){
  # Debugging scaffold: never executes (guarded by if(FALSE)).
  if(FALSE){
    tar_load(vast_fit)
    x = vast_fit
    what = "D_i"
    Lat_i = x$data_frame$Lat_i
    #Lat_i = pred_cov_dat_use$Lat
    Lon_i = x$data_frame$Lon_i
    #Lon_i = pred_cov_dat_use$Lon
    t_i = x$data_frame$t_i
    #t_i = pred_cov_dat_use$Year
    a_i<- x$data_frame$a_i
    #a_i<- rep(unique(pred_sampled_areas), length(Lat_i))
    c_iz = rep(0,length(t_i))
    #c_iz<- rep(unique(predict_category), length(Lat_i))
    v_i = rep(0,length(t_i))
    #v_i<- rep(unique(predict_vessel), length(t_i))
    new_covariate_data = NULL
    #new_covariate_data = pred_cov_dat_use
    new_catchability_data = NULL
    #new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    x = vast_fit
    what = "Index_gctl"
    Lat_i = predict_covariates_df_all[,"DECDEG_BEGLAT"]
    Lon_i = predict_covariates_df_all[,"DECDEG_BEGLON"]
    t_i = predict_covariates_df_all[,"t_i"]
    a_i = predict_covariates_df_all[,"a_i"]
    c_iz = predict_covariates_df_all[,"c_iz"]
    v_i = predict_covariates_df_all[,"v_i"]
    new_covariate_data = pred_cov_dat_use
    new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    working_dir = paste0(getwd(),"/")
    # object = vast_fit
    # x = object
    # Lat_i = object$data_frame$Lat_i
    # Lon_i = object$data_frame$Lon_i
    # t_i = object$data_frame$t_i
    # a_i = object$data_frame$a_i
    # c_iz = rep(0,length(t_i))
    # v_i = rep(0,length(t_i))
    # what = "P1_iz"
    # new_covariate_data = object$covariate_data
    # new_catchability_data = object$catchability_data
    # do_checks = FALSE
    x = vast_fitted_sdm
    what = predict_variable
    Lat_i = pred_lats
    Lon_i = pred_lons
    t_i = pred_times
    a_i = pred_sampled_areas
    c_iz = pred_category
    v_i = rep(0,length(t_i))
    new_covariate_data = pred_cov_dat_use
    new_catchability_data = pred_catch_dat_use
    do_checks = FALSE
    working_dir = paste0(getwd(), "/")
  }
  message("`predict.fit_model(.)` is in beta-testing, and please explore results carefully prior to using")
  # Check issues: `what` must be an observation-level report slot (length n_i)
  if( !(what%in%names(x$Report)) || (length(x$Report[[what]])!=x$data_list$n_i) ){
    stop("`what` can only take a few options")
  }
  if( !is.null(new_covariate_data) ){
    # Confirm all columns are available
    if( !all(colnames(x$covariate_data) %in% colnames(new_covariate_data)) ){
      stop("Please ensure that all columns of `x$covariate_data` are present in `new_covariate_data`")
    }
    # Eliminate unnecessary columns, reordered to match the fitted covariate data
    new_covariate_data = new_covariate_data[,match(colnames(x$covariate_data),colnames(new_covariate_data))]
    # Eliminate old-covariates that are also present in new_covariate_data
    # (exact Lat/Lon/Year matches, found via zero-distance nearest neighbors)
    NN = RANN::nn2( query=x$covariate_data[,c('Lat','Lon','Year')], data=new_covariate_data[,c('Lat','Lon','Year')], k=1 )
    if( any(NN$nn.dist==0) ){
      x$covariate_data = x$covariate_data[-which(NN$nn.dist==0),,drop=FALSE]
    }
  }
  if( !is.null(new_catchability_data) ){
    # Confirm all columns are available
    # NOTE(review): the message below says `new_covariate_data` but this check
    # is about `new_catchability_data` -- copy-paste typo in the string.
    if( !all(colnames(x$catchability_data) %in% colnames(new_catchability_data)) ){
      stop("Please ensure that all columns of `x$catchability_data` are present in `new_covariate_data`")
    }
    # Eliminate unnecessary columns
    new_catchability_data = new_catchability_data[,match(colnames(x$catchability_data),colnames(new_catchability_data))]
    # Eliminate old-covariates that are also present in new_covariate_data
    NN = RANN::nn2( query=x$catchability_data[,c('Lat','Lon','Year')], data=new_catchability_data[,c('Lat','Lon','Year')], k=1 )
    if( any(NN$nn.dist==0) ){
      x$catchability_data = x$catchability_data[-which(NN$nn.dist==0),,drop=FALSE]
    }
  }
  # Process covariates: original data first, new prediction rows appended
  covariate_data = rbind( x$covariate_data, new_covariate_data )
  catchability_data = rbind( x$catchability_data, new_catchability_data )
  # Process inputs: PredTF_i = 1 for appended rows marks them as
  # prediction-only, so they are excluded from the likelihood.
  PredTF_i = c( x$data_list$PredTF_i, rep(1,length(t_i)) )
  # NOTE(review): b_i for the new rows is drawn at random from {0, 1}. Since
  # PredTF_i = 1 these values should not enter the likelihood, but the call is
  # non-deterministic (advances the RNG) -- confirm a constant placeholder
  # would not be preferable.
  b_i = c( x$data_frame[,"b_i"], sample(c(0, 1), size = length(t_i), replace = TRUE))
  # NOTE(review): matrix() here collapses a multi-column c_iz to one column --
  # confirm this is only ever used with single-category models.
  c_iz = rbind( matrix(x$data_frame[,grep("c_iz",names(x$data_frame))]), matrix(c_iz) )
  Lat_i = c( x$data_frame[,"Lat_i"], Lat_i )
  Lon_i = c( x$data_frame[,"Lon_i"], Lon_i )
  a_i = c( x$data_frame[,"a_i"], a_i )
  v_i = c( x$data_frame[,"v_i"], v_i )
  t_i = c( x$data_frame[,"t_i"], t_i )
  #assign("b_i", b_i, envir=.GlobalEnv)
  # Build information regarding spatial location and correlation, reusing the
  # fitted mesh and k-means so the spatial structure is unchanged.
  message("\n### Re-making spatial information")
  spatial_args_new = list("anisotropic_mesh"=x$spatial_list$MeshList$anisotropic_mesh, "Kmeans"=x$spatial_list$Kmeans, "Lon_i"=Lon_i, "Lat_i"=Lat_i )
  spatial_args_input = combine_lists( input=spatial_args_new, default=x$input_args$spatial_args_input )
  spatial_list = do.call( what=make_spatial_info, args=spatial_args_input )
  # Check spatial_list: the rebuilt mesh must match the original exactly
  if( !all.equal(spatial_list$MeshList,x$spatial_list$MeshList) ){
    stop("`MeshList` generated during `predict.fit_model` doesn't match that of original fit; please email package author to report issue")
  }
  # Build data
  # Do *not* restrict inputs to formalArgs(make_data) because other potential inputs are still parsed by make_data for backwards compatibility
  message("\n### Re-making data object")
  data_args_new = list( "c_iz"=c_iz, "b_i"=b_i, "a_i"=a_i, "v_i"=v_i, "PredTF_i"=PredTF_i,
            "t_i"=t_i, "spatial_list"=spatial_list,
            "covariate_data"=covariate_data, "catchability_data"=catchability_data )
  data_args_input = combine_lists( input=data_args_new, default=x$input_args$data_args_input ) # Do *not* use args_to_use
  data_list = do.call( what=make_data, args=data_args_input )
  # n_g = 0 disables grid-level reporting (not needed for point predictions)
  data_list$n_g = 0
  # Build object at the fitted parameter values (Parameters = x$ParHat), with
  # the original Map so parameter fixing/mirroring is preserved.
  message("\n### Re-making TMB object")
  model_args_default = list("TmbData"=data_list, "RunDir"=working_dir, "Version"=x$settings$Version, "RhoConfig"=x$settings$RhoConfig, "loc_x"=spatial_list$loc_x, "Method"=spatial_list$Method, "Map" = x$tmb_list$Map)
  model_args_input = combine_lists( input=list("Parameters"=x$ParHat),
                    default=model_args_default, args_to_use=formalArgs(make_model) )
  tmb_list = do.call( what=make_model, args=model_args_input )
  # Extract output: the appended observations occupy the tail of the report
  # vector, after the original nrow(x$data_frame) observations.
  Report = tmb_list$Obj$report()
  Y_i = Report[[what]][(1+nrow(x$data_frame)):length(Report$D_i)]
  # sanity check: at the same parameters/data, jnll should be unchanged
  #if( all.equal(covariate_data,x$covariate_data) & Report$jnll!=x$Report$jnll){
  if( do_checks==TRUE && (Report$jnll!=x$Report$jnll) ){
    message("Problem detected in `predict.fit_model`; returning outputs for diagnostic purposes")
    Return = list("Report"=Report, "data_list"=data_list)
    return(Return)
  }
  # return prediction
  return(Y_i)
}
# Assign point locations to a stratum or index region.
#
# Two modes:
#   * index_shapes == NULL: compare latitude/longitude/depth against the
#     border columns of a single-row `strata_dataframe` and return the
#     matching "STRATA" label (NA when nothing matches).
#   * index_shapes != NULL: spatially join the points to the supplied sf
#     polygons and return the "Region" assignment for every point.
#
# Args:
#   points: matrix/data.frame with columns BEST_LAT_DD, BEST_LON_DD and
#     (optionally) BEST_DEPTH_M.
#   strata_dataframe: single-row data.frame with a "STRATA" column and
#     optional border columns (south/north, west/east, shallow/deep).
#   index_shapes: sf polygon object with a "Region" column, or NULL.
match_strata_fn_aja <- function(points, strata_dataframe, index_shapes) {
  # Interactive debugging scratch -- never executed.
  if(FALSE){
    points = Tmp
    l = 1
    strata_dataframe = strata.limits[l, , drop = FALSE]
    index_shapes = index_shapes
  }
  if(is.null(index_shapes)){
    # Default all strata
    match_latitude_TF = match_longitude_TF = match_depth_TF = rep( TRUE, nrow(strata_dataframe))
    # NOTE(review): the comparisons below reference `x`, which is NOT defined
    # in this function (carried over from FishStatsUtils::match_strata_fn,
    # where the function is applied row-wise and `x` is the current row).
    # As written this branch only works if an `x` happens to exist in the
    # calling environment -- confirm whether `x` should be `points`.
    if( all(c("south_border","north_border") %in% names(strata_dataframe)) ){
      match_latitude_TF = as.numeric(x["BEST_LAT_DD"])>strata_dataframe[,'south_border'] & as.numeric(x["BEST_LAT_DD"])<=strata_dataframe[,'north_border']
    }
    if( all(c("west_border","east_border") %in% names(strata_dataframe)) ){
      match_longitude_TF = as.numeric(x["BEST_LON_DD"])>strata_dataframe[,'west_border'] & as.numeric(x["BEST_LON_DD"])<=strata_dataframe[,'east_border']
    }
    if( all(c("shallow_border","deep_border") %in% names(strata_dataframe)) ){
      match_depth_TF = as.numeric(x["BEST_DEPTH_M"])>strata_dataframe[,'shallow_border'] & as.numeric(x["BEST_DEPTH_M"])<=strata_dataframe[,'deep_border']
    }
    # Return stuff
    Char = as.character(strata_dataframe[match_latitude_TF & match_longitude_TF & match_depth_TF,"STRATA"])
    return(ifelse(length(Char)==0,NA,Char))
  }
  # Andrew edit...
  # Shapefile-based matching: st_within join assigns each point the Region of
  # the polygon that contains it (NA when outside all polygons).
  if(!is.null(index_shapes)){
    Tmp_sf<- data.frame(points) %>%
      st_as_sf(., coords = c("BEST_LON_DD", "BEST_LAT_DD"), crs = st_crs(index_shapes), remove = FALSE)
    match_shape<- Tmp_sf %>%
      st_join(., index_shapes, join = st_within) %>%
      mutate(., "Row_ID" = seq(from = 1, to = nrow(.))) %>%
      st_drop_geometry() %>%
      dplyr::select(., Region) %>%
      as.vector()
    return(match_shape)
  }
}
# Build extrapolation-grid info from a user-supplied grid, assigning each grid
# cell an area within each stratum. Local adaptation of
# FishStatsUtils::Prepare_User_Extrapolation_Data_Fn that defines strata
# spatially via index shapefiles (through match_strata_fn_aja).
#
# Args:
#   input_grid: data.frame/matrix with columns Lat, Lon, Area_km2 and
#     optionally Depth.
#   strata.limits: data.frame of strata with a STRATA column; NULL defaults
#     to a single "All_areas" stratum.
#   projargs, zone, flip_around_dateline: projection controls forwarded to
#     project_coordinates.
#   index_shapes: sf polygons with a Region column (one polygon set per
#     stratum), or NULL for border-based matching.
#   ...: ignored; kept for call compatibility.
#
# Returns: list with per-stratum cell areas (a_el), the augmented grid
#   (Data_Extrap, including E_km/N_km and Include), the projection
#   attributes, and the raw cell areas (Area_km2_x).
Prepare_User_Extrapolation_Data_Fn_aja<- function (input_grid, strata.limits = NULL, projargs = NA, zone = NA, flip_around_dateline = TRUE, index_shapes, ...) {
  if (is.null(strata.limits)) {
    strata.limits = data.frame(STRATA = "All_areas")
  }
  message("Using strata ", strata.limits)
  Data_Extrap <- input_grid
  Area_km2_x = Data_Extrap[, "Area_km2"]
  # Rename columns to the BEST_* names expected by match_strata_fn_aja
  Tmp = cbind(BEST_LAT_DD = Data_Extrap[, "Lat"], BEST_LON_DD = Data_Extrap[, "Lon"])
  if ("Depth" %in% colnames(Data_Extrap)) {
    Tmp = cbind(Tmp, BEST_DEPTH_M = Data_Extrap[, "Depth"])
  }
  # One column per stratum: cells matched to the stratum keep their full
  # area, unmatched cells get zero.
  a_el = as.data.frame(matrix(NA, nrow = nrow(Data_Extrap), ncol = nrow(strata.limits), dimnames = list(NULL, strata.limits[, "STRATA"])))
  for (l in seq_len(ncol(a_el))) {
    # Subset index_shapes to this stratum's polygon(s) before matching.
    a_el[, l] = match_strata_fn_aja(points = Tmp, strata_dataframe = strata.limits[l, , drop = FALSE], index_shapes = index_shapes[index_shapes$Region == as.character(strata.limits[l, , drop = FALSE]), ])
    a_el[, l] = ifelse(is.na(a_el[, l]), 0, Area_km2_x)
  }
  # Project lon/lat to km coordinates and attach them to the grid.
  tmpUTM = project_coordinates(X = Data_Extrap[, "Lon"], Y = Data_Extrap[, "Lat"], projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline)
  Data_Extrap = cbind(Data_Extrap, Include = 1)
  if (all(c("E_km", "N_km") %in% colnames(Data_Extrap))) {
    Data_Extrap[, c("E_km", "N_km")] = tmpUTM[, c("X", "Y")]
  } else {
    Data_Extrap = cbind(Data_Extrap, E_km = tmpUTM[, "X"], N_km = tmpUTM[, "Y"])
  }
  Return = list(a_el = a_el, Data_Extrap = Data_Extrap, zone = attr(tmpUTM, "zone"), projargs = attr(tmpUTM, "projargs"), flip_around_dateline = flip_around_dateline, Area_km2_x = Area_km2_x)
  return(Return)
}
# Build the extrapolation-grid information for one or more Regions.
# Local adaptation of FishStatsUtils::make_extrapolation_info whose "user"
# branch calls Prepare_User_Extrapolation_Data_Fn_aja so that per-stratum
# areas can be defined via index shapefiles (`index_shapes`).
#
# Key arguments (others are forwarded to the per-region builders):
#   Region: one or more region names; "user" requires `input_grid`.
#   strata.limits: data.frame of strata definitions with a STRATA column.
#   max_cells: if smaller than the grid size, the grid is reduced via
#     k-means clustering of cell locations.
#   index_shapes: sf polygons passed through to the user-grid builder.
#
# Returns: a list of class "make_extrapolation_info" (a_el, Data_Extrap,
#   projection attributes, Area_km2_x).
make_extrapolation_info_aja<- function (Region, projargs = NA, zone = NA, strata.limits = data.frame(STRATA = "All_areas"), create_strata_per_region = FALSE, max_cells = NULL, input_grid = NULL, observations_LL = NULL, grid_dim_km = c(2, 2), maximum_distance_from_sample = NULL, grid_in_UTM = TRUE, grid_dim_LL = c(0.1, 0.1), region = c("south_coast", "west_coast"), strata_to_use = c("SOG", "WCVI", "QCS", "HS", "WCHG"), epu_to_use = c("All", "Georges_Bank", "Mid_Atlantic_Bight", "Scotian_Shelf", "Gulf_of_Maine", "Other")[1], survey = "Chatham_rise", surveyname = "propInWCGBTS", flip_around_dateline, nstart = 100, area_tolerance = 0.05, backwards_compatible_kmeans = FALSE, DirPath = paste0(getwd(), "/"), index_shapes, ...) {
  # Interactive debugging scratch -- never executed.
  if(FALSE){
    # First run fit_model_aja...
    Region = settings$Region
    projargs = NA
    zone = settings$zone
    strata.limits = settings$strata.limits
    create_strata_per_region = FALSE
    max_cells = settings$max_cells
    input_grid = input_grid
    observations_LL = NULL
    grid_dim_km = settings$grid_size_km
    maximum_distance_from_sample = NULL
    index_shapes = index_shapes
  }
  # max_cells = NULL means "no reduction"
  if (is.null(max_cells))
    max_cells = Inf
  # Build (and, for multiple regions, combine) the extrapolation info.
  for (rI in seq_along(Region)) {
    Extrapolation_List = NULL
    if (tolower(Region[rI]) == "user") {
      if (is.null(input_grid)) {
        stop("Because you're using a user-supplied region, please provide 'input_grid' input")
      }
      if (!(all(c("Lat", "Lon", "Area_km2") %in% colnames(input_grid)))) {
        stop("'input_grid' must contain columns named 'Lat', 'Lon', and 'Area_km2'")
      }
      if (missing(flip_around_dateline))
        flip_around_dateline = FALSE
      Extrapolation_List = Prepare_User_Extrapolation_Data_Fn_aja(strata.limits = strata.limits, input_grid = input_grid, projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline, index_shapes = index_shapes, ...)
    }
    # Fallback: build a grid around supplied observation locations.
    if (is.null(Extrapolation_List)) {
      if (is.null(observations_LL)) {
        stop("Because you're using a new Region[rI], please provide 'observations_LL' input with columns named `Lat` and `Lon`")
      }
      if (missing(flip_around_dateline))
        flip_around_dateline = FALSE
      Extrapolation_List = Prepare_Other_Extrapolation_Data_Fn(strata.limits = strata.limits, observations_LL = observations_LL, grid_dim_km = grid_dim_km, maximum_distance_from_sample = maximum_distance_from_sample, grid_in_UTM = grid_in_UTM, grid_dim_LL = grid_dim_LL, projargs = projargs, zone = zone, flip_around_dateline = flip_around_dateline, ...)
    }
    if (rI == 1) {
      Return = Extrapolation_List
    } else {
      Return = combine_extrapolation_info(Return, Extrapolation_List, create_strata_per_region = create_strata_per_region)
    }
  }
  # Optionally reduce the grid to max_cells cells by k-means clustering of
  # cell locations, aggregating areas within each cluster.
  if (max_cells < nrow(Return$Data_Extrap)) {
    message("# Reducing extrapolation-grid from ", nrow(Return$Data_Extrap), " to ", max_cells, " cells for Region(s): ", paste(Region, collapse = ", "))
    loc_orig = Return$Data_Extrap[, c("E_km", "N_km")]
    # Cluster only cells with positive area, but assign every cell to its
    # nearest cluster center afterwards.
    loc_orig = loc_orig[which(Return$Area_km2_x > 0), ]
    Kmeans = make_kmeans(n_x = max_cells, loc_orig = loc_orig, nstart = nstart, randomseed = 1, iter.max = 1000, DirPath = DirPath, Save_Results = TRUE, kmeans_purpose = "extrapolation", backwards_compatible_kmeans = backwards_compatible_kmeans)
    Kmeans[["cluster"]] = RANN::nn2(data = Kmeans[["centers"]], query = Return$Data_Extrap[, c("E_km", "N_km")], k = 1)$nn.idx[, 1]
    # Sum (or otherwise reduce) per-cell values within each cluster.
    aggregate_vector = function(values_x, index_x, max_index, FUN = sum) {
      tapply(values_x, INDEX = factor(index_x, levels = 1:max_index), FUN = FUN)
    }
    a_el = matrix(NA, nrow = max_cells, ncol = ncol(Return$a_el))
    for (lI in 1:ncol(Return$a_el)) {
      a_el[, lI] = aggregate_vector(values_x = Return$a_el[, lI], index_x = Kmeans$cluster, max_index = max_cells)
    }
    Area_km2_x = aggregate_vector(values_x = Return$Area_km2_x, index_x = Kmeans$cluster, max_index = max_cells)
    Include = aggregate_vector(values_x = Return$Data_Extrap[, "Include"], index_x = Kmeans$cluster, max_index = max_cells, FUN = function(vec) {
      any(vec > 0)
    })
    # Back-project cluster centers to lon/lat for the reduced grid.
    lonlat_g = project_coordinates(X = Kmeans$centers[, "E_km"], Y = Kmeans$centers[, "N_km"], projargs = "+proj=longlat +ellps=WGS84", origargs = Return$projargs)
    Data_Extrap = cbind(Lon = lonlat_g[, 1], Lat = lonlat_g[, 2], Include = Include, Kmeans$centers)
    Return = list(a_el = a_el, Data_Extrap = Data_Extrap, zone = Return$zone, projargs = Return$projargs, flip_around_dateline = Return$flip_around_dateline, Area_km2_x = Area_km2_x)
  }
  # For multi-region fits, optionally prepend a "Total" stratum summing areas.
  if (length(Region) > 1 & create_strata_per_region == TRUE) {
    Return$a_el = cbind(Total = rowSums(Return$a_el), Return$a_el)
  }
  class(Return) = "make_extrapolation_info"
  return(Return)
}
# Fit a VAST model. Local adaptation of FishStatsUtils::fit_model that routes
# extrapolation-grid construction through make_extrapolation_info_aja so that
# user-supplied index shapefiles (passed through `...` as `index_shapes`) are
# honored when building per-stratum areas.
#
# Arguments mirror FishStatsUtils::fit_model:
#   settings: output of make_settings().
#   Method: spatial method forwarded to make_spatial_info.
#   Lat_i, Lon_i, t_i, b_i, a_i: latitude, longitude, time, response, and
#     area-swept for each observation i.
#   c_iz, v_i: category and overdispersion (e.g. vessel) indices; default 0.
#   working_dir: directory where settings/optimizer output are written.
#   X1config_cp, X2config_cp, covariate_data, X1_formula, X2_formula:
#     density-covariate configuration.
#   Q1config_k, Q2config_k, catchability_data, Q1_formula, Q2_formula:
#     catchability-covariate configuration.
#   newtonsteps: Newton steps in the final optimization pass.
#   silent: suppress TMB iteration output.
#   build_model, run_model: toggles for building/optimizing the TMB object.
#   test_fit: run gradient and convergence sanity checks.
#   ...: extra arguments matched by name against the builder functions.
#
# Returns: a list of class "fit_model" containing the data frame,
#   extrapolation/spatial lists, data and TMB objects, parameter estimates,
#   report, and the bookkeeping needed for covariate-effect plotting.
fit_model_aja<- function (settings, Method, Lat_i, Lon_i, t_i, b_i, a_i, c_iz = rep(0, length(b_i)), v_i = rep(0, length(b_i)), working_dir = paste0(getwd(), "/"), X1config_cp = NULL, X2config_cp = NULL, covariate_data, X1_formula = ~0, X2_formula = ~0, Q1config_k = NULL, Q2config_k = NULL, catchability_data, Q1_formula = ~0, Q2_formula = ~0, newtonsteps = 1, silent = TRUE, build_model = TRUE, run_model = TRUE, test_fit = TRUE, ...) {
  # Collect pass-through arguments; flatten any nested *_args lists so their
  # elements are also matched by name against builder formals below.
  extra_args = list(...)
  extra_args = c(extra_args, extra_args$extrapolation_args, extra_args$spatial_args, extra_args$optimize_args, extra_args$model_args)
  data_frame = data.frame(Lat_i = Lat_i, Lon_i = Lon_i, a_i = a_i, v_i = v_i, b_i = b_i, t_i = t_i, c_iz = c_iz)
  year_labels = seq(min(t_i), max(t_i))
  years_to_plot = which(year_labels %in% t_i)
  message("\n### Writing output from `fit_model` in directory: ", working_dir)
  dir.create(working_dir, showWarnings = FALSE, recursive = TRUE)
  capture.output(settings, file = file.path(working_dir, "settings.txt"))
  # Extrapolation grid (the *_aja builder supports index_shapes).
  message("\n### Making extrapolation-grid")
  extrapolation_args_default = list(Region = settings$Region, strata.limits = settings$strata.limits, zone = settings$zone, max_cells = settings$max_cells, DirPath = working_dir)
  extrapolation_args_input = combine_lists(input = extra_args, default = extrapolation_args_default, args_to_use = formalArgs(make_extrapolation_info_aja))
  extrapolation_list = do.call(what = make_extrapolation_info_aja, args = extrapolation_args_input)
  # Spatial information (mesh, knots).
  message("\n### Making spatial information")
  spatial_args_default = list(grid_size_km = settings$grid_size_km, n_x = settings$n_x, Method = Method, Lon_i = Lon_i, Lat_i = Lat_i, Extrapolation_List = extrapolation_list, DirPath = working_dir, Save_Results = TRUE, fine_scale = settings$fine_scale, knot_method = settings$knot_method)
  spatial_args_input = combine_lists(input = extra_args, default = spatial_args_default, args_to_use = c(formalArgs(make_spatial_info), formalArgs(INLA::inla.mesh.create)))
  spatial_list = do.call(what = make_spatial_info, args = spatial_args_input)
  # Data object for TMB.
  message("\n### Making data object")
  if (missing(covariate_data))
    covariate_data = NULL
  if (missing(catchability_data))
    catchability_data = NULL
  data_args_default = list(Version = settings$Version, FieldConfig = settings$FieldConfig, OverdispersionConfig = settings$OverdispersionConfig, RhoConfig = settings$RhoConfig, VamConfig = settings$VamConfig, ObsModel = settings$ObsModel, c_iz = c_iz, b_i = b_i, a_i = a_i, v_i = v_i, s_i = spatial_list$knot_i - 1, t_i = t_i, spatial_list = spatial_list, Options = settings$Options, Aniso = settings$use_anisotropy, X1config_cp = X1config_cp, X2config_cp = X2config_cp, covariate_data = covariate_data, X1_formula = X1_formula, X2_formula = X2_formula, Q1config_k = Q1config_k, Q2config_k = Q2config_k, catchability_data = catchability_data, Q1_formula = Q1_formula, Q2_formula = Q2_formula)
  data_args_input = combine_lists(input = extra_args, default = data_args_default)
  data_list = do.call(what = make_data, args = data_args_input)
  # TMB object.
  message("\n### Making TMB object")
  model_args_default = list(TmbData = data_list, RunDir = working_dir, Version = settings$Version, RhoConfig = settings$RhoConfig, loc_x = spatial_list$loc_x, Method = spatial_list$Method, build_model = build_model)
  model_args_input = combine_lists(input = extra_args, default = model_args_default, args_to_use = formalArgs(make_model))
  tmb_list = do.call(what = make_model, args = model_args_input)
  # Early exit: return inputs/objects without optimizing.
  if (run_model == FALSE || build_model == FALSE) {
    input_args = list(extra_args = extra_args, extrapolation_args_input = extrapolation_args_input, model_args_input = model_args_input, spatial_args_input = spatial_args_input, data_args_input = data_args_input)
    Return = list(data_frame = data_frame, extrapolation_list = extrapolation_list, spatial_list = spatial_list, data_list = data_list, tmb_list = tmb_list, year_labels = year_labels, years_to_plot = years_to_plot, settings = settings, input_args = input_args)
    class(Return) = "fit_model"
    return(Return)
  }
  if (silent == TRUE)
    tmb_list$Obj$env$beSilent()
  # Sanity check: every fixed effect must have a nonzero gradient at the
  # starting values, otherwise the model is mis-structured.
  if (test_fit == TRUE) {
    message("\n### Testing model at initial values")
    LogLike0 = tmb_list$Obj$fn(tmb_list$Obj$par)
    Gradient0 = tmb_list$Obj$gr(tmb_list$Obj$par)
    if (any(Gradient0 == 0)) {
      message("\n")
      stop("Please check model structure; some parameter has a gradient of zero at starting values\n",
        call. = FALSE)
    } else {
      message("Looks good: All fixed effects have a nonzero gradient")
    }
  }
  # Optimization pass 1: quick fit without SD report or Newton steps.
  message("\n### Estimating parameters")
  optimize_args_default1 = list(lower = tmb_list$Lower, upper = tmb_list$Upper, loopnum = 2)
  optimize_args_default1 = combine_lists(default = optimize_args_default1, input = extra_args, args_to_use = formalArgs(TMBhelper::fit_tmb))
  optimize_args_input1 = list(obj = tmb_list$Obj, savedir = NULL, newtonsteps = 0, bias.correct = FALSE, control = list(eval.max = 10000, iter.max = 10000, trace = 1), quiet = TRUE, getsd = FALSE)
  optimize_args_input1 = combine_lists(default = optimize_args_default1, input = optimize_args_input1, args_to_use = formalArgs(TMBhelper::fit_tmb))
  parameter_estimates = do.call(what = TMBhelper::fit_tmb, args = optimize_args_input1)
  if (exists("check_fit") && test_fit == TRUE) {
    problem_found = VAST::check_fit(parameter_estimates)
    if (problem_found == TRUE) {
      message("\n")
      stop("Please change model structure to avoid problems with parameter estimates and then re-try; see details in `?check_fit`\n", call. = FALSE)
    }
  }
  # Optimization pass 2: restart from pass-1 estimates with Newton steps,
  # bias correction, and SD/joint-precision reporting.
  optimize_args_default2 = list(obj = tmb_list$Obj, lower = tmb_list$Lower, upper = tmb_list$Upper, savedir = working_dir, bias.correct = settings$bias.correct, newtonsteps = newtonsteps, bias.correct.control = list(sd = FALSE, split = NULL, nsplit = 1, vars_to_correct = settings$vars_to_correct), control = list(eval.max = 10000, iter.max = 10000, trace = 1), loopnum = 1, getJointPrecision = TRUE)
  optimize_args_input2 = combine_lists(input = extra_args, default = optimize_args_default2, args_to_use = formalArgs(TMBhelper::fit_tmb))
  optimize_args_input2 = combine_lists(input = list(startpar = parameter_estimates$par), default = optimize_args_input2)
  parameter_estimates = do.call(what = TMBhelper::fit_tmb, args = optimize_args_input2)
  if ("par" %in% names(parameter_estimates)) {
    Report = tmb_list$Obj$report()
    ParHat = tmb_list$Obj$env$parList(parameter_estimates$par)
  } else {
    Report = ParHat = "Model is not converged"
  }
  input_args = list(extra_args = extra_args, extrapolation_args_input = extrapolation_args_input, model_args_input = model_args_input, spatial_args_input = spatial_args_input, optimize_args_input1 = optimize_args_input1, optimize_args_input2 = optimize_args_input2, data_args_input = data_args_input)
  # BUGFIX: the original returned Q2config_k = Q1config_k, silently reusing
  # the first-linear-predictor catchability config for the second.
  Return = list(data_frame = data_frame, extrapolation_list = extrapolation_list, spatial_list = spatial_list, data_list = data_list, tmb_list = tmb_list, parameter_estimates = parameter_estimates, Report = Report, ParHat = ParHat, year_labels = year_labels, years_to_plot = years_to_plot, settings = settings, input_args = input_args, X1config_cp = X1config_cp, X2config_cp = X2config_cp, covariate_data = covariate_data, X1_formula = X1_formula, X2_formula = X2_formula, Q1config_k = Q1config_k, Q2config_k = Q2config_k, catchability_data = catchability_data, Q1_formula = Q1_formula, Q2_formula = Q2_formula)
  # Build dummy lm() calls so the effects package can later reconstruct the
  # model matrices for covariate/catchability effect plots.
  Return$effects = list()
  if (!is.null(catchability_data)) {
    catchability_data_full = data.frame(catchability_data, linear_predictor = 0)
    Q1_formula_full = update.formula(Q1_formula, linear_predictor ~ . + 0)
    call_Q1 = lm(Q1_formula_full, data = catchability_data_full)$call
    Q2_formula_full = update.formula(Q2_formula, linear_predictor ~ . + 0)
    call_Q2 = lm(Q2_formula_full, data = catchability_data_full)$call
    Return$effects = c(Return$effects, list(call_Q1 = call_Q1, call_Q2 = call_Q2, catchability_data_full = catchability_data_full))
  }
  if (!is.null(covariate_data)) {
    covariate_data_full = data.frame(covariate_data, linear_predictor = 0)
    X1_formula_full = update.formula(X1_formula, linear_predictor ~ . + 0)
    call_X1 = lm(X1_formula_full, data = covariate_data_full)$call
    X2_formula_full = update.formula(X2_formula, linear_predictor ~ . + 0)
    call_X2 = lm(X2_formula_full, data = covariate_data_full)$call
    Return$effects = c(Return$effects, list(call_X1 = call_X1, call_X2 = call_X2, covariate_data_full = covariate_data_full))
  }
  class(Return) = "fit_model"
  return(Return)
}
# Read the single region shapefile contained in a directory.
#
# Args:
#   region_shapefile_dir: directory containing exactly one .shp file (plus
#     its companion files).
#
# Returns: an sf object read from that shapefile.
vast_read_region_shape<- function(region_shapefile_dir){
  # Anchor the pattern: a bare ".shp" is a regex that also matches companion
  # files such as "foo.shp.xml" (and any name containing "shp").
  region_file<- list.files(region_shapefile_dir, pattern = "\\.shp$", full.names = TRUE)
  if(length(region_file) != 1){
    stop("Expected exactly one .shp file in ", region_shapefile_dir, " but found ", length(region_file), call. = FALSE)
  }
  region_sf<- st_read(region_file)
  return(region_sf)
}
# Read and stack all index-region shapefiles found in a directory.
#
# Args:
#   index_shapefiles_dir: directory containing one or more .shp files.
#
# Returns: a single sf object with the rows of all shapefiles stacked.
vast_read_index_shapes<- function(index_shapefiles_dir){
  # Anchor the pattern: a bare ".shp" regex also matches e.g. "foo.shp.xml".
  index_files<- list.files(index_shapefiles_dir, pattern = "\\.shp$", full.names = TRUE)
  if(length(index_files) == 0){
    stop("No .shp files found in ", index_shapefiles_dir, call. = FALSE)
  }
  # Read every file, then stack once (avoids growing an object in a loop).
  index_shapes_out<- bind_rows(lapply(index_files, st_read))
  return(index_shapes_out)
}
######
## Getting abundance index time series
######
# Extract the abundance-index time series from a fitted VAST model and
# reshape it into a long data.frame (one row per time step x category x
# index region), with a Date column derived from `all_times`. The result is
# also written to CSV in `out_dir`.
#
# Args:
#   vast_fit: fitted "fit_model" object with parameter_estimates$SD.
#   all_times: character vector of time-step labels (e.g. "1985-SPRING" or
#     plain years), one per model time step, used to build Date.
#   nice_category_names: label used in the output file name.
#   index_scale: "raw" for Index_ctl or "log" for ln_Index_ctl.
#   out_dir: directory where the CSV is written.
#
# Returns: the long-format index data.frame.
get_vast_index_timeseries<- function(vast_fit, all_times, nice_category_names, index_scale = c("raw", "log"), out_dir){
  # Resolve index_scale to one valid choice; previously the default vector
  # was compared with `==`, which errors (length > 1 condition) when the
  # argument is left at its default.
  index_scale<- match.arg(index_scale)
  TmbData<- vast_fit$data_list
  Sdreport<- vast_fit$parameter_estimates$SD
  # Time steps
  time_ind<- seq_len(TmbData$n_t)
  time_labels<- sort(unique(vast_fit$data_frame$t_i)[time_ind])
  # Index regions (strata)
  index_regions_ind<- seq_len(TmbData$n_l)
  index_regions<- vast_fit$settings$strata.limits$STRATA[index_regions_ind]
  # Categories
  categories_ind<- seq_len(TmbData$n_c)
  # Estimates and standard errors from the TMB SD report.
  SD<- TMB::summary.sdreport(Sdreport)
  # Populate a [category, time, region, Estimate/SE] array, using the
  # bias-corrected column when bias correction was run.
  Index_ctl = log_Index_ctl = array(NA, dim = c(unlist(TmbData[c('n_c','n_t','n_l')]), 2), dimnames = list(categories_ind, time_labels, index_regions, c('Estimate','Std. Error')))
  if(index_scale == "raw"){
    if(vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)){
      Index_ctl[] = SD[which(rownames(SD) == "Index_ctl"),c('Est. (bias.correct)','Std. Error')]
    } else {
      Index_ctl[]<- SD[which(rownames(SD) == "Index_ctl"), c('Estimate','Std. Error')]
    }
    index_res_array<- Index_ctl
  } else {
    if(vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)){
      log_Index_ctl[] = SD[which(rownames(SD) == "ln_Index_ctl"),c('Est. (bias.correct)','Std. Error')]
    } else {
      log_Index_ctl[]<- SD[which(rownames(SD) == "ln_Index_ctl"), c('Estimate','Std. Error')]
    }
    index_res_array<- log_Index_ctl
  }
  # Flatten the array category by category, collecting into a list and
  # binding once (rather than growing a data.frame inside the loop).
  index_res_list<- vector("list", length(categories_ind))
  for(i in seq_along(categories_ind)){
    # [time, region, Estimate/SE] slab for this category.
    index_array_temp<- index_res_array[i, , , ]
    index_res_temp_est<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,1]) %>%
      pivot_longer(cols = -c(Time, Category), names_to = "Index_Region", values_to = "Index_Estimate")
    index_res_temp_sd<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,2]) %>%
      pivot_longer(cols = -c(Time, Category), names_to = "Index_Region", values_to = "Index_SD")
    index_res_list[[i]]<- index_res_temp_est %>%
      left_join(., index_res_temp_sd)
  }
  index_res_out<- bind_rows(index_res_list)
  # Label each row with its time step; assumes rows are ordered time-major
  # with all regions per time step (as produced by the loop above).
  index_res_out$Date<- rep(factor(all_times, levels = all_times), each = length(unique(index_res_out$Index_Region)))
  # Year is the leading number of the label; season labels (SPRING/SUMMER/
  # FALL) map to mid-season dates, otherwise mid-year is used.
  index_res_out<- index_res_out %>%
    mutate(., Year = as.numeric(gsub("([0-9]+).*$", "\\1", Date)))
  if(any(str_detect(as.character(index_res_out$Date), LETTERS))){
    index_res_out$Date<- as.Date(paste(index_res_out$Year, ifelse(grepl("SPRING", index_res_out$Date), "-04-15",
                                                                  ifelse(grepl("SUMMER", index_res_out$Date), "-07-15", "-10-15")), sep = ""))
  } else {
    index_res_out$Date<- as.Date(paste(index_res_out$Year, "-06-15", sep = ""))
  }
  # Save and return it
  write.csv(index_res_out, file = paste0(out_dir, "/Biomass_Index_", index_scale, "_", nice_category_names, ".csv"))
  return(index_res_out)
}
# Plot the abundance-index time series produced by get_vast_index_timeseries
# and save it to disk.
#
# Args:
#   index_res_df: long-format index data.frame (Date, Index_Estimate,
#     Index_SD, Index_Region, Year columns).
#   year_stop: optional; drop rows with Year >= year_stop before plotting.
#   index_scale: label ("raw"/"log") used in the output file name.
#   nice_category_names, nice_xlab, nice_ylab: plot title and axis labels.
#   paneling: only "none" is implemented (single panel, regions by color).
#   color_pal: optional color vector; defaults to a ColorBrewer-style set.
#   out_dir: directory where the JPG is saved.
#
# Returns: the ggplot object (also saved to disk).
plot_vast_index_timeseries<- function(index_res_df, year_stop = NULL, index_scale, nice_category_names, nice_xlab, nice_ylab, paneling = c("category", "index_region", "none"), color_pal = c('#66c2a5','#fc8d62','#8da0cb'), out_dir){
  paneling<- match.arg(paneling)
  # Only the single-panel layout exists; previously other values fell
  # through to an opaque "object 'plot_out' not found" error at ggsave().
  if(paneling != "none"){
    stop("Only paneling = 'none' is currently implemented", call. = FALSE)
  }
  if(!is.null(color_pal)){
    colors_use<- color_pal
  } else {
    color_pal<- c('#66c2a5','#fc8d62','#8da0cb','#e78ac3','#a6d854')
    colors_use<- color_pal[1:length(unique(index_res_df$Index_Region))]
  }
  # Filter based on years to plot
  if(!is.null(year_stop)){
    index_res_df<- index_res_df %>%
      filter(., Year < year_stop)
  }
  # Points with +/- 1 SD error bars, colored by index region.
  plot_out<- ggplot() +
    geom_errorbar(data = index_res_df, aes(x = Date, ymin = (Index_Estimate - Index_SD), ymax = (Index_Estimate + Index_SD), color = Index_Region, group = Index_Region)) +
    geom_point(data = index_res_df, aes(x = Date, y = Index_Estimate, color = Index_Region)) +
    scale_color_manual(values = colors_use) +
    scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
    xlab({{nice_xlab}}) +
    ylab({{nice_ylab}}) +
    ggtitle({{nice_category_names}}) +
    theme_bw() +
    theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
  # Save and return the plot
  ggsave(plot_out, file = paste0(out_dir, "/Biomass_Index_", index_scale, "_", nice_category_names, ".jpg"))
  return(plot_out)
}
######
## Plot parameter effects...
######
#' @title Adapts package \code{effects}
#'
#' @inheritParams effects::Effect
#' @param which_formula which formula to use e.g., \code{"X1"}
#'
#' @rawNamespace S3method(effects::Effect, fit_model)
#' @export
# Compute covariate effects for a fitted VAST model by adapting the fitted
# object so the `effects` package can treat it like a regression model.
# The selected formula's coefficients and their covariance are extracted from
# the TMB fit and wrapped in a fake model object passed to Effect.default().
#
# Args:
#   focal.predictors: character vector of covariate names to profile.
#   mod: fitted "fit_model" object (must carry an `effects` slot).
#   which_formula: which linear predictor to use: "X1", "X2", "Q1", or "Q2".
#   pad_values: indices of coefficients that were mapped off / fixed, to be
#     padded back with zeros so positions line up with the model matrix.
#   ...: forwarded to effects::Effect.default.
#
# Requires `covariate_data_full` and `catchability_data_full` in the global
# environment (the effects package re-evaluates the stored lm() call there).
Effect.fit_model_aja<- function(focal.predictors, mod, which_formula = "X1", pad_values = c(), ...){
  # Interactive debugging scratch -- never executed.
  if(FALSE){
    tar_load(vast_fit)
    focal.predictors = c("Depth", "SST_seasonal", "BT_seasonal")
    mod = fit_base
    which_formula = "X1"
    xlevels = 100
    pad_values = c(0)
    covariate_data_full<- mod$effects$covariate_data_full
    catchability_data_full<- mod$effects$catchability_data_full
  }
  # Error checks
  if(mod$data_list$n_c > 1 & which_formula %in% c("X1", "X2")){
    stop("`Effect.fit_model` is not currently designed for multivariate models using density covariates")
  }
  if(!all(c("covariate_data_full", "catchability_data_full") %in% ls(.GlobalEnv))){
    stop("Please load `covariate_data_full` and `catchability_data_full` into global memory")
  }
  if(!requireNamespace("effects")){
    stop("please install the effects package")
  }
  if(!("effects" %in% names(mod))){
    stop("`effects` slot not detected in input to `Effects.fit_model`. Please update model using later package version.")
  }
  # Identify formula-specific stuff: the formula, the name of the TMB
  # coefficient vector, and the stored dummy lm() call for that predictor.
  if(which_formula=="X1"){
    formula_orig = mod$X1_formula
    parname = "gamma1_cp"
    mod$call = mod$effects$call_X1
  }else if(which_formula=="X2"){
    formula_orig = mod$X2_formula
    parname = "gamma2_cp"
    mod$call = mod$effects$call_X2
  }else if(which_formula=="Q1"){
    formula_orig = mod$Q1_formula
    parname = "lambda1_k"
    mod$call = mod$effects$call_Q1
  }else if(which_formula=="Q2"){
    formula_orig = mod$Q2_formula
    parname = "lambda2_k"
    mod$call = mod$effects$call_Q2
  }else{
    stop("Check `which_formula` input")
  }
  # Extract parameters / covariance for the selected coefficient vector.
  # When no covariance is available (e.g. SD report skipped), fall back to a
  # zero matrix so point effects can still be computed.
  whichnum = which(names(mod$parameter_estimates$par) == parname)
  mod$parhat = mod$parameter_estimates$par[whichnum]
  if(is.null(mod$parameter_estimates$SD$cov.fixed)){
    mod$covhat = array(0, dim = rep(length(mod$parhat), 2))
  } else {
    mod$covhat = mod$parameter_estimates$SD$cov.fixed[whichnum, whichnum, drop = FALSE]
  }
  # # Fill in values that are mapped off
  # if(parname %in% names(mod$tmb_list$Obj$env$map)){
  #   mod$parhat = mod$parhat[mod$tmb_list$Obj$env$map[[parname]]]
  #   mod$covhat = mod$covhat[mod$tmb_list$Obj$env$map[[parname]], mod$tmb_list$Obj$env$map[[parname]], drop = FALSE]
  #   mod$parhat = ifelse(is.na(mod$parhat), 0, mod$parhat)
  #   mod$covhat = ifelse(is.na(mod$covhat), 0, mod$covhat)
  # }
  # add names
  names(mod$parhat)[] = parname
  # Re-insert zeros at `pad_values` positions so the coefficient vector and
  # covariance align with the full model-matrix columns.
  if(length(pad_values) != 0){
    parhat = rep(NA, length(mod$parhat) + length(pad_values))
    parhat[setdiff(1:length(parhat), pad_values)] = mod$parhat
    covhat = array(NA, dim = dim(mod$covhat) + rep(length(pad_values), 2))
    covhat[setdiff(1:length(parhat), pad_values), setdiff(1:length(parhat), pad_values)] = mod$covhat
    mod$parhat = ifelse(is.na(parhat), 0, parhat)
    mod$covhat = ifelse(is.na(covhat), 0, covhat)
    #parname = c("padded_intercept", parname)
  }
  #rownames(mod$covhat) = colnames(mod$covhat) = names(mod$parhat)
  # Augment stuff: disguise `mod` as a gaussian regression fit with the
  # extracted coefficients/vcov so effects:: can process it.
  formula_full = stats::update.formula(formula_orig, linear_predictor ~. + 0)
  mod$coefficients = mod$parhat
  mod$vcov = mod$covhat
  mod$formula = formula_full
  mod$family = stats::gaussian(link = "identity")
  # Interactive debugging scratch -- never executed.
  if( FALSE ){
    Tmp = model.matrix(formula_full, data=fit$effects$catchability_data )
  }
  # Functions for package
  family.fit_model = function(x,...) x$family
  vcov.fit_model = function(x,...) x$vcov
  # dummy functions to make Effect.default work
  dummyfuns = list(variance = function(mu) mu, initialize = expression(mustart = y + 0.1), dev.resids = function(...) stats::poisson()$dev.res(...) )
  # Replace family (for reasons I don't really understand)
  fam = mod$family
  for(i in names(dummyfuns)){
    if(is.null(fam[[i]])) fam[[i]] = dummyfuns[[i]]
  }
  # allow calculation of effects ...
  if(length(formals(fam$variance)) >1) {
    warning("overriding variance function for effects: computed variances may be incorrect")
    fam$variance = dummyfuns$variance
  }
  # Bundle arguments
  args = list(call = mod$call, coefficients = mod$coefficients, vcov = mod$vcov, family = fam, formula = formula_full)
  # Do call
  effects::Effect.default(focal.predictors, mod, ..., sources = args)
}
#' @title Get VAST covariate effects
#'
#' @description Computes marginal covariate effects for each covariate in
#' `params_plot` on both linear predictors (X1 and X2) of a fitted VAST model,
#' using `Effect.fit_model_aja()`, and stacks the results into one data frame.
#'
#' @param vast_fit A fitted VAST model (`fit_model` object) with an `effects` slot.
#' @param params_plot Character vector of covariate names to evaluate.
#' @param params_plot_levels Number of levels (`xlevels`) at which each covariate is evaluated.
#' @param effects_pad_values Passed through as `pad_values` to `Effect.fit_model_aja()`.
#' @param nice_category_names Label used in the output file name.
#' @param out_dir Directory where the result .rds file is written.
#' @param ... Unused; kept for call compatibility.
#'
#' @return A data frame of effect estimates with a `Lin_pred` column ("X1" or "X2"),
#'   also saved to `out_dir` as an .rds file.
get_vast_covariate_effects<- function(vast_fit, params_plot, params_plot_levels, effects_pad_values, nice_category_names, out_dir, ...){
  # Interactive-debugging values; never executed.
  if(FALSE){
    tar_load(vast_fit)
    params_plot<- c("Depth", "SST_seasonal", "BT_seasonal")
    params_plot_levels<- 100
    effects_pad_values = c(1)
    nice_category_names = "American lobster"
  }
  # `Effect.fit_model_aja()` looks these data sets up in the global environment.
  assign("covariate_data_full", vast_fit$effects$covariate_data_full, envir = .GlobalEnv)
  assign("catchability_data_full", vast_fit$effects$catchability_data_full, envir = .GlobalEnv)
  # One data frame per covariate x linear predictor; bound once at the end
  # instead of growing a data frame inside the loop.
  pred_dat_out<- bind_rows(lapply(params_plot, function(param){
    pred_dat_temp_X1<- data.frame(Effect.fit_model_aja(focal.predictors = param, mod = vast_fit, which_formula = "X1", xlevels = params_plot_levels, pad_values = effects_pad_values)) %>%
      mutate(., "Lin_pred" = "X1")
    pred_dat_temp_X2<- data.frame(Effect.fit_model_aja(focal.predictors = param, mod = vast_fit, which_formula = "X2", xlevels = params_plot_levels, pad_values = effects_pad_values)) %>%
      mutate(., "Lin_pred" = "X2")
    bind_rows(pred_dat_temp_X1, pred_dat_temp_X2)
  }))
  # Save and return it
  saveRDS(pred_dat_out, file = paste(out_dir, "/", nice_category_names, "_covariate_effects.rds", sep = ""))
  return(pred_dat_out)
}
#' @title Plot VAST covariate effects
#'
#' @description Plots fitted covariate effects (ribbon = lower/upper bounds,
#' line = fit) faceted by linear predictor and covariate, with a rug of the
#' observed covariate values along the x axis.
#'
#' @param vast_covariate_effects Data frame from `get_vast_covariate_effects()`.
#' @param vast_fit A fitted VAST model; `covariate_data` supplies the rug values.
#' @param nice_category_names Label used in the output file name.
#' @param out_dir Directory where the plot is saved.
#' @param ... Unused; kept for call compatibility.
#'
#' @return A ggplot object (also saved to `out_dir` as a .jpg).
plot_vast_covariate_effects<- function(vast_covariate_effects, vast_fit, nice_category_names, out_dir, ...){
  # Interactive-debugging values; never executed.
  if(FALSE){
    vast_covariate_effects<- read_rds(file = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/tables/American lobster_covariate_effects.rds")
    tar_load(vast_fit)
    vast_covariate_effects = pred_dat_out
    vast_fit = fit_base
    nice_category_names = "American lobster"
    plot_rows = 2
    out_dir = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/plots_maps/"
  }
  # Reshape to long format: one row per (covariate, value) combination,
  # keeping the fit/uncertainty columns and linear-predictor label.
  names_stay<- c("fit", "se", "lower", "upper", "Lin_pred")
  vast_cov_eff_l<- vast_covariate_effects %>%
    pivot_longer(., names_to = "Variable", values_to = "Covariate_Value", -{{names_stay}}) %>%
    drop_na(Covariate_Value)
  # Ribbon spans the lower/upper bounds; one facet per linear predictor x covariate.
  plot_out<- ggplot() +
    geom_ribbon(data = vast_cov_eff_l, aes(x = Covariate_Value, ymin = lower, ymax = upper), fill = "#bdbdbd") +
    geom_line(data = vast_cov_eff_l, aes(x = Covariate_Value, y = fit)) +
    xlab("Scaled covariate value") +
    ylab("Linear predictor fitted value") +
    facet_grid(Lin_pred ~ Variable, scales = "free") +
    theme_bw() +
    theme(strip.background = element_blank())
  # Add observed covariate values as a rug along the x axis.
  names_keep<- unique(vast_cov_eff_l$Variable)
  samp_dat<- vast_fit$covariate_data %>%
    dplyr::select(all_of(names_keep)) %>%
    pivot_longer(., cols = everything(), names_to = "Variable", values_to = "Covariate_Value")
  plot_out2<- plot_out +
    geom_rug(data = samp_dat, aes(x = Covariate_Value))
  # Save and return it
  ggsave(plot_out2, file = paste(out_dir, "/", nice_category_names, "_covariate_effects.jpg", sep = ""))
  return(plot_out2)
}
######
## Plot samples, knots and mesh
######
#' @title Plot samples, extrapolation grid and INLA mesh
#'
#' @description Three-panel design plot for a fitted VAST model: a raster of
#' tow-sample counts, the same base map with the spatial extrapolation grid
#' overlaid, and the INLA mesh triangulation. The combined plot is saved to
#' `out_dir` and returned.
#'
#' @param vast_fit A fitted VAST model (`fit_model` object).
#' @param land Land polygons as an `sf` object.
#' @param spat_grid Path to a raster grid file used as the sample-count template.
#' @param xlim Two-element vector of longitude limits.
#' @param ylim Two-element vector of latitude limits.
#' @param land_color Fill color for land polygons.
#' @param out_dir Directory where the combined plot is saved.
#' @return A patchwork object combining the three panels.
vast_plot_design<- function(vast_fit, land, spat_grid, xlim = c(-80, -55), ylim = c(35, 50), land_color = "#f0f0f0", out_dir){
# Interactive-debugging values; never executed.
if(FALSE){
tar_load(vast_fit)
tar_load(land_sf)
spat_grid = "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/predict/predict_stack_SST_seasonal_mean.grd"
land = land_sf
xlim = c(-80, -55)
ylim = c(35, 50)
land_color = "#f0f0f0"
vast_fit = vast_fitted
land = land_use
spat_grid = spat_grid
xlim = xlim_use
ylim = ylim_use
land_color = "#f0f0f0"
out_dir = main_dir
}
# Read in the first layer of the template raster.
# NOTE(review): `rotate()` is called unqualified — presumably raster::rotate
# (shifts 0-360 longitudes to -180-180); confirm which package is attached.
spat_grid<- rotate(raster::stack(spat_grid)[[1]])
# Unique sample locations, as sf points in the land layer's CRS.
samp_dat<- vast_fit$data_frame %>%
distinct(., Lon_i, Lat_i, .keep_all = TRUE) %>%
st_as_sf(., coords = c("Lon_i", "Lat_i"), remove = FALSE, crs = st_crs(land))
# Count of unique sample locations per raster cell.
cell_samps<- table(cellFromXY(spat_grid, data.frame("x" = samp_dat$Lon_i, "y" = samp_dat$Lat_i)))
# Put counts back into the raster; cells with no samples stay 0 (set to NA below).
spat_grid[]<- 0
spat_grid[as.numeric(names(cell_samps))]<- cell_samps
spat_grid_plot<- as.data.frame(spat_grid, xy = TRUE)
names(spat_grid_plot)[3]<- "Samples"
spat_grid_plot$Samples<- ifelse(spat_grid_plot$Samples == 0, NA, spat_grid_plot$Samples)
# Panel 1: tow-sample counts over the land base map.
tow_samps<- ggplot() +
geom_tile(data = spat_grid_plot, aes(x = x, y = y, fill = Samples)) +
scale_fill_gradient2(name = "Tow samples", low = "#bdbdbd", high = "#525252", na.value = "white") +
geom_sf(data = land, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = 0) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")) +
ggtitle("Tow samples")
# Panel 2: extrapolation-grid points over the same base map.
# Unique extrapolation-grid locations from the fitted model.
spat_data<- vast_fit$extrapolation_list
extrap_grid<- data.frame("Lon" = as.numeric(spat_data$Data_Extrap$Lon), "Lat" = as.numeric(spat_data$Data_Extrap$Lat)) %>%
distinct(., Lon, Lat)
tow_samps_grid<- tow_samps +
geom_point(data = extrap_grid, aes(x = Lon, y = Lat), fill = "#41ab5d", pch = 21, size = 0.75) +
ggtitle("VAST spatial extrapolation grid")
# Panel 3: INLA mesh triangles (converted to sf) over the base map.
mesh_sf<- vast_mesh_to_sf(vast_fit, crs_transform = "+proj=longlat +datum=WGS84 +no_defs")$triangles
tow_samps_mesh<- tow_samps +
geom_sf(data = land, fill = land_color, lwd = 0.2, na.rm = TRUE) +
geom_sf(data = mesh_sf, fill = NA, color = "#41ab5d") +
coord_sf(xlim = xlim, ylim = ylim, expand = 0) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")) +
ggtitle("INLA Mesh")
# Combine the three panels side by side (patchwork `+` operator).
plot_out<- tow_samps + tow_samps_grid + tow_samps_mesh
# Save it
ggsave(plot_out, file = paste(out_dir, "/", "samples_grid_knots_plot.jpg", sep = ""), height = 8, width = 11)
return(plot_out)
}
#####
## Plot covariate values
#####
#' @title Plot spatio-temporal covariate time series
#'
#' @description For each covariate raster stack matching `summarize` and
#' `ensemble_stat`, extracts region-mean predicted values, pairs them with
#' region-mean observed (tow) values, and saves one plot per covariate to
#' `out_dir`. Depth is plotted as distributions; all other covariates as
#' time series with observed points overlaid.
#'
#' @param predict_covariates_stack_agg Directory containing `predict_stack_*.grd` files.
#' @param summarize Temporal summary tag in the file names (e.g. "seasonal").
#' @param ensemble_stat Ensemble statistic tag in the file names (e.g. "mean").
#' @param all_tows_with_all_covs Data frame of tow locations with observed covariate values.
#' @param regions sf polygon object with a `Region` column used to summarize values.
#' @param land Land sf object (currently unused; kept for call compatibility).
#' @param out_dir Directory where plots are saved.
#'
#' @return Invisibly NULL; called for its side effect of saving plots.
plot_spattemp_cov_ts<- function(predict_covariates_stack_agg, summarize = "seasonal", ensemble_stat = "mean", all_tows_with_all_covs, regions, land, out_dir){
  # Interactive-debugging values; never executed.
  if(FALSE){
    tar_load(predict_covariates_stack_agg_out)
    predict_covariates_stack_agg<- predict_covariates_stack_agg_out
    summarize = "seasonal"
    ensemble_stat = "mean"
    tar_load(all_tows_with_all_covs)
    tar_load(land_sf)
    land = land_sf
    tar_load(index_shapefiles)
    out_dir<- "~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/results/plots_maps/"
  }
  # NOTE(review): the original body referenced the global `index_shapefiles`
  # while the `regions` argument went unused; `regions` is used here instead.
  # Confirm callers pass the same object that used to live in the global env.
  # Region display order and color palette shared by predictions and observations.
  region_levels<- c("NMFS_and_DFO", "DFO", "Scotian_Shelf", "NMFS", "Gulf_of_Maine", "Georges_Bank", "Southern_New_England", "Mid_Atlantic_Bight")
  region_colors<- c('#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d','#666666')
  # Map "2015-Spring"-style season labels to representative mid-season dates.
  season_to_date<- function(x){
    as.Date(paste(as.numeric(gsub("([0-9]+).*$", "\\1", x)), ifelse(grepl("Spring", x), "-04-15", ifelse(grepl("Summer", x), "-07-15", ifelse(grepl("Winter", x), "-12-15", "-10-15"))), sep = ""))
  }
  # Covariate raster stacks matching the requested summary/ensemble statistic.
  rast_files_load<- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd"), full.names = TRUE)
  cov_files<- list.files(predict_covariates_stack_agg, pattern = paste0(summarize, "_", ensemble_stat, ".grd"), full.names = FALSE)
  for(i in seq_along(rast_files_load)){
    # Covariate name is encoded in the file name.
    predict_covs_names<- gsub(paste("_", ensemble_stat, ".grd", sep = ""), "", gsub("predict_stack_", "", cov_files[i]))
    # Region-mean predicted values, one column per time step.
    spattemp_summs<- data.frame(raster::extract(raster::rotate(raster::stack(rast_files_load[i])), regions, fun = mean))
    spattemp_summs$Region<- factor(unique(as.character(regions$Region)), levels = region_levels)
    spattemp_summs<- spattemp_summs %>%
      drop_na(., Region)
    # Long format with one row per (time step, region), plus a Date column.
    spattemp_summs_df<- spattemp_summs %>%
      pivot_longer(., names_to = "Time", values_to = "Value", -Region) %>%
      mutate(., Date = season_to_date(gsub("X", "", gsub("[.]", "-", Time))))
    # Observed (tow) values of this covariate.
    cov_dat<- all_tows_with_all_covs %>%
      dplyr::select(., Season_Match, DECDEG_BEGLON, DECDEG_BEGLAT, all_of(predict_covs_names))
    cov_dat$Date<- season_to_date(cov_dat$Season_Match)
    # Assign each tow to a region, then average the covariate per date x region.
    cov_dat<- cov_dat %>%
      st_as_sf(., coords = c("DECDEG_BEGLON", "DECDEG_BEGLAT"), crs = st_crs(regions), remove = FALSE) %>%
      st_join(., regions, join = st_within) %>%
      st_drop_geometry()
    cov_dat_plot<- cov_dat %>%
      group_by(., Date, Region) %>%
      summarize_at(., .vars = all_of(predict_covs_names), .funs = mean, na.rm = TRUE)
    cov_dat_plot$Region<- factor(cov_dat_plot$Region, levels = region_levels)
    cov_dat_plot<- cov_dat_plot %>%
      drop_na(., c(all_of(predict_covs_names), Region))
    # Plot: Depth is static so show distributions; everything else is a time series.
    if(predict_covs_names == "Depth"){
      plot_out<- ggplot() +
        geom_histogram(data = spattemp_summs_df, aes(y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = region_colors) +
        geom_histogram(data = cov_dat_plot, aes(y = Depth), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    } else {
      # `.data[[predict_covs_names]]` selects the observed column matching this
      # covariate, replacing the four duplicated per-covariate branches.
      plot_out<- ggplot() +
        geom_line(data = spattemp_summs_df, aes(x = Date, y = Value, color = Region)) +
        scale_color_manual(name = "Region", values = region_colors) +
        geom_point(data = cov_dat_plot, aes(x = Date, y = .data[[predict_covs_names]]), fill = "black", pch = 21, alpha = 0.2) +
        facet_wrap(~Region, nrow = 2) +
        theme_bw()
    }
    ggsave(paste(out_dir, "/", predict_covs_names, "_covariate_plot.jpg", sep = ""), plot_out)
  }
  invisible(NULL)
}
#####
## VAST inla mesh to sf object
#####
#' @title Convert VAST INLA mesh to sf object
#'
#' @description Convert an `inla.mesh` to `sf` objects. Adapted from David Keith
#' (https://github.com/Dave-Keith/Paper_2_SDMs/blob/master/mesh_build_example/convert_inla_mesh_to_sf.R)
#' and Finn Lindgren
#' (https://groups.google.com/forum/#!topic/r-inla-discussion-group/z1n1exlZrKM).
#'
#' @param vast_fit A fitted VAST model
#' @param crs_transform Optional CRS to transform the mesh into
#' @return A list with \code{sf} objects for triangles and vertices:
#' \describe{
#'   \item{triangles}{Polygon \code{sf} object with the triangles in the same
#'     order as in the original mesh, but each triangle looping through the
#'     vertices in clockwise order (\code{sp} standard) instead of
#'     counterclockwise order (\code{inla.mesh} standard). The attached data
#'     contains the vertex indices for each triangle, which is needed to link to
#'     functions defined on the vertices of the triangulation.}
#'   \item{vertices}{Point \code{sf} object with the vertex coordinates, in the
#'     same order as in the original mesh.}
#' }
#' @export
#
vast_mesh_to_sf <- function(vast_fit, crs_transform = "+proj=longlat +datum=WGS84 +no_defs") {
# Interactive-debugging values; never executed.
if(FALSE){
tar_load(vast_fit)
crs_transform = "+proj=longlat +datum=WGS84 +no_defs"
}
# Hard dependencies: sp/sf for geometry, INLA for the mesh helpers.
require(sp) || stop("Install sp, else thine code shan't work for thee")
require(sf) || stop('Install sf or this code will be a mess')
require(INLA) || stop("You need the R-INLA package for this, note that it's not crantastic...
install.packages('INLA', repos=c(getOption('repos'), INLA='https://inla.r-inla-download.org/R/stable'), dep=TRUE)")
# Get the extrapolation mesh information from the vast_fitted object
mesh<- vast_fit$spatial_list$MeshList$anisotropic_mesh
# Attach the extrapolation list's projection string as the mesh CRS.
mesh['crs']<- vast_fit$extrapolation_list$projargs
# Grab the CRS if it exists, NA is fine (NULL spits a warning, but is also fine)
crs <- sp::CRS(mesh$crs)
# Make sure the CRS isn't a geocentric one, which is won't be if yo look up geocentric..
#isgeocentric <- identical(inla.as.list.CRS(crs)[["proj"]], "geocent")
isgeocentric <- inla.crs_is_geocent(mesh$crs)
# Geocentric coordinates (or a spherical S2 manifold) cannot be represented as
# sp/sf polygons, so bail out with instructions.
if (isgeocentric || (mesh$manifold == "S2")) {
stop(paste0(
"'sp and sf' don't support storing polygons in geocentric coordinates.\n",
"Convert to a map projection with inla.spTransform() before calling inla.mesh2sf()."))
}
# Build one polygon per mesh triangle. `graph$tv` holds the three vertex
# indices of each triangle; re-ordering as c(1, 3, 2, 1) closes the ring and
# flips the winding from counterclockwise (INLA) to clockwise (sp).
triangles <- SpatialPolygonsDataFrame(Sr = SpatialPolygons(
lapply(
1:nrow(mesh$graph$tv),
function(x) {
tv <- mesh$graph$tv[x, , drop = TRUE]
Polygons(list(Polygon(mesh$loc[tv[c(1, 3, 2, 1)],1:2,drop = FALSE])),ID = x)
}
),
proj4string = crs
),
data = as.data.frame(mesh$graph$tv[, c(1, 3, 2), drop = FALSE]),
match.ID = FALSE
)
# This one is easy, just grab the vertices (points)
vertices <- SpatialPoints(mesh$loc[, 1:2, drop = FALSE], proj4string = crs)
# Make these sf objects
triangles <- st_as_sf(triangles)
vertices <- st_as_sf(vertices)
# Optionally reproject both layers (default: WGS84 lon/lat).
if(!is.null(crs_transform)){
triangles<- st_transform(triangles, crs = crs_transform)
vertices<- st_transform(vertices, crs = crs_transform)
}
# Add your output list.
return_sf<- list(triangles = triangles, vertices = vertices)
return(return_sf)
}
#' @title Plot VAST model spatial and spatio-temporal surfaces
#'
#' @description Creates either a panel plot or a gif of VAST model spatial or spatio-temporal parameter surfaces or derived quantities
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param spatial_var = An estimated spatial coefficient or predicted value. Currently works for `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`.
#' @param nice_category_names = A character string (e.g. species name) used to label the saved plot files
#' @param all_times = A vector of all of the unique time steps available from the VAST fitted model
#' @param plot_times = Either NULL to make a plot for each time in `all_times` or a vector of all of the times to plot, which must be a subset of `all_times`
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param panel_or_gif = A character string of either "panel" or "gif" indicating how the multiple plots across time steps should be displayed
#' @param out_dir = Output directory to save the panel plot or gif
#'
#' @return A VAST fit_model object, with the inputs and and outputs, including parameter estimates, extrapolation gid info, spatial list info, data info, and TMB info.
#'
#' @export
vast_fit_plot_spatial<- function(vast_fit, spatial_var, nice_category_names, mask, all_times = all_times, plot_times = NULL, land_sf, xlim, ylim, panel_or_gif = "gif", out_dir, land_color = "#d9d9d9", panel_cols = NULL, panel_rows = NULL, ...){
# Interactive-debugging values; never executed.
if(FALSE){
tar_load(vast_fit)
template = raster("~/GitHub/sdm_workflow/scratch/aja/TargetsSDM/data/supporting/HighResTemplate.grd")
tar_load(vast_seasonal_data)
all_times = as.character(levels(vast_seasonal_data$VAST_YEAR_SEASON))
plot_times = NULL
tar_load(land_sf)
tar_load(region_shapefile)
mask = region_shapefile
land_color = "#d9d9d9"
res_data_path = "~/Box/RES_Data/"
xlim = c(-85, -55)
ylim = c(30, 50)
panel_or_gif = "gif"
panel_cols = NULL
panel_rows = NULL
vast_fit = vast_fitted
spatial_var = "D_gct"
nice_category_names = "Atlantic halibut"
mask = region_shape
all_times = as.character(unique(vast_sample_data$EST_YEAR))
plot_times = NULL
land_sf = land_use
xlim = xlim_use
ylim = ylim_use
panel_or_gif = "panel"
out_dir = here::here("", "results/plots_maps")
land_color = "#d9d9d9"
panel_cols = 6
panel_rows = 7
}
# Only these report slots are supported.
if(!spatial_var %in% c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct", "Omega1_gc", "Omega2_gc", "Epsilon1_gct", "Epsilon2_gct")){
stop("Check `spatial_var` input. Currently must be one of `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`.")
}
# Extract the predicted array; densities are log(x + 1)-transformed for plotting.
pred_array<- vast_fit$Report[[spatial_var]]
if(spatial_var == "D_gct"){
pred_array<- log(pred_array+1)
}
# Subset the time steps to plot.
# Fix: the original indexed with `which(all_times)`, which errors on
# character input; a plain membership filter is the intended behavior.
if(!is.null(plot_times)){
plot_times<- all_times[all_times %in% plot_times]
} else {
plot_times<- all_times
}
# Spatial info: extrapolation-grid locations and coordinate reference systems.
spat_data<- vast_fit$extrapolation_list
loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
CRS_orig<- sp::CRS("+proj=longlat")
CRS_proj<- sp::CRS(spat_data$projargs)
land_sf<- st_crop(land_sf, xmin = xlim[1], ymin = ylim[1], xmax = xlim[2], ymax = ylim[2])
# One output slot per time step (last array dimension).
rasts_out<- vector("list", dim(pred_array)[length(dim(pred_array))])
# Common fill limits across all time steps so panels/frames are comparable:
# non-negative quantities start at 0, others span the observed range.
if(spatial_var %in% c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct")){
rast_lims<- c(0, round(max(pred_array) + 0.0000001, 2))
} else {
rast_lims<- c(min(pred_array), max(pred_array))
}
# Helper: interpolate knot values onto a regular lon/lat grid and clip to `mask`.
# NOTE(review): `interp()` is called unqualified — presumably akima/interp,
# attached elsewhere; confirm.
interp_and_mask<- function(data_df){
pred_df<- na.omit(data.frame("x" = data_df$Lon, "y" = data_df$Lat, "layer" = data_df$z))
pred_df_interp<- interp(pred_df[,1], pred_df[,2], pred_df[,3], duplicate = "mean", extrap = TRUE,
xo=seq(-87.99457, -57.4307, length = 115),
yo=seq(22.27352, 48.11657, length = 133))
pred_df_interp_final<- data.frame(expand.grid(x = pred_df_interp$x, y = pred_df_interp$y), z = c(round(pred_df_interp$z, 2)))
pred_sp<- st_as_sf(pred_df_interp_final, coords = c("x", "y"), crs = CRS_orig)
pred_df_temp<- pred_sp[which(st_intersects(pred_sp, mask, sparse = FALSE) == TRUE),]
coords_keep<- as.data.frame(st_coordinates(pred_df_temp))
row.names(coords_keep)<- NULL
pred_df_use<- data.frame(cbind(coords_keep, "z" = as.numeric(pred_df_temp$z)))
names(pred_df_use)<- c("x", "y", "z")
return(pred_df_use)
}
if(length(dim(pred_array)) == 2){
# Static (time-invariant) surface, e.g. the Omega fields: one map, saved and returned.
pred_df_use<- interp_and_mask(data.frame(loc_g, z = pred_array))
plot_out<- ggplot() +
geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
scale_fill_viridis_c(name = spatial_var, option = "viridis", na.value = "transparent", limits = rast_lims) +
annotate("text", x = -65, y = 37.5, label = spatial_var) +
geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
ggsave(filename = paste(out_dir, "/", nice_category_names, "_", spatial_var, ".png", sep = ""), plot_out, width = 11, height = 8, units = "in")
return(plot_out)
} else {
# Spatio-temporal surface: one map per time step.
# NOTE(review): only the first category (`pred_array[, 1, tI]`) is plotted —
# confirm this is intended for multi-category models.
for (tI in seq_len(dim(pred_array)[3])) {
pred_df_use<- interp_and_mask(data.frame(loc_g, z = pred_array[,1,tI]))
time_plot_use<- plot_times[tI]
rasts_out[[tI]]<- ggplot() +
geom_tile(data = pred_df_use, aes(x = x, y = y, fill = z)) +
scale_fill_viridis_c(name = spatial_var, option = "viridis", na.value = "transparent", limits = rast_lims) +
annotate("text", x = -65, y = 37.5, label = time_plot_use) +
geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
}
if(panel_or_gif == "panel"){
# Panel plot with a single shared legend.
all_plot<- wrap_plots(rasts_out, ncol = panel_cols, nrow = panel_rows, guides = "collect", theme(plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt")))
ggsave(filename = paste0(out_dir, "/", nice_category_names, "_", spatial_var, ".png"), all_plot, width = 11, height = 8, units = "in")
return(all_plot)
} else {
# Animated gif: print each frame in order while save_gif records the device.
plot_loop_func<- function(plot_list){
for (i in seq_along(plot_list)) {
print(plot_list[[i]])
}
}
invisible(save_gif(plot_loop_func(rasts_out), paste0(out_dir, "/", nice_category_names, "_", spatial_var, ".gif"), delay = 0.75, progress = FALSE))
}
}
}
#' @title Get VAST point predictions
#'
#' @description Generates a dataframe with observed and VAST model predictions at sample locations
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param use_PredTF_only = Logical TRUE/FALSE. If TRUE, then only the locations specified as PredTF == 1 will be extracted. Otherwise, all points will be included.
#' @param nice_category_names = A character string (e.g. species name) used to label the saved output file
#' @param out_dir = Output directory to save the dataset
#'
#' @return A dataframe with lat, lon, observations and model predictions
#'
#' @export
#' Assembles observed values and model predictions at each VAST sample
#' location, optionally restricted to prediction-only (PredTF == 1) samples;
#' saves the result to `out_dir` and returns it.
vast_get_point_preds<- function(vast_fit, use_PredTF_only, nice_category_names, out_dir){
  # Interactive-debugging values; never executed.
  if(FALSE){
    vast_fit = vast_fitted
    use_PredTF_only = FALSE
    nice_category_names<- "Atlantic halibut"
    out_dir = here::here("", "results/tables")
  }
  # Observed samples: location, biomass and time step, renamed for readability.
  obs_dat<- vast_fit$data_frame[, c("Lat_i", "Lon_i", "b_i", "t_i")]
  names(obs_dat)<- c("Lat", "Lon", "Biomass", "Year")
  # Presence/absence indicator derived from observed biomass.
  obs_dat$Presence<- ifelse(obs_dat$Biomass > 0, 1, 0)
  # Model predictions at the same sample locations.
  report<- vast_fit$Report
  samp_pred_out<- data.frame(obs_dat,
                             "Predicted_ProbPresence" = report$R1_i,
                             "Predicted_Biomass" = report$D_i)
  # PredTF_i == 1 flags samples used only for prediction, not in the likelihood.
  samp_pred_out$PredTF_i<- vast_fit$data_list$PredTF_i
  # Optionally keep only the prediction-only samples.
  if(use_PredTF_only){
    samp_pred_out<- samp_pred_out[samp_pred_out$PredTF_i == 1, , drop = FALSE]
    row.names(samp_pred_out)<- NULL
  }
  # Persist alongside the other model outputs, then return.
  saveRDS(samp_pred_out, paste0(out_dir, "/", nice_category_names, "_obs_pred.rds"))
  return(samp_pred_out)
}
#' @title Get VAST knot predictions for spatial or spatio-temporal parameters/derived quantities
#'
#' @description Generates a dataframe with VAST model spatial or spatio-temporal parameters/derived quantities at each knot location
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param spatial_var = An estimated spatial coefficient or predicted value. Currently works for `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`.
#' @param nice_category_names
#' @param out_dir = Output directory to save the dataframe
#'
#' @return A dataframe with lat, lon, observations and model predictions
#'
#' @export
vast_get_extrap_spatial<- function(vast_fit,spatial_var, nice_category_names, out_dir){
# Interactive-debugging values; never executed.
if(FALSE){
vast_fit = vast_fitted
spatial_var = "D_gct"
nice_category_names<- "Atlantic_halibut"
out_dir = here::here("", "results/tables")
}
# Only these report slots are supported.
if(!spatial_var %in% c("D_gct", "R1_gct", "R2_gct", "P1_gct", "P2_gct", "Omega1_gc", "Omega2_gc", "Epsilon1_gct", "Epsilon2_gct")){
stop(print("Check `spatial_var` input. Currently must be one of `D_gct`, `R1_gct`, `R2_gct`, `P1_gct`, `P2_gct`, `Omega1_gc`, `Omega2_gc`, `Epsilon1_gct`, `Epsilon2_gct`."))
}
# Extract the requested array from the model report; densities are stored on
# the log(x + 1) scale.
pred_array<- vast_fit$Report[[{{spatial_var}}]]
if(spatial_var == "D_gct"){
pred_array<- log(pred_array+1)
}
# Time-step labels from the fitted model.
times<- as.character(vast_fit$year_labels)
# Extrapolation-grid locations flagged for inclusion.
spat_data<- vast_fit$extrapolation_list
loc_g<- spat_data$Data_Extrap[which(spat_data$Data_Extrap[, "Include"] > 0), c("Lon", "Lat")]
# Wide data frame: one row per grid cell, one column per time step.
# NOTE(review): as.data.frame() on a g x c x t array yields one column per
# time step only when there is a single category -- confirm n_c == 1 here.
df_out_temp<- as.data.frame(pred_array)
colnames(df_out_temp) = paste0("Time_", times)
df_out_temp<- cbind(loc_g, df_out_temp)
# Long format: one row per (time, location), named after the spatial variable.
df_out<- df_out_temp %>%
pivot_longer(., cols = !c("Lon", "Lat"), names_to = "Time", values_to = {{spatial_var}}) %>%
arrange(., Time, Lon, Lat)
# Save and return it
saveRDS(df_out, paste0(out_dir, "/", nice_category_names, "_", spatial_var, "_df.rds"))
return(df_out)
}
#' @title Plot VAST center of gravity
#'
#' @description Plots the model-estimated center of gravity of the species distribution: a map of yearly center-of-gravity locations plus longitude and latitude time series with +/- 1 SE ribbons.
#'
#' @param vast_fit = A VAST `fit_model` object.
#' @param land_sf = Land sf object
#' @param xlim = A two element vector with the min and max longitudes
#' @param ylim = A two element vector with the min and max latitudes
#' @param nice_category_names = Species name
#' @param out_dir = Output directory to save the dataset
#'
#' @return A patchwork plot (map over longitude/latitude time series), also saved to `out_dir` as a .jpg.
#'
#' @export
vast_plot_cog<- function(vast_fit, all_times, summarize = TRUE, land_sf, xlim, ylim, nice_category_names, land_color = "#d9d9d9", color_pal = NULL, out_dir){
# Interactive-debugging values; never executed.
if(FALSE){
tar_load(vast_fit)
all_times = levels(vast_seasonal_data$VAST_YEAR_SEASON)
tar_load(land_sf)
land_sf = land_sf
xlim = c(-80, -55)
ylim = c(35, 50)
nice_category_names<- nice_category_names
land_color = "#d9d9d9"
out_dir = paste0(res_root, "plots_maps")
vast_fit = vast_fitted
all_times = unique(vast_sample_data$Year)
summarize = TRUE
land_sf = land_use
xlim = xlim_use
ylim = ylim_use
nice_category_names = "Atlantic_halibut"
land_color = "#d9d9d9"
color_pal = NULL
out_dir = here::here("", "results/plots_maps")
}
# TMB data list and sdreport from the fitted model.
TmbData<- vast_fit$data_list
Sdreport<- vast_fit$parameter_estimates$SD
# Time series steps
time_ind<- 1:TmbData$n_t
time_labels<- sort(unique(vast_fit$data_frame$t_i)[time_ind])
# Categories
categories_ind<- 1:TmbData$n_c
# Estimates and standard errors for all reported quantities.
SD<- TMB::summary.sdreport(Sdreport)
SD_stderr<- TMB:::as.list.sdreport(Sdreport, what = "Std. Error", report = TRUE)
SD_estimate<- TMB:::as.list.sdreport(Sdreport, what = "Estimate", report = TRUE)
# Bias-corrected estimates, when the model was run with bias correction.
if(vast_fit$settings$bias.correct == TRUE && "unbiased" %in% names(Sdreport)){
SD_estimate_biascorrect<- TMB:::as.list.sdreport(Sdreport, what = "Std. (bias.correct)", report = TRUE)
}
# Center-of-gravity array: category x time x (Lon, Lat) x (Estimate, Std. Error).
# NOTE(review): the fill below assumes the "mean_Z_ctm" rows of SD are ordered
# to match the array's column-major filling order -- TODO confirm.
mean_Z_ctm = array(NA, dim = c(unlist(TmbData[c('n_c','n_t')]), 2, 2), dimnames = list(categories_ind, time_labels, c('Lon', 'Lat'), c('Estimate','Std. Error')))
mean_Z_ctm[] = SD[which(rownames(SD) == "mean_Z_ctm"), c('Estimate','Std. Error')]
index_res_array = mean_Z_ctm
# Flatten the array into a plottable data frame, one category at a time.
for(i in seq_along(categories_ind)){
index_array_temp<- index_res_array[i, , , ]
# Point estimates (Lon/Lat) and their standard errors for this category.
index_res_temp_est<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,1])
index_res_temp_sd<- data.frame("Time" = as.numeric(rownames(index_array_temp[,,1])), "Category" = categories_ind[i], index_array_temp[,,2])
names(index_res_temp_sd)[3:4]<- c("Lon_SD", "Lat_SD")
# +/- 1 SE bounds around the center of gravity.
index_res_temp_out<- index_res_temp_est %>%
left_join(., index_res_temp_sd) %>%
mutate(., "Lon_Min" = Lon - Lon_SD,
"Lon_Max" = Lon + Lon_SD,
"Lat_Min" = Lat - Lat_SD,
"Lat_Max" = Lat + Lat_SD)
if(i == 1){
index_res_out<- index_res_temp_out
} else {
index_res_out<- bind_rows(index_res_out, index_res_temp_out)
}
}
# Get date info instead of time..
# if(!is.null(vast_fit$covariate_data)){
# year_start<- min(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))
#
# if(any(grepl("Season", vast_fit$X1_formula))){
# seasons<- nlevels(unique(vast_fit$covariate_data$Season))
# if(seasons == 3){
# time_labels_use<- paste(rep(seq(from = year_start, to = max(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))), each = 3), rep(c("SPRING", "SUMMER", "FALL")), sep = "-")
# }
# } else {
# time_labels_use<- paste(rep(seq(from = year_start, to = max(as.numeric(as.character(vast_fit$covariate_data$Year_Cov)))), each = 1), rep(c("FALL")), sep = "-")
# }
#
# index_res_out$Date<- factor(all_times, levels = time_labels_use)
#
# } else {
# # Just basic years...
# time_labels_use<- seq(from = min(vast_fit$year_labels), to = max(vast_fit$year_labels))
# index_res_out$Date<- factor(time_labels_use, levels = time_labels_use)
# }
#
# Label rows with the user-supplied time labels.
# NOTE(review): assumes `all_times` is ordered to match the model's time steps
# and that its length equals nrow(index_res_out) -- TODO confirm.
index_res_out$Date<- factor(all_times, levels = all_times)
# Extract the leading year from each label.
index_res_out<- index_res_out %>%
mutate(., Year = as.numeric(gsub("([0-9]+).*$", "\\1", Date)))
# Season labels (e.g. "2015-SPRING") get representative mid-season dates;
# plain numeric years get mid-June.
if(any(str_detect(as.character(index_res_out$Date), LETTERS))){
index_res_out$Date<- as.Date(paste(index_res_out$Year, ifelse(grepl("SPRING", index_res_out$Date), "-04-15",
ifelse(grepl("SUMMER", index_res_out$Date), "-07-15", "-10-15")), sep = ""))
} else {
index_res_out$Date<- as.Date(paste(index_res_out$Year, "-06-15", sep = ""))
}
# Optionally average the seasonal values to one row per year and category.
if(summarize){
index_res_out<- index_res_out %>%
group_by(., Year, Category, .drop = FALSE) %>%
summarize_at(., vars(c("Lon", "Lat", "Lon_Min", "Lon_Max", "Lat_Min", "Lat_Max")), mean, na.rm = TRUE)
}
# Making our plots...
# First, the map: center-of-gravity points in the model's projected CRS.
cog_sf<- st_as_sf(index_res_out, coords = c("Lon", "Lat"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS)
# Transform to be in WGS84
cog_sf_wgs84<- st_transform(cog_sf, st_crs(land_sf))
# Base map: points colored by year over the land layer.
cog_plot<- ggplot() +
geom_sf(data = cog_sf_wgs84, aes(fill = Year), size = 2, shape = 21) +
scale_fill_viridis_c(name = "Year", limits = c(min(cog_sf_wgs84$Year), max(cog_sf_wgs84$Year))) +
geom_sf(data = land_sf, fill = land_color, lwd = 0.2, na.rm = TRUE) +
coord_sf(xlim = xlim, ylim = ylim, expand = FALSE) +
theme(panel.background = element_rect(fill = "white"), panel.border = element_rect(fill = NA), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title = element_blank(), plot.margin = margin(t = 0.05, r = 0.05, b = 0.05, l = 0.05, unit = "pt"))
# Now, the lon/lat time series
lon_lat_df<- cog_sf_wgs84 %>%
data.frame(st_coordinates(.))
# Lower (min) bound coordinates, reprojected to WGS84.
lon_lat_min<- st_as_sf(index_res_out, coords = c("Lon_Min", "Lat_Min"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS) %>%
st_transform(., st_crs(land_sf)) %>%
data.frame(st_coordinates(.)) %>%
dplyr::select(c("X", "Y"))
names(lon_lat_min)<- c("Lon_Min_WGS", "Lat_Min_WGS")
# Upper (max) bound coordinates, reprojected to WGS84.
lon_lat_max<- st_as_sf(index_res_out, coords = c("Lon_Max", "Lat_Max"), crs = attributes(vast_fit$spatial_list$loc_i)$projCRS) %>%
st_transform(., st_crs(land_sf)) %>%
data.frame(st_coordinates(.)) %>%
dplyr::select(c("X", "Y"))
names(lon_lat_max)<- c("Lon_Max_WGS", "Lat_Max_WGS")
lon_lat_df<- cbind(lon_lat_df, lon_lat_min, lon_lat_max)
# NOTE(review): relies on columns 8-9 being the point coordinates appended by
# st_coordinates() above -- fragile if the summarized columns change.
names(lon_lat_df)[8:9]<- c("Lon", "Lat")
lon_lat_df$Date<- as.Date(paste0(lon_lat_df$Year, "-06-15"))
# Color palette: user-supplied, or a default trimmed to the category count.
if(!is.null(color_pal)){
colors_use<- color_pal
} else {
color_pal<- c('#66c2a5','#fc8d62','#8da0cb','#e78ac3','#a6d854')
colors_use<- color_pal[1:length(unique(lon_lat_df$Category))]
}
# Longitude time series with +/- 1 SE ribbon.
lon_ts<- ggplot() +
geom_ribbon(data = lon_lat_df, aes(x= Date, ymin = Lon_Min_WGS, ymax = Lon_Max_WGS), fill = '#66c2a5', alpha = 0.3) +
geom_line(data = lon_lat_df, aes(x = Date, y = Lon), color = '#66c2a5', lwd = 2) +
#scale_fill_manual(name = "Category", values = '#66c2a5') +
scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
ylab("Center of longitude") +
xlab("Date") +
theme_bw() +
theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# Latitude time series with +/- 1 SE ribbon.
lat_ts<- ggplot() +
geom_ribbon(data = lon_lat_df, aes(x= Date, ymin = Lat_Min_WGS, ymax = Lat_Max_WGS), fill = '#66c2a5', alpha = 0.3) +
geom_line(data = lon_lat_df, aes(x = Date, y = Lat), color = '#66c2a5', lwd = 2) +
#scale_fill_manual(name = "Category", values = '#66c2a5') +
scale_x_date(date_breaks = "5 year", date_labels = "%Y") +
ylab("Center of latitude") +
xlab("Date") +
theme_bw() +
theme(legend.title = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# Map on top, the two time series below (patchwork operators).
plot_out<- (cog_plot) / (lon_ts + lat_ts) + plot_layout(ncol = 1, nrow = 2, widths = c(0.75, 1), heights = c(0.75, 1))
# Save and return it
ggsave(plot_out, file = paste(out_dir, "/COG_", "_", nice_category_names, ".jpg", sep = ""))
return(plot_out)
}
|
#!/usr/bin/env Rscript
# Scatter plot of the first two principal components, colored by sample group,
# with a 90% confidence ellipse drawn per group.
# Usage: ellipse.r <pca_table.tsv>
#   input: tab-separated table, first column = row names; columns PC1, PC2,
#   ... and a 'group' column.
library(ggplot2)
library(ade4)
library(RColorBrewer)
cli_args <- commandArgs(trailingOnly = TRUE)
pca_tab <- read.table(cli_args[1], sep = "\t", row.names = 1, header = TRUE)
pc_scores <- pca_tab[, 1:2]
grp <- as.factor(pca_tab$group)
palette_cols <- c(brewer.pal(3, "Set1"))
ggplot(pc_scores, aes(x = PC1, y = PC2, color = grp)) +
  geom_point(aes(color = grp), size = 3, alpha = 0.6) +
  stat_ellipse(aes(x = PC1, y = PC2, fill = grp), geom = "polygon", alpha = 0.1, level = 0.9) +
  scale_fill_manual(values = palette_cols) +
  scale_color_manual(values = palette_cols)
|
/bin/ellipse.r
|
no_license
|
TLlab/asthmatic-microbiota
|
R
| false
| false
| 538
|
r
|
#!/usr/bin/env Rscript
# Draw a PCA scatter (PC1 vs PC2) colored by group, overlaying per-group
# 90% confidence ellipses.
# Usage: ellipse.r <pca_table.tsv>
#   expects a tab-separated file with row names in column 1, columns named
#   PC1 and PC2, and a 'group' column.
library(ggplot2)
library(ade4)
library(RColorBrewer)
cli_args <- commandArgs(trailingOnly = TRUE)
pca_tab <- read.table(cli_args[1], sep = "\t", row.names = 1, header = TRUE)
pc_scores <- pca_tab[, 1:2]
grp <- as.factor(pca_tab$group)
palette_cols <- c(brewer.pal(3, "Set1"))
ggplot(pc_scores, aes(x = PC1, y = PC2, color = grp)) +
  geom_point(aes(color = grp), size = 3, alpha = 0.6) +
  stat_ellipse(aes(x = PC1, y = PC2, fill = grp), geom = "polygon", alpha = 0.1, level = 0.9) +
  scale_fill_manual(values = palette_cols) +
  scale_color_manual(values = palette_cols)
|
#' Areal data calculation
#'
#' Computes three different summary statistics:
#' (1) `TotalArea` total area of each polygon;
#' (2) `AreaCovered` area covered by a multipolygon object within a high order polygon; and,
#' (3) `Ratio` ratio between `AreaCovered` and `TotalArea` i.e.
#' ratio between an area covered by a given set of features and total area of a higher-order geography polygon.
#'
#' The function requires two sets of polygon data: high-order and low-order geographic polygons
#'
#' @param polygon_layer multipolygon object of class \code{sf}, \code{sfc} or \code{sfg}.
#'
#' @param higher_geo_lay multipolygon object of class \code{sf}, \code{sfc} or \code{sfg}.
#'
#' @param unique_id_code a string; indicating a unique ID column of \code{higher_geo_lay},
#' used as the summary areas.
#'
#' @param crs coordinate reference system: integer with the EPSG code, or character based on proj4string.
#'
#' @return a \code{tibble} data frame object containing four columns is returned:
#'
#' - the \code{unique_id_code} of \code{higher_geo_lay}
#'
#' - the total area of each polygon
#' in \code{higher_geo_lay} (TotalArea),
#'
#' - the total area covered by \code{polygon_layer} features (AreaCovered),
#'
#' - the ratio between the total area covered by \code{polygon_layer} and total area of
#' \code{higher_geo_lay} polygon (Ratio).
#'
#' @examples
#' ## Run areal_calc() using the packages' dummy data sets.
#' ## The data sets are georeferenced on wgs84. However, a planar system is used to measure areas.
#' ## For the examples provided here, points and polygons relate to the United Kingdom.
#' ## So the British National Grid is used.
#'
#' ## Not run:
#' #outcome <- areal_calc(polygon_layer = pol_small,
#' #higher_geo_lay = pol_large,
#' #unique_id_code = "large_pol_",
#' #crs = "epsg:27700")
#' ## End(Not run)
#'
#'
#' @importFrom dplyr "%>%"
#'
#' @export
areal_calc <- function(polygon_layer,
                       higher_geo_lay,
                       unique_id_code,
                       crs) {
  # Project both layers to the supplied CRS; it must be planar so that
  # st_area() returns meaningful areas.
  polygon_layer <- sf::st_transform(polygon_layer, crs)
  higher_geo_lay <- sf::st_transform(higher_geo_lay, crs)
  # Total area of each higher-geography polygon, as numeric (CRS units^2)
  higher_geo_lay$TotalArea <-
    as.numeric(sf::st_area(higher_geo_lay$geometry))
  # Declare attributes spatially constant so st_intersection does not warn
  # about attribute-geometry relationships.
  sf::st_agr(polygon_layer) <- "constant"
  sf::st_agr(higher_geo_lay) <- "constant"
  # Intersect the two layers; each result row is a piece of polygon_layer
  # clipped to one higher-geography polygon.
  int <- dplyr::as_tibble(sf::st_intersection(polygon_layer, higher_geo_lay))
  int$area <- as.numeric(sf::st_area(int$geometry))
  # Drop pieces that fall outside the higher-geography boundaries
  # (they have a missing ID and would otherwise cause errors downstream).
  int <- int %>%
    tidyr::drop_na(!!as.name(unique_id_code))
  # Sum covered area per higher-geography polygon.
  # '!!as.name(unique_id_code)' unquotes the column-name string for tidy
  # evaluation (this is rlang unquoting, NOT logical negation).
  CoverByGeo <- int %>%
    dplyr::group_by(!!as.name(unique_id_code)) %>%
    dplyr::summarise(AreaCovered = sum(area), .groups = 'drop_last')
  # Ratio of area covered to total area of the higher geography polygon
  combined_data <- dplyr::left_join(CoverByGeo, higher_geo_lay, by = unique_id_code)
  combined_data$Ratio <- combined_data$AreaCovered / combined_data$TotalArea
  results <- combined_data[, c(unique_id_code, "TotalArea", "AreaCovered", "Ratio")]
  return(results)
}
|
/R/areal_calc.R
|
permissive
|
patnik/extRatum
|
R
| false
| false
| 3,670
|
r
|
#' Areal data calculation
#'
#' Computes three different summary statistics:
#' (1) `TotalArea` total area of each polygon;
#' (2) `AreaCovered` area covered by a multipolygon object within a high order polygon; and,
#' (3) `Ratio` ratio between `AreaCovered` and `TotalArea` i.e.
#' ratio between an area covered by a given set of features and total area of a higher-order geography polygon.
#'
#' The function requires two sets of polygon data: high-order and low-order geographic polygons
#'
#' @param polygon_layer multipolygon object of class \code{sf}, \code{sfc} or \code{sfg}.
#'
#' @param higher_geo_lay multipolygon object of class \code{sf}, \code{sfc} or \code{sfg}.
#'
#' @param unique_id_code a string; indicating a unique ID column of \code{higher_geo_lay},
#' used as the summary areas.
#'
#' @param crs coordinate reference system: integer with the EPSG code, or character based on proj4string.
#'
#' @return a \code{tibble} data frame object containing four columns is returned:
#'
#' - the \code{unique_id_code} of \code{higher_geo_lay}
#'
#' - the total area of each polygon
#' in \code{higher_geo_lay} (TotalArea),
#'
#' - the total area covered by \code{polygon_layer} features (AreaCovered),
#'
#' - the ratio between the total area covered by \code{polygon_layer} and total area of
#' \code{higher_geo_lay} polygon (Ratio).
#'
#' @examples
#' ## Run areal_calc() using the packages' dummy data sets.
#' ## The data sets are georeferenced on wgs84. However, a planar system is used to measure areas.
#' ## For the examples provided here, points and polygons relate to the United Kingdom.
#' ## So the British National Grid is used.
#'
#' ## Not run:
#' #outcome <- areal_calc(polygon_layer = pol_small,
#' #higher_geo_lay = pol_large,
#' #unique_id_code = "large_pol_",
#' #crs = "epsg:27700")
#' ## End(Not run)
#'
#'
#' @importFrom dplyr "%>%"
#'
#' @export
areal_calc <- function(polygon_layer,
                       higher_geo_lay,
                       unique_id_code,
                       crs) {
  # Project both layers to the supplied CRS; it must be planar so that
  # st_area() returns meaningful areas.
  polygon_layer <- sf::st_transform(polygon_layer, crs)
  higher_geo_lay <- sf::st_transform(higher_geo_lay, crs)
  # Total area of each higher-geography polygon, as numeric (CRS units^2)
  higher_geo_lay$TotalArea <-
    as.numeric(sf::st_area(higher_geo_lay$geometry))
  # Declare attributes spatially constant so st_intersection does not warn
  # about attribute-geometry relationships.
  sf::st_agr(polygon_layer) <- "constant"
  sf::st_agr(higher_geo_lay) <- "constant"
  # Intersect the two layers; each result row is a piece of polygon_layer
  # clipped to one higher-geography polygon.
  int <- dplyr::as_tibble(sf::st_intersection(polygon_layer, higher_geo_lay))
  int$area <- as.numeric(sf::st_area(int$geometry))
  # Drop pieces that fall outside the higher-geography boundaries
  # (they have a missing ID and would otherwise cause errors downstream).
  int <- int %>%
    tidyr::drop_na(!!as.name(unique_id_code))
  # Sum covered area per higher-geography polygon.
  # '!!as.name(unique_id_code)' unquotes the column-name string for tidy
  # evaluation (this is rlang unquoting, NOT logical negation).
  CoverByGeo <- int %>%
    dplyr::group_by(!!as.name(unique_id_code)) %>%
    dplyr::summarise(AreaCovered = sum(area), .groups = 'drop_last')
  # Ratio of area covered to total area of the higher geography polygon
  combined_data <- dplyr::left_join(CoverByGeo, higher_geo_lay, by = unique_id_code)
  combined_data$Ratio <- combined_data$AreaCovered / combined_data$TotalArea
  results <- combined_data[, c(unique_id_code, "TotalArea", "AreaCovered", "Ratio")]
  return(results)
}
|
\name{plusminus.fit}
\alias{plusminus.fit}
\title{PlusMinus (Mas-o-Menos)}
\description{Plus-Minus classifier}
\usage{plusminus.fit(XX, YY, ...)}
\arguments{
\item{XX}{ a matrix of observations. \code{NAs} and \code{Infs} are not allowed. }
\item{YY}{ a vector. \code{NAs} and \code{Infs} are not allowed. }
\item{\dots}{ additional arguments. Currently ignored. }
}
\details{This function should not be called directly, but through \code{plusminusFit} with the argument \code{method="plusminus"}. It implements the Plus-Minus algorithm.
}
\value{
An object of class \code{plusminus} is returned. The object contains all components returned by the underlying fit function. In addition, it contains the following:
\item{coefficients}{ regression coefficients }
\item{Y}{ response values }
\item{X}{ scaled predictors}
}
\author{Richard Baumgartner (\email{richard_baumgartner@merck.com}), Nelson Lee Afanador (\email{nelson.afanador@mvdalab.com})}
\references{
Zhao et al. (2014) Mas-o-menos: a simple sign averaging method for discrimination in genomic data analysis. Bioinformatics, 30(21):3062-3069.
}
\seealso{\code{\link{plusminusFit}}}
|
/man/plusminus.fit.Rd
|
no_license
|
cran/mvdalab
|
R
| false
| false
| 1,164
|
rd
|
\name{plusminus.fit}
\alias{plusminus.fit}
\title{PlusMinus (Mas-o-Menos)}
\description{Plus-Minus classifier}
\usage{plusminus.fit(XX, YY, ...)}
\arguments{
\item{XX}{ a matrix of observations. \code{NAs} and \code{Infs} are not allowed. }
\item{YY}{ a vector. \code{NAs} and \code{Infs} are not allowed. }
\item{\dots}{ additional arguments. Currently ignored. }
}
\details{This function should not be called directly, but through \code{plusminusFit} with the argument \code{method="plusminus"}. It implements the Plus-Minus algorithm.
}
\value{
An object of class \code{plusminus} is returned. The object contains all components returned by the underlying fit function. In addition, it contains the following:
\item{coefficients}{ regression coefficients }
\item{Y}{ response values }
\item{X}{ scaled predictors}
}
\author{Richard Baumgartner (\email{richard_baumgartner@merck.com}), Nelson Lee Afanador (\email{nelson.afanador@mvdalab.com})}
\references{
Zhao et al. (2014) Mas-o-menos: a simple sign averaging method for discrimination in genomic data analysis. Bioinformatics, 30(21):3062-3069.
}
\seealso{\code{\link{plusminusFit}}}
|
\name{summary.kohonen}
\alias{summary.kohonen}
\alias{print.kohonen}
\title{Summary and print methods for kohonen objects}
\description{
Summary and print methods for \code{kohonen} objects. The \code{print}
method shows the dimensions and the topology of the map; if
information on the training data is included, the \code{summary}
method additionally prints information on the size of the data, the
distance functions used, and the
mean distance of an object to its closest codebook vector, which is an
indication of the quality of the mapping.}
\usage{
\method{summary}{kohonen}(object, \dots)
\method{print}{kohonen}(x, \dots)
}
\arguments{
\item{x, object}{a \code{kohonen} object}
\item{\dots}{Not used.}
}
\author{Ron Wehrens}
\seealso{\code{\link{som}}, \code{\link{xyf}}, \code{\link{supersom}}}
\examples{
data(wines)
xyf.wines <- xyf(scale(wines), classvec2classmat(vintages),
grid = somgrid(5, 5, "hexagonal"))
xyf.wines
summary(xyf.wines)
}
\keyword{classif}
|
/man/summary.Rd
|
no_license
|
cran/kohonen
|
R
| false
| false
| 1,006
|
rd
|
\name{summary.kohonen}
\alias{summary.kohonen}
\alias{print.kohonen}
\title{Summary and print methods for kohonen objects}
\description{
Summary and print methods for \code{kohonen} objects. The \code{print}
method shows the dimensions and the topology of the map; if
information on the training data is included, the \code{summary}
method additionally prints information on the size of the data, the
distance functions used, and the
mean distance of an object to its closest codebook vector, which is an
indication of the quality of the mapping.}
\usage{
\method{summary}{kohonen}(object, \dots)
\method{print}{kohonen}(x, \dots)
}
\arguments{
\item{x, object}{a \code{kohonen} object}
\item{\dots}{Not used.}
}
\author{Ron Wehrens}
\seealso{\code{\link{som}}, \code{\link{xyf}}, \code{\link{supersom}}}
\examples{
data(wines)
xyf.wines <- xyf(scale(wines), classvec2classmat(vintages),
grid = somgrid(5, 5, "hexagonal"))
xyf.wines
summary(xyf.wines)
}
\keyword{classif}
|
# title: ltmake.r
# purpose: produce lifetables for CA burden project
# author: ethan sharygin (github:sharygin)
# notes:
# - intention is for analyst to be able to generate life tables by using default population + deaths,
# or inputting their own population + deaths.
# - ACS 5-yr datasets populations are weighted by age/sex to sum to CB PEP estimates from middle year.
# - ACS tract population tables: from B01001 = age/sex by tract; subtables by race/ethnicity.
# - combine years to get higher exposures for better tables:
# geo years (total) years (by race) agegroups by-characteristics
# state 1 1 0,1-4,5(5)85,199 GEOID,sex,race
# county 3 5 0,1-4,5(5)85,199 GEOID,sex,race
# mssa 5 NA 0(5)85,199 GEOID,sex
# - GEOID = unique geography level code, tract and higher: SSCCCTTTTTT where S=state fips, C=county fips, T=tract.
# - race schema: WNH BNH APINH H. exclude MR, AIAN, and combine A+PI.
# before 2000, no MR data, so issues in denominators for those years.
# issues in matching numerators/denominators. possible solution -- bridged race.
# - Census tracts changed between 2009-2010-2013. MSSA boundaries changed between 2009 and 2013.
# as a result, had to map 2000 and 2010 tracts both into 2013 MSSA boundaries.
# - MSSA issues: combined 0-4 age group combined + no tracts coded before 2005; 2005 onward about 3% missing.
# - unknown whether CA RESIDENCE criteria are correctly reflected in the death file.
# - dropped Bernoulli trials method for LTCI
# instructions:
# - set path
# - set options
# - required input files:
# (1) ACS population B01001* 2009-2017 5-year files by tract from NHGIS (acs5_B01001_tracts.dta)
# (2) DOF population 2000-09 and 2010-2018 by county from WWW (dof_ic00pc10v19.dta)
# (3) 2009 and 2010 tracts to 2013 MSSAs by ESRI ArcGIS from OSHPD+TIGER/LINE (trt00mssa13.dta + trt10mssa13.dta)
# (4) 2013 MSSA to county maps from OSHPD (mssa13cfips.dta)
# (5) a deaths microdata file, for example: cbdDat0SAMP.R, cbdDat0FULL.R, or dof_deaths_mi.dta
# TBD:
# - convert packaged inputs from stata to csv
# - repackage default deaths/population into a standard text format: e.g., HMD format.
# (easier for analysts to substitute their own deaths/population data)
# (use the readHMDHFD package to sideload txt files?)
# - calculate nqx from better data + package an included set of ax values.
## 1 SETUP ----------------------------------------------------------------------
## 1.1 packages
# install any missing packages, then attach all of them
# NOTE(review): '%in% installed.packages()' compares against every cell of the
# package-metadata matrix; it works, but rownames(installed.packages()) would
# be the precise target.
.pkg <- c("data.table","readr","readstata13","stringr","tidyr")
.inst <- .pkg %in% installed.packages()
if(length(.pkg[!.inst]) > 0) install.packages(.pkg[!.inst])
lapply(.pkg, require, character.only=TRUE)
## 1.2 options
controlPop <- TRUE # whether to control ACS to DOF pop totals
whichDeaths <- "real" # source of deaths data (real,fake,dof)
whichPop <- "pep" # source of population data (dof,pep)
critNx <- 10000 # minimum summed exposure (person-years) for a publishable life table
critDx <- 700 # minimum summed deaths for a publishable life table
## 1.3 paths
#setwd("C:/Users/fieshary/projects/CACommunityBurden")
myDrive <- getwd()
myPlace <- paste0(myDrive,"/myCBD")
upPlace <- paste0(myDrive,"/myUpstream")
dofSecure <- "d:/users/fieshary/projects/vry-lt/dx"
# successive reassignments: only the LAST mySecure value (Linux mount) takes
# effect; the earlier Windows paths are kept as per-platform alternatives
mySecure <- "d:/0.Secure.Data/myData"
mySecure <- "G:/CCB/0.Secure.Data/myData"
mySecure <- "/mnt/projects/CCB/0.Secure.Data/myData"
## 1.4 links
#.ckey <- read_file(paste0(upPlace,"/upstreamInfo/census.api.key.txt")) # census API key
# ACS MSSA population file: the "_adj" version is controlled to DOF county
# totals. Scalar choice written with if/else rather than the vectorized
# ifelse(), which is not intended for length-1 selection.
.nxacs <- if (controlPop) {
paste0(upPlace,"/lifeTables/dataIn/acs5_mssa_adj.dta") # ACS tract pop, collapsed to MSSA and controlled to DOF county
} else {
paste0(upPlace,"/lifeTables/dataIn/acs5_mssa.dta") # ACS tract pop collapsed to MSSA
}
.trt00mssa <- paste0(upPlace,"/lifeTables/dataIn/trt00mssa13.dta") # 2009 TIGER/LINE census tracts to 2013 MSSAs
.trt10mssa <- paste0(upPlace,"/lifeTables/dataIn/trt10mssa13.dta") # 2010 TIGER/LINE census tracts to 2013 MSSAs
.mssacfips <- paste0(upPlace,"/lifeTables/dataIn/mssa13cfips.dta") # 2013 MSSA to county
.countycfips <- paste0(upPlace,"/lifeTables/dataIn/countycfips.dta") # county name to county FIPS in GEOID format
# deaths and population inputs chosen by the option flags set in section 1.2
if (whichDeaths=="fake") .deaths <- paste0(upPlace,"/upData/cbdDat0SAMP.R")
if (whichDeaths=="real") .deaths <- paste0(mySecure,"/ccb_processed_deaths.RDS")
if (whichDeaths=="dof") .deaths <- paste0(dofSecure,"/dof_deaths_mi.dta")
if (whichPop=="dof") .pop <- paste0(upPlace,"/lifeTables/dataIn/dof_ic10pc19.dta")
if (whichPop=="pep") .pop <- paste0(upPlace,"/lifeTables/dataIn/pep_ic10pc18_special.dta")
## 2 GEOGRAPHY ----------------------------------------------------------------------
## 2.1 load tract to MSSA maps
trt00mssa<-setDT(read.dta13(.trt00mssa))
trt10mssa<-setDT(read.dta13(.trt10mssa))
mssacfips<-setDT(read.dta13(.mssacfips))
## 2.2 load county name to county FIPS code maps
countycfips<-setDT(read.dta13(.countycfips))
## 3 POPULATION ----------------------------------------------------------------------
## 3.1 load 2000-09 intercensal + 2010-18 postcensal county + state population
nx.county<-setDT(read.dta13(.pop))
# Append marginal totals. rbind's idcol tags each row with the summary that
# produced it: 1=full sex+race detail, 2=sex and race both TOTAL,
# 3=race TOTAL, 4=sex TOTAL; the .id values are recoded then dropped below.
nx.county<-rbind(nx.county, # sex+race detail
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
nx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
nx.county[.id==3,race7:="TOTAL"]
nx.county[.id==4,sex:="TOTAL"]
nx.county[,.id:=NULL]
# drop the statewide pseudo-county row; the state series is rebuilt just below
nx.county<-nx.county[GEOID!="06000000000"]
## state
nx.state<-copy(nx.county[,.(Nx=sum(Nx)),by=.(year,sex,race7,agell,ageul)])
nx.state[,GEOID:="06000000000"]
## 3.3 load ACS 2005-2015 five-year samples from NHGIS, rolled up to MSSA level
nxacs<-setDT(read.dta13(.nxacs))
nxacs[,race7:="TOTAL"]
# same idcol trick: .id==2 rows are the sex=TOTAL margin
nxacs<-rbind(nxacs,
nxacs[,.(Nx=sum(Nx)),by=.(year,comID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
nxacs[.id==2,sex:="TOTAL"]
nxacs[,.id:=NULL]
## 4 DEATHS ---------------------------------------------------------------------------
## 4.1 load selected deaths master file
# each source provides one microdata table; CBD sources are normalized to 'cbddeaths'
if (whichDeaths=="dof") setDT(dofdeaths<-read.dta13(.deaths))
if (whichDeaths=="fake") { load(.deaths); setDT(cbdDat0SAMP); cbddeaths<-cbdDat0SAMP }
if (whichDeaths=="real") { load(.deaths); setDT(cbdDat0FULL); cbddeaths<-cbdDat0FULL }
## 4.2 clean CBD deaths files
if (whichDeaths %in% c("real","fake")) {
## MSSA
# keep records with known sex/age/year and a CA tract GEOID (county fips 6001-6115)
dx.mssa<-copy(cbddeaths[sex %in% c("M","F") &
!is.na(age) & !is.na(year) &
as.numeric(substring(GEOID,1,5)) %in% 6001:6115]) # keep conditions
# recode to 5-year age groups 0(5)85 with open-ended 85+ coded ageul=199
dx.mssa[,agell:=(5*floor(age/5))]
dx.mssa[agell>85,agell:=85]
dx.mssa[age<85,ageul:=agell+4]
dx.mssa[age>=85,ageul:=199]
dx.mssa[sex=="F",sex:="FEMALE"]
dx.mssa[sex=="M",sex:="MALE"]
# NOTE(review): merge()'s data.table method takes 'by=', not 'on='; here
# 'on=GEOID' is absorbed by '...' and the join silently falls back to all
# common columns -- confirm the joined key set is the intended one
dx.mssa<-merge(dx.mssa,trt10mssa,on=GEOID,all.x=TRUE) # merge tract->mssa; ONLY 2010 tracts are geocoded.
# aggregate to death counts (Dx=.N) with a sex=TOTAL margin via idcol
dx.mssa<-rbind(dx.mssa[,.(Dx=.N),by=.(year,comID,sex,agell,ageul)], # sex detail
dx.mssa[,.(Dx=.N),by=.(year,comID,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.mssa[.id==2,sex:="TOTAL"]
dx.mssa[,.id:=NULL]
dx.mssa[,race7:="TOTAL"]
## county
dx.county<-copy(cbddeaths[sex %in% c("M","F") &
!is.na(age) & !is.na(year) &
!is.na(county)]) # keep conditions
# county tables use age groups 0, 1-4, then 5(5)85 with open-ended 85+
dx.county[age==0,agell:=0]
dx.county[age %in% 1:4,agell:=1]
dx.county[age>=5,agell:=(5*floor(age/5))]
dx.county[agell>85,agell:=85]
dx.county[agell==0,ageul:=0]
dx.county[agell==1,ageul:=4]
dx.county[agell %in% 5:80,ageul:=agell+4]
dx.county[age>=85,ageul:=199]
dx.county[sex=="F",sex:="FEMALE"]
dx.county[sex=="M",sex:="MALE"]
# map CCB raceCode values onto the race7 schema used by the population data
dx.county[raceCode=="AIAN-NH",race7:="AIAN_NH"]
dx.county[raceCode=="Asian-NH",race7:="ASIAN_NH"]
dx.county[raceCode=="Black-NH",race7:="BLACK_NH"]
dx.county[raceCode=="Hisp",race7:="HISPANIC"]
dx.county[raceCode=="Multi-NH",race7:="MR_NH"]
dx.county[raceCode=="NHPI-NH",race7:="NHPI_NH"]
dx.county[raceCode=="White-NH",race7:="WHITE_NH"]
dx.county[raceCode=="Other-NH",race7:="SOR_NH"]
# NOTE(review): same 'on=' vs 'by=' issue as the MSSA merge above
dx.county<-merge(dx.county,countycfips,on=county,all.x=TRUE) # merge cname->GEOID
dx.county[,GEOID:=sprintf("%05d000000",cfips)]
# aggregate to counts with sex/race margins tagged via idcol (see section 3.1)
dx.county<-rbind(dx.county[,.(Dx=.N),by=.(year,GEOID,sex,race7,agell,ageul)], # sex+race detail
dx.county[,.(Dx=.N),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
dx.county[,.(Dx=.N),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
dx.county[,.(Dx=.N),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
dx.county[.id==3,race7:="TOTAL"]
dx.county[.id==4,sex:="TOTAL"]
dx.county[,.id:=NULL]
## state
dx.state<-copy(dx.county[,.(Dx=sum(Dx)),by=.(year,sex,race7,agell,ageul)])
dx.state[,GEOID:="06000000000"]
}
## 4.3 clean DOF deaths files
# DOF deaths arrive pre-aggregated (Dx counts); only the margins are added here
if (whichDeaths == "dof") {
dx.county<-copy(dofdeaths)
dx.county<-rbind(dx.county, # sex+race detail
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
dx.county[.id==3,race7:="TOTAL"]
dx.county[.id==4,sex:="TOTAL"]
dx.county[,.id:=NULL]
## state
dx.state<-copy(dx.county[,.(Dx=sum(Dx)),by=.(year,sex,race7,agell,ageul)])
dx.state[,GEOID:="06000000000"]
}
## 5 MORTALITY ----------------------------------------------------------------------
## 5.1 function to generate an extract of years by geo and merge pop + deaths
## syntax: dx=deaths data, nx=pop data, nyrs=N neighborings years to combine, y=target year, level=geography
# Build a pooled mortality extract for one target year.
# Merges deaths (dx) and population (nx) over the window y-nyrs .. y+nyrs,
# collapses across years, and returns one rectangular table tagged with year=y.
#   dx    - deaths data.table (Dx by year/geography/sex/race7/age group)
#   nx    - population data.table (Nx, same keys)
#   nyrs  - number of neighboring years on EACH side to pool
#   y     - target (center) year
#   level - geography: "mssa" uses comID as the key, otherwise GEOID
# Stops if either dataset is missing any year in the window.
doExtract <- function(dx=NULL, nx=NULL, nyrs=NA, y=NA, level=NA) {
  # For MSSA tables the key column is comID; temporarily alias it to GEOID so
  # one code path serves all geographies. NOTE: ':=' modifies dx/nx by
  # reference; the alias column is removed again before returning.
  if (level=="mssa") {
    dx[,GEOID:=comID]
    nx[,GEOID:=comID]
  }
  # require a complete run of years in both datasets
  if (length(unique(nx[year>=y-nyrs & year<=y+nyrs,year]))<(2*nyrs+1)) { stop("Exposure data are missing for one or more years") }
  if (length(unique(dx[year>=y-nyrs & year<=y+nyrs,year]))<(2*nyrs+1)) { stop("Incidence data are missing for one or more years") }
  # merge pop+deaths for the filtered years
  # (fixed: merge() takes 'by='; the original passed 'on=', which merge()
  # ignored, silently falling back to all common columns -- the same set
  # listed here, so behavior is unchanged but now explicit)
  tmp<-merge(nx[year>=y-nyrs & year<=y+nyrs],dx[year>=y-nyrs & year<=y+nyrs],
             by=c('GEOID','sex','year','agell','ageul','race7'),
             all.x=TRUE,all.y=TRUE)
  tmp<-tmp[,.(Nx=sum(Nx),Dx=sum(Dx)),by=c('GEOID','sex','agell','ageul','race7')] # collapse years
  tmp<-setDT(complete(tmp,GEOID,sex,race7,agell)) # (tidyr) rectangularize: one row per key combination
  tmp[is.na(Dx),Dx:=0] # convert implicit to explicit zero deaths
  tmp[,year:=y] # tag extract with the target year
  if (level=="mssa") {
    # restore comID in the result and undo the temporary alias on the inputs
    tmp[,comID:=GEOID]
    tmp[,GEOID:=NULL]
    dx[,GEOID:=NULL]
    nx[,GEOID:=NULL]
  }
  return(tmp)
}
## 5.2 call doExtract for various geographies
## GEO by: sex/age race
## state 1 year 1yr
## county 3 yr 5yr
## mssa 5 yr -
#XXX INPUT DATES
## mssa
if (whichDeaths %in% c("real","fake")) {
# NOTE(review): the first 'range' assignment is dead code; only 2009:2016 is used
range<-2009:2014 # or later if available. 'fake' has nx 2009-2018 and dx 2007-2014
range<-2009:2016 # or later if available. 'fake' has nx 2009-2018 and dx 2007-2014
mx.mssa<-data.table(do.call(rbind,lapply(range,doExtract,dx=dx.mssa,nx=nxacs,nyrs=2,level="mssa")))[,nyrs:=5]
}
## county
# two pooling windows stacked: 3-year for race=TOTAL, 5-year for race detail
mx.county<-rbind( # combine 3-year TOTAL race, 5-year race7
data.table(do.call(rbind,lapply(2001:2017,doExtract,dx=dx.county,nx=nx.county,nyrs=1,level="county")))[race7=="TOTAL"][,nyrs:=3],
data.table(do.call(rbind,lapply(2002:2016,doExtract,dx=dx.county,nx=nx.county,nyrs=2,level="county")))[,nyrs:=5]
)
## state
mx.state<-data.table(do.call(rbind,lapply(2000:2018,doExtract,dx=dx.state,nx=nx.state,nyrs=0,level="state")))[,nyrs:=1]
#XXX testing
# crude percent dead, kept for ad-hoc inspection only
mx.state$pDead = 100*mx.state$Dx / mx.state$Nx
## 6 LIFE TABLES ----------------------------------------------------------------------
## 6.1 generic function to produce a life table
## x is a vector of age groups, nx is the corresponding vector of pop, dx of deaths
## sex is M or MALE or F or FEMALE (used to calc ax); ax is an optional vector of ax values
## previously estimated ax values are available from the UN WPP, USMDB, NCHS, including by race.
## values used here are from USMDB CA 1x10 or 5x10 (2010-17) by sex.
## also exports LTCI from Chiang's method with adjusted final age group
## - D. Eayres and E.S. Williams. 2004. "Evaluation of methodologies for small area life
## expectancy estimation". J Epi Com Health 58(3). http://dx.doi.org/10.1136/jech.2003.009654.
# Abridged life table with Chiang confidence intervals.
#   x     - vector of age-group lower bounds (e.g. 0,1,5,...,85)
#   Nx    - population (exposure) per age group
#   Dx    - deaths per age group
#   sex   - "MALE"/"FEMALE"/"TOTAL"; only the first element is inspected, and
#           only to pick fallback ax values for the open-ended age group
#   ax    - optional person-years lived by those dying within each interval;
#           defaults are hardcoded from USMDB CA 2010-17 life tables
#   level - confidence level for the life-expectancy interval (default 0.95)
# Returns a data.table of standard life-table columns plus Chiang CI columns
# (se2, exlow, exhigh), per Eayres & Williams (2004) final-age adjustment.
doLTChiangCI <- function(x, Nx, Dx, sex, ax=NULL, level=0.95) {
  m <- length(x)                       # index of the final (open-ended) age group
  mx <- Dx/Nx                          # age-specific mortality rate
  n <- c(diff(x), NA)                  # width of each age interval
  # calculate ax when not supplied
  # (fixed: removed dead 'ax <- rep(0,m)' that was immediately overwritten)
  if (is.null(ax)) {
    ax <- n/2                          # rule of thumb: half the interval
    # infant ages, from USMDB CA 5x10 life tables
    if (n[1]==1) ax[1] <- 0.06
    if (n[2]==4) ax[2] <- 1.64
    if (n[1]==5) ax[1] <- 0.44
    # final age interval: rule of thumb, inverse of mx
    ax[m] <- 1 / mx[m]
    if (is.na(ax[m])) {                # cannot calculate, e.g. mx[m]==0
      if (grepl("F",sex[1])) {         # female
        if (x[m]==85) ax[m] <- 7.58
        if (x[m]==90) ax[m] <- 5.22
        if (x[m]==100) ax[m] <- 2.47
      }
      if (!grepl("F",sex[1]) & !grepl("T",sex[1])) { # male
        if (x[m]==85) ax[m] <- 6.54
        if (x[m]==90) ax[m] <- 4.50
        if (x[m]==100) ax[m] <- 2.22
      }
      if (grepl("T",sex[1])) {         # total (both sexes)
        if (x[m]==85) ax[m] <- 7.19
        if (x[m]==90) ax[m] <- 4.97
        if (x[m]==100) ax[m] <- 2.42
      }
    }
  }
  # Chiang standard life-table columns
  qx <- n*mx / (1+(n-ax)*mx)           # probability of death within interval
  qx[m] <- 1                           # death is certain in the open-ended group
  px <- 1-qx                           # probability of survival
  lx <- cumprod(c(1,px))*100000        # survivors, radix 100,000
  dx <- -diff(lx)                      # deaths in each age interval
  Lx <- n*lx[-1] + ax*dx               # person-years lived in each interval
  lx <- lx[-(m+1)]                     # trim the extra cumprod element
  Lx[m] <- lx[m]/mx[m]                 # person-years in the final interval
  Lx[is.na(Lx)|is.infinite(Lx)] <- 0   # guard against poorly formed inputs
  Tx <- rev(cumsum(rev(Lx)))           # person-years lived at age x and above
  ex <- Tx/lx                          # life expectancy at age x
  # Chiang CI elements
  zcrit <- 1-((1-level)/2)             # two-sided normal quantile level
  sp2 <- ((qx^2)*(1-qx))/Dx            # variance of survival probability
  sp2[is.na(sp2)] <- 0                 # fix zero-deaths case
  sp2[m] <- 4/Dx[m]/mx[m]^2            # final-interval adjustment
  wsp2 <- lx^2*((1-(ax/n))*n+c(tail(ex,-1),NA))^2*sp2 # weighted variance
  wsp2[m] <- (lx[m]/2)^2*sp2[m]        # final-interval adjustment
  Twsp2 <- rev(cumsum(rev(wsp2)))      # cumulative weighted variance (like Tx)
  se2 <- Twsp2/lx^2                    # sampling variance of ex
  exlow <- ex-qnorm(zcrit)*sqrt(se2)   # CI lower bound
  exhigh <- ex+qnorm(zcrit)*sqrt(se2)  # CI upper bound
  return(data.table(x, n, Nx, Dx, mx, ax, qx, px, lx, dx, Lx, Tx, ex, sp2, wsp2, Twsp2, se2, exlow, exhigh))
}
## 6.2 add index to mx tables
# 'i' indexes one life-table population (pooling window x geography x sex x race x year)
mx.state[, i:=.GRP, by=c("nyrs","GEOID","sex","race7","year")]
setkeyv(mx.state,c("i","agell"))
mx.county[, i:=.GRP, by=c("nyrs","GEOID","sex","race7","year")]
setkeyv(mx.county,c("i","agell"))
if (whichDeaths %in% c("real","fake")) {
mx.mssa[, i:=.GRP, by=c("nyrs","comID","sex","race7","year")]
setkeyv(mx.mssa,c("i","agell"))
}
## 6.3 restrict using sum(Nx) & sum(Dx)
# censor life tables with too little exposure (< critNx) or too few deaths (< critDx)
mx.state<-mx.state[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
mx.county<-mx.county[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
if (whichDeaths %in% c("real","fake")) {
mx.mssa<-mx.mssa[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
}
## 6.4 Call LT routine by geography
# one life table per group 'i'; system.time wrappers report elapsed time only
## state
system.time({ lt.state<-mx.state[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","GEOID","sex","race7","year")] })
setkeyv(lt.state,c("i","x"))
## county
system.time({ lt.county<-mx.county[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","GEOID","sex","race7","year")] })
setkeyv(lt.county,c("i","x"))
## MSSA
if (whichDeaths %in% c("real","fake")) {
system.time({ lt.mssa<-mx.mssa[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","comID","sex","race7","year")] })
setkeyv(lt.mssa,c("i","x"))
}
## 7 REVIEW/EXPORT ----------------------------------------------------------------------
## 7.1 EXPORT
## full LT
saveRDS(lt.state,paste0(upPlace,"/lifeTables/dataOut/LTciState.rds"))
saveRDS(lt.county,paste0(upPlace,"/lifeTables/dataOut/LTciCounty.rds"))
if (whichDeaths %in% c("real","fake")) {
saveRDS(lt.mssa,paste0(upPlace,"/lifeTables/dataOut/LTciMSSA.rds"))
}
## e0 only
# life expectancy at birth (x==0) with confidence bounds, one row per table
saveRDS(lt.state[x==0,c("nyrs","GEOID","sex","race7","year","ex","exlow","exhigh")],
paste0(upPlace,"/lifeTables/dataOut/e0ciState.rds"))
saveRDS(lt.county[x==0,c("nyrs","GEOID","sex","race7","year","ex","exlow","exhigh")],
paste0(upPlace,"/lifeTables/dataOut/e0ciCounty.rds"))
if (whichDeaths %in% c("real","fake")) {
saveRDS(lt.mssa[x==0,c("nyrs","comID","sex","race7","year","ex","exlow","exhigh")]
,paste0(upPlace,"/lifeTables/dataOut/e0ciMSSA.rds"))
}
## 7.2 Review
# sanity checks printed to the console; no objects are modified here
mx.state[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("GEOID","sex","year","race7")] # state sum
lt.state[x==0 & sex=="TOTAL" & race7=="TOTAL",c("GEOID","sex","year","race7","ex","exlow","exhigh")]
mx.county[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("nyrs","sex","year","race7")] # state sum
lt.county[x==0 & sex=="TOTAL" & race7=="TOTAL" & year==2017,
c("nyrs","GEOID","sex","year","race7","ex","exlow","exhigh")]
# fixed: mx.mssa/lt.mssa only exist for CBD death sources ("real"/"fake");
# guard so this section does not error when whichDeaths=="dof". Explicit
# print() preserves console output, which data.table suppresses inside a block.
if (whichDeaths %in% c("real","fake")) {
print(mx.mssa[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("sex","year","race7")]) # state sum
print(lt.mssa[x==0 & sex=="TOTAL" & race7=="TOTAL" & (year %in% c(2010,2017)),
c("comID","sex","year","race7","ex","exlow","exhigh")])
}
## 7.3 NOTES ----------------------------------------------------------
# Life tables for communities, counties and states are generated from age specific
# mortality rates, which are the quotient of deaths during a calendar year to the
# and exposure, approximated by the population of the same age at the midpoint of
# the year (July 1). Age structured population data for tracts and communities are
# estimated using data from the American Community Survey, 5-year sample (table
# B01001; multiple years). County and state age population by age are estimated by
# the Demographic Research Unit, CA Department of Finance. Deaths data are based
# on 100% extracts from the vital statistics reporting system, CA Department of
# Public Health. Mortality and exposure data were combined for small groups:
# 5 years of combined population and mortality data for each annual community table,
# as well as to county tables by race. 3 years of combined data for county tables
# without race detail. Life tables with fewer than 700 deaths of 10,000 PY were
# censored. Intra-age mortality (nax) was calculated for ages below 5 using values
# from a similar population (CA life table for 2010-17 from USMDB) and by the
# midpoint of the age interval for other age groups except the last (1/mx or a
# value from USMDB if mx is zero or undefined). Standard errors were calculated
# for age specific probabilities of death and used to calculate 95% confidence
# intervals for life expectancy (Chiang 1984; Eayres and Williams 2004).
#
# United States Mortality DataBase. University of California, Berkeley (USA).
# Available at usa.mortality.org. Downloaded 2020-02-27.
#
# Chiang, C.L. 1984. The Life Table and its Applications. Robert E Krieger Publ Co., pp. 153-168.
#
# Eayres D, and E.S.E. Williams. Evaluation of methodologies for small area life expectancy estimation.
# Journal of Epidemiology & Community Health 2004;58:243-249.
|
/myUpstream/lifeTables/code/archive_DELETE_SOON/ltmaker-OLDER.r
|
no_license
|
mcSamuelDataSci/CACommunityBurden
|
R
| false
| false
| 20,010
|
r
|
# title: ltmake.r
# purpose: produce lifetables for CA burden project
# author: ethan sharygin (github:sharygin)
# notes:
# - intention is for analyst to be able to generate life tables by using default population + deaths,
# or inputting their own population + deaths.
# - ACS 5-yr datasets populations are weighted by age/sex to sum to CB PEP estimates from middle year.
# - ACS tract population tables: from B01001 = age/sex by tract; subtables by race/ethnicity.
# - combine years to get higher exposures for better tables:
# geo years (total) years (by race) agegroups by-characteristics
# state 1 1 0,1-4,5(5)85,199 GEOID,sex,race
# county 3 5 0,1-4,5(5)85,199 GEOID,sex,race
# mssa 5 NA 0(5)85,199 GEOID,sex
# - GEOID = unique geography level code, tract and higher: SSCCCTTTTTT where S=state fips, C=county fips, T=tract.
# - race schema: WNH BNH APINH H. exclude MR, AIAN, and combine A+PI.
# before 2000, no MR data, so issues in denominators for those years.
# issues in matching numerators/denominators. possible solution -- bridged race.
# - Census tracts changed between 2009-2010-2013. MSSA boundaries changed between 2009 and 2013.
# as a result, had to map 2000 and 2010 tracts both into 2013 MSSA boundaries.
# - MSSA issues: combined 0-4 age group combined + no tracts coded before 2005; 2005 onward about 3% missing.
# - unknown whether CA RESIDENCE criteria are correctly reflected in the death file.
# - dropped Bernoulli trials method for LTCI
# instructions:
# - set path
# - set options
# - required input files:
# (1) ACS population B01001* 2009-2017 5-year files by tract from NHGIS (acs5_B01001_tracts.dta)
# (2) DOF population 2000-09 and 2010-2018 by county from WWW (dof_ic00pc10v19.dta)
# (3) 2009 and 2010 tracts to 2013 MSSAs by ESRI ArcGIS from OSHPD+TIGER/LINE (trt00mssa13.dta + trt10mssa13.dta)
# (4) 2013 MSSA to county maps from OSHPD (mssa13cfips.dta)
# (5) a deaths microdata file, for example: cbdDat0SAMP.R, cbdDat0FULL.R, or dof_deaths_mi.dta
# TBD:
# - convert packaged inputs from stata to csv
# - repackage default deaths/population into a standard text format: e.g., HMD format.
# (easier for analysts to substitute their own deaths/population data)
# (use the readHMDHFD package to sideload txt files?)
# - calculate nqx from better data + package an included set of ax values.
## 1 SETUP ----------------------------------------------------------------------
## 1.1 packages
.pkg <- c("data.table","readr","readstata13","stringr","tidyr")
.inst <- .pkg %in% installed.packages()
if(length(.pkg[!.inst]) > 0) install.packages(.pkg[!.inst])
lapply(.pkg, require, character.only=TRUE)
## 1.2 options
## Switches selecting which input data sources feed the rest of the script.
controlPop <- TRUE # whether to control ACS to DOF pop totals
whichDeaths <- "real" # source of deaths data (real,fake,dof)
whichPop <- "pep" # source of population data (dof,pep)
## Censoring thresholds: life tables pooled over fewer person-years (critNx)
## or deaths (critDx) than these are dropped in section 6.3.
critNx <- 10000
critDx <- 700
## 1.3 paths
#setwd("C:/Users/fieshary/projects/CACommunityBurden")
myDrive <- getwd()
myPlace <- paste0(myDrive,"/myCBD")
upPlace <- paste0(myDrive,"/myUpstream")
dofSecure <- "d:/users/fieshary/projects/vry-lt/dx"
## NOTE(review): mySecure is assigned three times; only the last (Linux mount)
## takes effect. Comment out all but the path for the current environment.
mySecure <- "d:/0.Secure.Data/myData"
mySecure <- "G:/CCB/0.Secure.Data/myData"
mySecure <- "/mnt/projects/CCB/0.Secure.Data/myData"
## 1.4 links
#.ckey <- read_file(paste0(upPlace,"/upstreamInfo/census.api.key.txt")) # census API key
## Input file locations; .nxacs depends on whether ACS was controlled to DOF.
.nxacs <- ifelse(controlPop,
paste0(upPlace,"/lifeTables/dataIn/acs5_mssa_adj.dta"), # ACS tract pop, collapsed to MSSA and controlled to DOF county
paste0(upPlace,"/lifeTables/dataIn/acs5_mssa.dta") # ACS tract pop collapsed to MSSA
)
.trt00mssa <- paste0(upPlace,"/lifeTables/dataIn/trt00mssa13.dta") # 2009 TIGER/LINE census tracts to 2013 MSSAs
.trt10mssa <- paste0(upPlace,"/lifeTables/dataIn/trt10mssa13.dta") # 2010 TIGER/LINE census tracts to 2013 MSSAs
.mssacfips <- paste0(upPlace,"/lifeTables/dataIn/mssa13cfips.dta") # 2013 MSSA to county
.countycfips <- paste0(upPlace,"/lifeTables/dataIn/countycfips.dta") # county name to county FIPS in GEOID format
if (whichDeaths=="fake") .deaths <- paste0(upPlace,"/upData/cbdDat0SAMP.R")
if (whichDeaths=="real") .deaths <- paste0(mySecure,"/ccb_processed_deaths.RDS")
if (whichDeaths=="dof") .deaths <- paste0(dofSecure,"/dof_deaths_mi.dta")
if (whichPop=="dof") .pop <- paste0(upPlace,"/lifeTables/dataIn/dof_ic10pc19.dta")
if (whichPop=="pep") .pop <- paste0(upPlace,"/lifeTables/dataIn/pep_ic10pc18_special.dta")
## 2 GEOGRAPHY ----------------------------------------------------------------------
## 2.1 load tract to MSSA maps
## crosswalks mapping 2000/2010 census tracts into 2013 MSSA boundaries
trt00mssa<-setDT(read.dta13(.trt00mssa))
trt10mssa<-setDT(read.dta13(.trt10mssa))
mssacfips<-setDT(read.dta13(.mssacfips))
## 2.2 load county name to county FIPS code maps
countycfips<-setDT(read.dta13(.countycfips))
## 3 POPULATION ----------------------------------------------------------------------
## 3.1 load 2000-09 intercensal + 2010-18 postcensal county + state population
nx.county<-setDT(read.dta13(.pop))
## Stack marginal totals onto the sex x race7 detail rows. idcol=TRUE tags
## each rbind-ed block (.id) so the dimensions collapsed in each aggregation
## can be relabeled "TOTAL" below; .id==1 is the original detail.
nx.county<-rbind(nx.county, # sex+race detail
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
nx.county[,.(Nx=sum(Nx)),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
nx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
nx.county[.id==3,race7:="TOTAL"]
nx.county[.id==4,sex:="TOTAL"]
nx.county[,.id:=NULL]
## drop any statewide pseudo-county row; the state series is rebuilt below
nx.county<-nx.county[GEOID!="06000000000"]
## state
nx.state<-copy(nx.county[,.(Nx=sum(Nx)),by=.(year,sex,race7,agell,ageul)])
nx.state[,GEOID:="06000000000"]
## 3.3 load ACS 2005-2015 five-year samples from NHGIS, rolled up to MSSA level
nxacs<-setDT(read.dta13(.nxacs))
nxacs[,race7:="TOTAL"] # ACS extract used here carries no race detail
nxacs<-rbind(nxacs,
nxacs[,.(Nx=sum(Nx)),by=.(year,comID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
nxacs[.id==2,sex:="TOTAL"]
nxacs[,.id:=NULL]
## 4 DEATHS ---------------------------------------------------------------------------
## 4.1 load selected deaths master file
## "dof" reads a Stata file; "fake"/"real" use load(), which defines
## cbdDat0SAMP/cbdDat0FULL in the global environment as a side effect.
## NOTE(review): the "real" path ends in .RDS, which is normally read with
## readRDS(); load() works only if the file is actually an RData image -- verify.
if (whichDeaths=="dof") setDT(dofdeaths<-read.dta13(.deaths))
if (whichDeaths=="fake") { load(.deaths); setDT(cbdDat0SAMP); cbddeaths<-cbdDat0SAMP }
if (whichDeaths=="real") { load(.deaths); setDT(cbdDat0FULL); cbddeaths<-cbdDat0FULL }
## 4.2 clean CBD deaths files
## Builds dx.mssa (5-year ages, sex detail, race TOTAL), dx.county
## (abridged ages, sex x race7 detail plus marginal totals), and dx.state.
if (whichDeaths %in% c("real","fake")) {
## MSSA
## keep usable records: known sex/age/year and a CA tract GEOID
## (county FIPS 06001-06115 embedded in GEOID positions 1-5)
dx.mssa<-copy(cbddeaths[sex %in% c("M","F") &
!is.na(age) & !is.na(year) &
as.numeric(substring(GEOID,1,5)) %in% 6001:6115]) # keep conditions
## 5-year age groups, topcoded at 85+; ageul=199 marks the open interval
dx.mssa[,agell:=(5*floor(age/5))]
dx.mssa[agell>85,agell:=85]
dx.mssa[age<85,ageul:=agell+4]
dx.mssa[age>=85,ageul:=199]
dx.mssa[sex=="F",sex:="FEMALE"]
dx.mssa[sex=="M",sex:="MALE"]
## BUG FIX: merge.data.table has no 'on' argument -- the previous
## on=GEOID was silently swallowed by '...' and the join fell back to
## all common columns. Name the key explicitly with by=.
dx.mssa<-merge(dx.mssa,trt10mssa,by="GEOID",all.x=TRUE) # merge tract->mssa; ONLY 2010 tracts are geocoded.
dx.mssa<-rbind(dx.mssa[,.(Dx=.N),by=.(year,comID,sex,agell,ageul)], # sex detail
dx.mssa[,.(Dx=.N),by=.(year,comID,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.mssa[.id==2,sex:="TOTAL"]
dx.mssa[,.id:=NULL]
dx.mssa[,race7:="TOTAL"]
## county
dx.county<-copy(cbddeaths[sex %in% c("M","F") &
!is.na(age) & !is.na(year) &
!is.na(county)]) # keep conditions
## abridged age groups: 0, 1-4, then 5-year groups topcoded at 85+
dx.county[age==0,agell:=0]
dx.county[age %in% 1:4,agell:=1]
dx.county[age>=5,agell:=(5*floor(age/5))]
dx.county[agell>85,agell:=85]
dx.county[agell==0,ageul:=0]
dx.county[agell==1,ageul:=4]
dx.county[agell %in% 5:80,ageul:=agell+4]
dx.county[age>=85,ageul:=199]
dx.county[sex=="F",sex:="FEMALE"]
dx.county[sex=="M",sex:="MALE"]
## recode CCB race labels into the race7 schema used by the population data
dx.county[raceCode=="AIAN-NH",race7:="AIAN_NH"]
dx.county[raceCode=="Asian-NH",race7:="ASIAN_NH"]
dx.county[raceCode=="Black-NH",race7:="BLACK_NH"]
dx.county[raceCode=="Hisp",race7:="HISPANIC"]
dx.county[raceCode=="Multi-NH",race7:="MR_NH"]
dx.county[raceCode=="NHPI-NH",race7:="NHPI_NH"]
dx.county[raceCode=="White-NH",race7:="WHITE_NH"]
dx.county[raceCode=="Other-NH",race7:="SOR_NH"]
## BUG FIX: same 'on=' -> 'by=' correction as above.
dx.county<-merge(dx.county,countycfips,by="county",all.x=TRUE) # merge cname->GEOID
dx.county[,GEOID:=sprintf("%05d000000",cfips)]
## stack marginal totals (see section 3.1 for the .id convention)
dx.county<-rbind(dx.county[,.(Dx=.N),by=.(year,GEOID,sex,race7,agell,ageul)], # sex+race detail
dx.county[,.(Dx=.N),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
dx.county[,.(Dx=.N),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
dx.county[,.(Dx=.N),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
dx.county[.id==3,race7:="TOTAL"]
dx.county[.id==4,sex:="TOTAL"]
dx.county[,.id:=NULL]
## state
dx.state<-copy(dx.county[,.(Dx=sum(Dx)),by=.(year,sex,race7,agell,ageul)])
dx.state[,GEOID:="06000000000"]
}
## 4.3 clean DOF deaths files
## DOF deaths arrive pre-tabulated; stack marginal totals the same way as
## the population data in 3.1 (.id flags which dimension was collapsed).
if (whichDeaths == "dof") {
dx.county<-copy(dofdeaths)
dx.county<-rbind(dx.county, # sex+race detail
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,agell,ageul)], # sex=TOTAL, race=TOTAL
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,sex,agell,ageul)], # race7=TOTAL
dx.county[,.(Dx=sum(Dx)),by=.(year,GEOID,race7,agell,ageul)], # sex=TOTAL
use.names=TRUE,fill=TRUE,idcol=TRUE)
dx.county[.id==2, ':=' (sex="TOTAL",race7="TOTAL")]
dx.county[.id==3,race7:="TOTAL"]
dx.county[.id==4,sex:="TOTAL"]
dx.county[,.id:=NULL]
## state
dx.state<-copy(dx.county[,.(Dx=sum(Dx)),by=.(year,sex,race7,agell,ageul)])
dx.state[,GEOID:="06000000000"]
}
## 5 MORTALITY ----------------------------------------------------------------------
## 5.1 function to generate an extract of years by geo and merge pop + deaths
## syntax: dx=deaths data, nx=pop data, nyrs=N neighboring years to combine, y=target year, level=geography
## Returns one rectangularized data.table for target year y with Nx/Dx pooled
## over the window [y-nyrs, y+nyrs]; stops if either input is missing a year.
## NOTE: for level=="mssa" a GEOID column is added to dx/nx BY REFERENCE
## (data.table semantics) and removed again before returning.
doExtract <- function(dx=NULL, nx=NULL, nyrs=NA, y=NA, level=NA) {
if (level=="mssa") {
dx[,GEOID:=comID]
nx[,GEOID:=comID]
}
# require a complete run of years in the window for both exposures and deaths
if (length(unique(nx[year>=y-nyrs & year<=y+nyrs,year]))<(2*nyrs+1)) { stop("Exposure data are missing for one or more years") }
if (length(unique(dx[year>=y-nyrs & year<=y+nyrs,year]))<(2*nyrs+1)) { stop("Incidence data are missing for one or more years") }
# BUG FIX: merge.data.table has no 'on' argument -- the previous on=c(...)
# was silently swallowed by '...' and the join fell back to all common
# columns. Name the join keys explicitly with by=.
tmp<-merge(nx[year>=y-nyrs & year<=y+nyrs],dx[year>=y-nyrs & year<=y+nyrs],
by=c('GEOID','sex','year','agell','ageul','race7'),
all.x=TRUE,all.y=TRUE) # merge pop+deaths (filtered years)
tmp<-tmp[,.(Nx=sum(Nx),Dx=sum(Dx)),by=c('GEOID','sex','agell','ageul','race7')] # collapse over years
tmp<-setDT(complete(tmp,GEOID,sex,race7,agell)) # (tidyr) rectangularize
tmp[is.na(Dx),Dx:=0] # convert implicit to explicit zero.
tmp[,year:=y] # recode year
if (level=="mssa") {
tmp[,comID:=GEOID]
tmp[,GEOID:=NULL]
dx[,GEOID:=NULL] # undo the by-reference column added above
nx[,GEOID:=NULL]
}
return(tmp)
}
## 5.2 call doExtract for various geographies
## GEO by: sex/age race
## state 1 year 1yr
## county 3 yr 5yr
## mssa 5 yr -
#XXX INPUT DATES
## mssa
if (whichDeaths %in% c("real","fake")) {
# 5-year centered windows for MSSA-level tables (race TOTAL only).
# (a dead `range<-2009:2014` that was immediately overwritten was removed)
range<-2009:2016 # or later if available. 'fake' has nx 2009-2018 and dx 2007-2014
mx.mssa<-data.table(do.call(rbind,lapply(range,doExtract,dx=dx.mssa,nx=nxacs,nyrs=2,level="mssa")))[,nyrs:=5]
}
## county
## 3-year windows for county race-TOTAL tables; 5-year windows for race detail
mx.county<-rbind( # combine 3-year TOTAL race, 5-year race7
data.table(do.call(rbind,lapply(2001:2017,doExtract,dx=dx.county,nx=nx.county,nyrs=1,level="county")))[race7=="TOTAL"][,nyrs:=3],
data.table(do.call(rbind,lapply(2002:2016,doExtract,dx=dx.county,nx=nx.county,nyrs=2,level="county")))[,nyrs:=5]
)
## state
## single-year extracts for the state series
mx.state<-data.table(do.call(rbind,lapply(2000:2018,doExtract,dx=dx.state,nx=nx.state,nyrs=0,level="state")))[,nyrs:=1]
#XXX testing
## crude percent dead, kept for eyeballing only
mx.state$pDead <- 100*mx.state$Dx / mx.state$Nx
## 6 LIFE TABLES ----------------------------------------------------------------------
## 6.1 generic function to produce a life table
## x is a vector of age groups, Nx is the corresponding vector of pop, Dx of deaths
## sex is M or MALE or F or FEMALE (used to calc ax); ax is an optional vector of ax values
## previously estimated ax values are available from the UN WPP, USMDB, NCHS, including by race.
## values used here are from USMDB CA 1x10 or 5x10 (2010-17) by sex.
## also exports LTCI from Chiang's method with adjusted final age group
## - D. Eayres and E.S. Williams. 2004. "Evaluation of methodologies for small area life
## expectancy estimation". J Epi Com Health 58(3). http://dx.doi.org/10.1136/jech.2003.009654.
## Returns a data.table with one row per age group: the standard life table
## columns (mx qx px lx dx Lx Tx ex) plus Chiang variance terms and the
## level-% normal-approximation bounds on ex (exlow/exhigh).
doLTChiangCI <- function(x, Nx, Dx, sex, ax=NULL, level=0.95) {
m <- length(x) # get position of final age group by length of age vector
mx <- Dx/Nx # mortality rate
n <- c(diff(x), NA) # n years between age groups
# calculate ax
if(is.null(ax)) { # if no ax values provided, use hardcoded CA 2010-17 by sex.
# (a dead `ax <- rep(0,m)` that was immediately overwritten was removed)
ax <- n/2 # rule of thumb: 1/2 age interval
# infant ages # from USMDB CA 5x10 life tables.
if (n[1]==1) ax[1]<-0.06
if (n[2]==4) ax[2]<-1.64
if (n[1]==5) ax[1]<-0.44
# final age interval
ax[m] <- 1 / mx[m] # rule of thumb: inverse of mx in final age interval
if (is.na(ax[m])) { # if cannot calculate, e.g. because mx==0
if(grepl("F",sex[1])) { # female
if (x[m]==85) ax[m]<-7.58
if (x[m]==90) ax[m]<-5.22
if (x[m]==100) ax[m]<-2.47
}
if(!grepl("F",sex[1]) & !grepl("T",sex[1])) { # male
if (x[m]==85) ax[m]<-6.54
if (x[m]==90) ax[m]<-4.50
if (x[m]==100) ax[m]<-2.22
}
if(grepl("T",sex[1])) { # total
if (x[m]==85) ax[m]<-7.19
if (x[m]==90) ax[m]<-4.97
if (x[m]==100) ax[m]<-2.42
}
}
}
# Chiang standard elements
qx <- n*mx / (1+(n-ax)*mx) # probability of death (from mortality rate)
qx[m] <- 1 # 100% at oldest age group
px <- 1-qx # pr(survival)
lx <- cumprod(c(1,px))*100000 # 100,000 for radix
dx <- -diff(lx) # deaths each age interval
Lx <- n*lx[-1] + ax*dx # PY lived in this age group
lx <- lx[-(m+1)] # survivors
Lx[m] <- lx[m]/mx[m] # PY lived in final age group
Lx[is.na(Lx)|is.infinite(Lx)] <- 0 # in case of NA or Inf values from poorly formed LTs
Tx <- rev(cumsum(rev(Lx))) # cumulative PY lived at this age and above
ex <- Tx/lx # life expectancy at this age
# Chiang CI elements
zcrit <- 1-((1-level)/2) # upper-tail probability for the normal quantile
sp2<-((qx^2)*(1-qx))/Dx # variance of survival probability
sp2[is.na(sp2)]<-0 # fix zero deaths case
sp2[m]<-4/Dx[m]/mx[m]^2 # adjustment final age interval
wsp2<-lx^2*((1-(ax/n))*n+c(tail(ex,-1),NA))^2*sp2 # weighted SP2
wsp2[m]<-(lx[m]/2)^2*sp2[m] # adjustment final age interval
Twsp2<-rev(cumsum(rev(wsp2))) # sum of weighted sp2 rows below (like Tx)
se2<-Twsp2/lx^2 # sample variance of e0
exlow<-ex-qnorm(zcrit)*sqrt(se2) # CI low
exhigh<-ex+qnorm(zcrit)*sqrt(se2) # CI high
# return
return(data.table(x, n, Nx, Dx, mx, ax, qx, px, lx, dx, Lx, Tx, ex, sp2, wsp2, Twsp2, se2, exlow, exhigh))
}
## 6.2 add index to mx tables
## i uniquely identifies one life table (geo x sex x race x year x window);
## keying on (i, agell) orders ages within each table for the LT routine.
mx.state[, i:=.GRP, by=c("nyrs","GEOID","sex","race7","year")]
setkeyv(mx.state,c("i","agell"))
mx.county[, i:=.GRP, by=c("nyrs","GEOID","sex","race7","year")]
setkeyv(mx.county,c("i","agell"))
if (whichDeaths %in% c("real","fake")) {
mx.mssa[, i:=.GRP, by=c("nyrs","comID","sex","race7","year")]
setkeyv(mx.mssa,c("i","agell"))
}
## 6.3 restrict using sum(Nx) & sum(Dx)
## Censor unstable tables: drop any life table whose pooled person-years or
## deaths fall below the critNx/critDx thresholds set in section 1.2.
mx.state<-mx.state[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
mx.county<-mx.county[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
if (whichDeaths %in% c("real","fake")) {
mx.mssa<-mx.mssa[, ':=' (sumNx=sum(Nx),sumDx=sum(Dx)), by=.(i)][sumNx>=critNx & sumDx>=critDx]
}
## 6.4 Call LT routine by geography
## doLTChiangCI runs once per life-table group (index i); the system.time
## wrappers only report elapsed time to the console.
## state
system.time({ lt.state<-mx.state[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","GEOID","sex","race7","year")] })
setkeyv(lt.state,c("i","x"))
## county
system.time({ lt.county<-mx.county[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","GEOID","sex","race7","year")] })
setkeyv(lt.county,c("i","x"))
## MSSA
if (whichDeaths %in% c("real","fake")) {
system.time({ lt.mssa<-mx.mssa[, doLTChiangCI(x=agell,Nx=Nx,Dx=Dx,sex=sex), by=c("i","nyrs","comID","sex","race7","year")] })
setkeyv(lt.mssa,c("i","x"))
}
## 7 REVIEW/EXPORT ----------------------------------------------------------------------
## 7.1 EXPORT
## full LT
saveRDS(lt.state,paste0(upPlace,"/lifeTables/dataOut/LTciState.rds"))
saveRDS(lt.county,paste0(upPlace,"/lifeTables/dataOut/LTciCounty.rds"))
if (whichDeaths %in% c("real","fake")) {
saveRDS(lt.mssa,paste0(upPlace,"/lifeTables/dataOut/LTciMSSA.rds"))
}
## e0 only
## life expectancy at birth (x==0) with confidence bounds, one row per table
saveRDS(lt.state[x==0,c("nyrs","GEOID","sex","race7","year","ex","exlow","exhigh")],
paste0(upPlace,"/lifeTables/dataOut/e0ciState.rds"))
saveRDS(lt.county[x==0,c("nyrs","GEOID","sex","race7","year","ex","exlow","exhigh")],
paste0(upPlace,"/lifeTables/dataOut/e0ciCounty.rds"))
if (whichDeaths %in% c("real","fake")) {
saveRDS(lt.mssa[x==0,c("nyrs","comID","sex","race7","year","ex","exlow","exhigh")]
,paste0(upPlace,"/lifeTables/dataOut/e0ciMSSA.rds"))
}
## 7.2 Review
## Console sanity checks of pooled totals and e0 series.
mx.state[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("GEOID","sex","year","race7")] # state sum
lt.state[x==0 & sex=="TOTAL" & race7=="TOTAL",c("GEOID","sex","year","race7","ex","exlow","exhigh")]
mx.county[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("nyrs","sex","year","race7")] # state sum
lt.county[x==0 & sex=="TOTAL" & race7=="TOTAL" & year==2017,
c("nyrs","GEOID","sex","year","race7","ex","exlow","exhigh")]
## BUG FIX: mx.mssa/lt.mssa exist only for the CBD deaths sources; guard the
## MSSA review so the script does not error when whichDeaths=="dof".
## print() is required because auto-printing is suppressed inside a block.
if (whichDeaths %in% c("real","fake")) {
print(mx.mssa[sex=="TOTAL" & race7=="TOTAL",.(Nx=sum(Nx),Dx=sum(Dx)),by=c("sex","year","race7")]) # state sum
print(lt.mssa[x==0 & sex=="TOTAL" & race7=="TOTAL" & (year %in% c(2010,2017)),
c("comID","sex","year","race7","ex","exlow","exhigh")])
}
## 7.3 NOTES ----------------------------------------------------------
# Life tables for communities, counties and states are generated from age specific
# mortality rates, which are the quotient of deaths during a calendar year to
# the exposure, approximated by the population of the same age at the midpoint of
# the year (July 1). Age structured population data for tracts and communities are
# estimated using data from the American Community Survey, 5-year sample (table
# B01001; multiple years). County and state age population by age are estimated by
# the Demographic Research Unit, CA Department of Finance. Deaths data are based
# on 100% extracts from the vital statistics reporting system, CA Department of
# Public Health. Mortality and exposure data were combined for small groups:
# 5 years of combined population and mortality data for each annual community table,
# as well as to county tables by race. 3 years of combined data for county tables
# without race detail. Life tables with fewer than 700 deaths or 10,000 PY were
# censored. Intra-age mortality (nax) was calculated for ages below 5 using values
# from a similar population (CA life table for 2010-17 from USMDB) and by the
# midpoint of the age interval for other age groups except the last (1/mx or a
# value from USMDB if mx is zero or undefined). Standard errors were calculated
# for age specific probabilities of death and used to calculate 95% confidence
# intervals for life expectancy (Chiang 1984; Eayres and Williams 2004).
#
# United States Mortality DataBase. University of California, Berkeley (USA).
# Available at usa.mortality.org. Downloaded 2020-02-27.
#
# Chiang, C.L. 1984. The Life Table and its Applications. Robert E Krieger Publ Co., pp. 153-168.
#
# Eayres D, and E.S.E. Williams. Evaluation of methodologies for small area life expectancy estimation.
# Journal of Epidemiology & Community Health 2004;58:243-249.
# ============================================================================
# final_model_predictions.R
# settled on fit2 for publication.
# libraries -----------------------
library(brms)
library(dplyr)
library(ggplot2)
library(ggridges)
# Functions -----------------------
## Summarize a fitted brms model into a compact coefficient table and
## optionally write it to a file via clickR::make_table().
## x: brmsfit object; file: output path stem (NULL = no file); type: output
## format passed to make_table; digits: rounding; info: include the model
## call in the file; include_ic: append PSIS-LOO estimates.
## Returns (invisibly) a "reportmodel" list holding coefficients, SEs,
## random-effect summaries, convergence diagnostics, and the output table.
report.brmsfit<-function(x, file=NULL, type="word", digits=3, info=FALSE,
include_ic=FALSE){
sx<-summary(x)
# random-effect summaries may be absent; collapse to NA in that case
random<-tryCatch(do.call(rbind, sx$random), error=function(e) NA)
if(!any(is.na(random))) rownames(random)<-paste(rownames(random),rep(names(sx$random), sapply(sx$random, nrow)), sep=" ")
if(include_ic){
# call loo directly; eval(parse(text=...)) added nothing but obscurity
loo<-brms::loo(x)
obj<-list(coefficients=setNames(sx$fixed[,1], rownames(sx$fixed)), se=sx$fixed[,2],
random=random, loo=setNames(c(loo$estimates[1,1], loo$estimates[1,2]), c("ELPD (PSIS-LOO)", "ELPD SE")),
Eff.Sample_min=sx$fixed[,5], Rhat_max=round(sx$fixed[,6],2))
output<-rbind(cbind(round(obj$coefficients,digits),round(obj$se,digits),obj$Eff.Sample_min,obj$Rhat_max),
if(!any(is.na(random))) {
cbind(round(random[,1:2, drop=FALSE], digits), round(random[,5:6, drop=FALSE], digits))
},
c(round(loo$estimates[1,1], digits), round(loo$estimates[1,2], digits),NA,NA))
rownames(output)[dim(output)[1]]<-"LOO"
}else{
obj<-list(coefficients=setNames(sx$fixed[,1], rownames(sx$fixed)), se=sx$fixed[,2], random=random,
Rhat=round(sx$fixed[,5],3), Bulk_ESS=sx$fixed[,6], Tail_ESS=sx$fixed[,7])
output<-rbind(cbind(round(obj$coefficients,digits),round(obj$se,digits),obj$Rhat,obj$Bulk_ESS,obj$Tail_ESS),
if(!any(is.na(random))) {
cbind(round(random[,1:2, drop=FALSE], digits), round(random[,5:7, drop=FALSE], digits))
})
}
if(!is.null(file)){
info <- if(info) deparse(getCall(x)) else NULL
suppressWarnings(clickR::make_table(output, file, type, info=info))
}
obj$output <- data.frame(output, check.names=FALSE, stringsAsFactors=FALSE)
class(obj) <- "reportmodel"
invisible(obj)
}
cbind.fill <- function(x, fill = NA) {
  # Column-bind a list of objects with unequal row counts, padding the
  # shorter ones at the bottom with `fill` so all share the max row count.
  mats <- lapply(x, as.matrix)
  target_rows <- max(sapply(mats, nrow))
  padded <- lapply(mats, function(m) {
    rbind(m, matrix(fill, target_rows - nrow(m), ncol(m)))
  })
  do.call(cbind, padded)
}
PCI <- function(samples, prob = 0.9) {
  # Percentile (central) interval(s) of `samples`, after rethinking::PI.
  # For each probability in `prob`, compute the central interval; with
  # several probabilities the bounds are arranged inside-out in pairs,
  # named by their tail percentages (e.g. "5%" ... "95%").
  qmat <- sapply(prob, function(p) {
    tail_area <- (1 - p) / 2
    quantile(samples, probs = c(tail_area, 1 - tail_area))
  })
  k <- length(prob)
  out <- numeric(2 * k)
  labs <- character(2 * k)
  for (j in seq_len(k)) {
    lo_idx <- k + 1 - j
    hi_idx <- k + j
    out[lo_idx] <- qmat[1, j]
    out[hi_idx] <- qmat[2, j]
    tail_area <- (1 - prob[j]) / 2
    labs[lo_idx] <- paste0(round(tail_area * 100, 0), "%")
    labs[hi_idx] <- paste0(round((1 - tail_area) * 100, 0), "%")
  }
  names(out) <- labs
  return(out)
}
# FUNCTIONS FOR PLOTTING ---------------
wrap.it <- function(x, len) {
  # Hard-wrap each string in x at `len` characters, joining the wrapped
  # pieces with newlines. Returns an unnamed character vector.
  wrap_one <- function(s) paste(strwrap(s, width = len), collapse = "\n")
  sapply(x, wrap_one, USE.NAMES = FALSE)
}
# Call this function with a list or vector
wrap.labels <- function(x, len) {
  # Apply wrap.it to a plain vector directly, or element-wise to a list.
  if (!is.list(x)) {
    return(wrap.it(x, len))
  }
  lapply(x, wrap.it, len)
}
# Main Code -------------------------------------------
# read in model
# fit1 = main-effects model, fit2 = final model selected for publication;
# parname_dict maps raw brms parameter names to plot labels.
fit1 <- readRDS("fit1.rds")
fit2 <- readRDS("fit2.rds")
parname_dict <- read.csv("parname_dict.csv")
# write summary table
report.brmsfit(fit1, file = "./output/fit_main_effects",
type = "word", digits = 3, info=FALSE, include_ic=FALSE)
report.brmsfit(fit2, file = "./output/fit_final_model",
type = "word", digits = 3, info=FALSE, include_ic=FALSE)
# - Make variable name dictionary ------------------
# Start with an identity mapping (name -> name) over the model's data
# columns; entries are replaced with display labels below.
varname_dict = names(fit2$data)
names(varname_dict) = varname_dict
tmp.rename.func <- function(varname_dict, var, newname) {
  # Replace every element of `varname_dict` whose VALUE equals `var` with
  # `newname`, preserving names. Returns the vector unchanged if absent.
  hits <- which(varname_dict == var)
  if (length(hits) > 0) {
    varname_dict[hits] <- newname
  }
  varname_dict
}
# Map raw model column names to human-readable labels used in the figures.
varname_dict = tmp.rename.func(varname_dict, "comfort_rating_ordered", "comfort rating")
varname_dict = tmp.rename.func(varname_dict, "female1", "Woman")
varname_dict = tmp.rename.func(varname_dict, "child_u18TRUE", "HH w/ child")
varname_dict = tmp.rename.func(varname_dict, "age", "Age")
varname_dict = tmp.rename.func(varname_dict, "VideoGroupWithin", "'Within' experimental group")
varname_dict = tmp.rename.func(varname_dict, "op_like_biking", "like riding a bike")
varname_dict = tmp.rename.func(varname_dict, "op_need_car", "need car for activities")
varname_dict = tmp.rename.func(varname_dict, "op_feel_safe", "feel safe biking on campus")
varname_dict = tmp.rename.func(varname_dict, "op_like_transit", "like using public transit")
varname_dict = tmp.rename.func(varname_dict, "op_arrive_professional", "job needs professional attire")
varname_dict = tmp.rename.func(varname_dict, "op_travel_stress", "travelling to campus is stressful")
varname_dict = tmp.rename.func(varname_dict, "bike_ability", "confidence level riding a bike")
varname_dict = tmp.rename.func(varname_dict, "comfort_four_no_lane2", "willing to bike on 4-lane road")
varname_dict = tmp.rename.func(varname_dict, "comfort_four_no_lane3", "comfortable biking on 4-lane road")
varname_dict = tmp.rename.func(varname_dict, "usual_mode_4levBike", "usually commute to campus by bike")
varname_dict = tmp.rename.func(varname_dict, "street_parking_ST1", "street parking")
varname_dict = tmp.rename.func(varname_dict, "outside_lane_width_ft_ST", "outside lane width")
varname_dict = tmp.rename.func(varname_dict, "veh_volume2_ST2", "low volume")
varname_dict = tmp.rename.func(varname_dict, "veh_volume2_ST3", "high volume")
varname_dict = tmp.rename.func(varname_dict, "bike_operating_space_ST", "bike operating space")
varname_dict = tmp.rename.func(varname_dict, "bike_lane_SUM_ST1", "bike lane, no buffer")
varname_dict = tmp.rename.func(varname_dict, "bike_lane_SUM_ST2", "bike lane, with buffer")
varname_dict = tmp.rename.func(varname_dict, "speed_prevail_minus_limit_ST", "prevailing minus posted speed")
varname_dict = tmp.rename.func(varname_dict, "speed_limit_mph_ST_3lev.30.40.", "speed limit [30,40)")
varname_dict = tmp.rename.func(varname_dict, "speed_limit_mph_ST_3lev.40.50.", "speed limit [40,50]")
varname_dict = tmp.rename.func(varname_dict, "veh_vol_non0_opspace_0_ST", "bikes share space with cars")
varname_dict = tmp.rename.func(varname_dict, "person_ID", "person ID")
varname_dict = tmp.rename.func(varname_dict, "video_name", "video name")
#more we'll need below
# additional labels for derived scenario columns and brms parameter names
# (regex-escaped intercept names are matched elsewhere)
varname_dict2 = setNames(nm = c("ability_comfort", "road_environment", "id", "attitude",
"b_Intercept\\[1\\]", "b_Intercept\\[2\\]", "b_Intercept\\[3\\]",
"b_Intercept\\[4\\]", "b_Intercept\\[5\\]", "b_Intercept\\[6\\]",
"sd_person_ID__Intercept", "sd_video_name__Intercept"),
c("biking comfort", "road environment", "id", "transit attitudes",
"Intercept 1 ", "Intercept 2", "Intercept 3",
"Intercept 4", "Intercept 5", "Intercept 6",
"SD person ID Intercept", "SD video name Intercept"))
varname_dict = c(varname_dict, varname_dict2)
rm(varname_dict2)
# reorder varname_dict for plotting
# NOTE(review): positional reorder is fragile -- it silently breaks if the
# number or order of entries above changes; verify indices after any edit.
varname_dict <- varname_dict[c(1,2,20,24,3:19,21:23,25:39)]
# - make parameter plots ----------
# Posterior draws for both models, stacked long for ridgeline plots.
# NOTE(review): posterior_samples() is deprecated in recent brms in favor of
# as_draws_df(); the hard-coded column ranges (1:35, 1:42) assume a fixed
# parameter order in each fit -- verify against the fitted objects.
post1 <-posterior_samples(fit1)[1:35]
post1$model <- "Main Effects"
post2 <-posterior_samples(fit2)[1:42]
post2$model <- "Final"
d.plot <- plyr::rbind.fill(post1,post2)
d.plot <- reshape2::melt(d.plot,"model")
# relabel parameters via the csv dictionary, then order by its 'order' column
d.plot$variable <- plyr::mapvalues(d.plot$variable,from=parname_dict$pname,to=parname_dict$label)
new.order <-
dplyr::inner_join(data.frame(label=levels(d.plot$variable),old_order=1:length(unique(d.plot$variable))),
parname_dict,by="label") %>%
arrange(desc(order))
d.plot$variable <- factor(d.plot$variable,levels=levels(d.plot$variable)[new.order$old_order])
# wrap long labels
levels(d.plot$variable) <- wrap.labels(levels(d.plot$variable),35)
d.plot$model <- factor(d.plot$model)
# put "Main Effects" before "Final" in the facets
# NOTE(review): 'level=' relies on partial matching of factor()'s 'levels='.
d.plot$model <- factor(d.plot$model,level=levels(d.plot$model)[c(2,1)])
# Figure 5a: upper portion of the parameter list; hlines separate groups
png(file="output/Figure5a.png",width=6.5,height=9,units="in",res=900,pointsize = 4)
ggplot(d.plot[d.plot$variable %in% levels(d.plot$variable)[42:20],], aes(x = value, y = variable)) +
coord_cartesian(xlim = c(-2.5,5.5))+
geom_density_ridges(scale = 1.2, rel_min_height=0.01) +
geom_hline(yintercept=9)+
geom_hline(yintercept=14)+
geom_hline(yintercept=18)+
geom_vline(xintercept=0,linetype="dashed")+
labs(x = "Parameter (cumulative logit scale)") +
facet_wrap(~model,nrow=1,labeller = label_wrap_gen(width = 40, multi_line = TRUE))+
theme(axis.title.y=element_blank(),
text = element_text(size=12))
dev.off()
# Figure 5b: remaining parameters
png(file="output/Figure5b.png",width=6.5,height=9,units="in",res=900,pointsize = 4)
ggplot(d.plot[d.plot$variable %in% levels(d.plot$variable)[21:1],], aes(x = value, y = variable)) +
coord_cartesian(xlim = c(-2.5,5.5))+
geom_density_ridges(scale = 1.2, rel_min_height=0.01) +
geom_hline(yintercept=8)+
geom_hline(yintercept=19)+
geom_vline(xintercept=0,linetype="dashed")+
labs(x = "Parameter (cumulative logit scale)") +
facet_wrap(~model,nrow=1,labeller = label_wrap_gen(width = 40, multi_line = TRUE))+
theme(axis.title.y=element_blank(),
text = element_text(size=12))
dev.off()
# Setup conditions for predictive plots ---------------------------
# summary
str(fit2$data)
names(fit2$data)
# d.model holds the pre-scaling data, used below to translate scaled
# quantiles back to real units for labels/comments.
d.model <- readRDS("data_for_models_nonscaled.RDS")
# create by-person data frame for finding quantiles accurately
# (one row per person so repeated video observations don't weight quantiles)
d.scenario = fit2$data %>% group_by(person_ID) %>%
dplyr::select(-c("comfort_rating_ordered","video_name")) %>% summarize_all(first)
# Building blocks ----
# - Individual-level ----
# + attitudes ----
# 10th/50th/90th percentile attitude profiles across persons (scaled units)
op_levels = data.frame(apply(d.scenario[,-1], 2, quantile, probs = c(.1,.5,.9)))
low_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[1], op_feel_safe = op_levels$op_feel_safe[1], op_like_transit = op_levels$op_like_transit[1])
mid_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[2], op_feel_safe = op_levels$op_feel_safe[2], op_like_transit = op_levels$op_like_transit[2])
high_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[3], op_feel_safe = op_levels$op_feel_safe[3], op_like_transit = op_levels$op_like_transit[3])
low_neg_attitudes = c(op_need_car = op_levels$op_need_car[1], op_arrive_professional = op_levels$op_arrive_professional[1], op_travel_stress = op_levels$op_travel_stress[1])
mid_neg_attitudes = c(op_need_car = op_levels$op_need_car[2], op_arrive_professional = op_levels$op_arrive_professional[2], op_travel_stress = op_levels$op_travel_stress[2])
high_neg_attitudes = c(op_need_car = op_levels$op_need_car[3], op_arrive_professional = op_levels$op_arrive_professional[3], op_travel_stress = op_levels$op_travel_stress[3])
# "bad" = low positive + high negative attitudes, etc.
bad_attitudes = c(low_pos_attitudes, high_neg_attitudes)
mid_attitudes = c(mid_pos_attitudes, mid_neg_attitudes)
good_attitudes = c(high_pos_attitudes, low_neg_attitudes)
# + ability + comfort ----
low_ability_comfort = data.frame(comfort_four_no_lane2=0, comfort_four_no_lane3=0, bike_ability=.5, usual_mode_4levBike = 0) #somewhat confident, low comfort
mid_ability_comfort = data.frame(comfort_four_no_lane2=1, comfort_four_no_lane3=0, bike_ability=.5, usual_mode_4levBike = 0) #somewaht confident, moderate comfort
high_ability_comfort = data.frame(comfort_four_no_lane2=0, comfort_four_no_lane3=1, bike_ability=1, usual_mode_4levBike = 1) #very confident, high comfort
# + demographic ----
agelevels = quantile(d.scenario$age , c(.1,.8,.95))
# On real scale
# (unscale for reference; printed at the console, not used downstream)
agelevels*(max(d.model$age,na.rm=T) - min(d.model$age,na.rm=T)) + min(d.model$age,na.rm=T)
# 10% 80% 95%
# 20 34 57
young_childless_male = data.frame(age = agelevels[1], child_u18TRUE = 0, female1 = 0)
midage_child_female = data.frame(age = agelevels[2], child_u18TRUE = 1, female1 = 1)
old_childless_male = data.frame(age = agelevels[3], child_u18TRUE = 0, female1 = 0)
old_childless_female = data.frame(age = agelevels[3], child_u18TRUE = 0, female1 = 1)
# - Road environment ----
# 5th/50th/95th percentile road attributes (scaled), with the real-unit
# equivalents printed for reference in the comments below each.
speed_prevail_levels = quantile(fit2$data$speed_prevail_minus_limit_ST, c(.05,.5,.95))
speed_prevail_levels*(max(d.model$speed_prevail_minus_limit_ST) - min(d.model$speed_prevail_minus_limit_ST)) + min(d.model$speed_prevail_minus_limit_ST)
# 5% 50% 95%
# -10 0 5
outside_lane_levels = quantile(fit2$data$outside_lane_width_ft_ST, c(.05,.5,.95))
outside_lane_levels*(max(d.model$outside_lane_width_ft_ST) - min(d.model$outside_lane_width_ft_ST)) + min(d.model$outside_lane_width_ft_ST)
# 5% 50% 95%
# 9 11 13
bike_space_levels = quantile(fit2$data$bike_operating_space_ST, c(.05,.5,.95))
bike_space_levels*(max(d.model$bike_operating_space_ST) - min(d.model$bike_operating_space_ST)) + min(d.model$bike_operating_space_ST)
# 5% 50% 95%
# 0 5 11
# Six synthetic road environments (collector/arterial x good/mid/bad),
# expressed in the model's dummy/scaled covariates.
collector_good = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 0,
speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 0,
bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 1,
speed_prevail_minus_limit_ST = speed_prevail_levels[1],
street_parking_ST1 = 1,
outside_lane_width_ft_ST = outside_lane_levels[1],
bike_operating_space_ST = bike_space_levels[3],
veh_vol_non0_opspace_0_ST = 0)
collector_mid = data.frame(veh_volume2_ST2 = 1, veh_volume2_ST3 = 0,
speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
bike_lane_SUM_ST1 = 1, bike_lane_SUM_ST2 = 0,
speed_prevail_minus_limit_ST = speed_prevail_levels[2],
street_parking_ST1 = 1,
outside_lane_width_ft_ST = outside_lane_levels[2],
bike_operating_space_ST = bike_space_levels[2],
veh_vol_non0_opspace_0_ST = 0)
collector_bad = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 0,
speed_prevail_minus_limit_ST = speed_prevail_levels[3],
street_parking_ST1 = 1,
outside_lane_width_ft_ST = outside_lane_levels[3],
bike_operating_space_ST = bike_space_levels[1],
veh_vol_non0_opspace_0_ST = 1)
arterial_good = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 1,
speed_prevail_minus_limit_ST = speed_prevail_levels[1],
street_parking_ST1 = 1, # all streets have street parking
outside_lane_width_ft_ST = outside_lane_levels[1], #<- outside lane width effect is neg
bike_operating_space_ST = bike_space_levels[3],
veh_vol_non0_opspace_0_ST = 0)
arterial_mid = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 1,
bike_lane_SUM_ST1 = 1, bike_lane_SUM_ST2 = 0,
speed_prevail_minus_limit_ST = speed_prevail_levels[2],
street_parking_ST1 = 1, # all streets have street parking
outside_lane_width_ft_ST = outside_lane_levels[2],
bike_operating_space_ST = bike_space_levels[2],
veh_vol_non0_opspace_0_ST = 0)
arterial_bad = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 1,
bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 0,
speed_prevail_minus_limit_ST = speed_prevail_levels[3],
street_parking_ST1 = 1, # all streets have street parking
outside_lane_width_ft_ST = outside_lane_levels[3],
bike_operating_space_ST = bike_space_levels[1],
veh_vol_non0_opspace_0_ST = 1)
# Assemble scenario building blocks; the shared id=1 column enables the
# full cross-join into all_counterfactuals below.
attitudes = data.frame(rbind(bad_attitudes, mid_attitudes, good_attitudes),
id = 1, attitude = as.factor(c("bad_attitude", "mid_attitude", "good_attitude")))
attitudes$attitude = ordered(attitudes$attitude, levels(attitudes$attitude)[c(1,3,2)])
ability_comfort = data.frame(rbind(low_ability_comfort, mid_ability_comfort, high_ability_comfort),
id = 1, ability_comfort = as.factor(c("low_comfort", "mid_comfort", "high_comfort")))
ability_comfort$ability_comfort = ordered(ability_comfort$ability_comfort, levels(ability_comfort$ability_comfort)[c(2,3,1)])
road_environments = data.frame(rbind(collector_bad, collector_mid, collector_good,
arterial_bad, arterial_mid, arterial_good),
id = 1, road_environment = c("collector_bad", "collector_mid", "collector_good",
"arterial_bad", "arterial_mid", "arterial_good"))
road_environments$road_environment = ordered(road_environments$road_environment,
levels = c("arterial_bad", "arterial_mid", "arterial_good", "collector_bad", "collector_mid", "collector_good"))
person = data.frame(rbind(young_childless_male, #midage_child_female, old_childless_female,
old_childless_male),
id = 1, person = c("20yr_man", #"midage_child_female", "old_childless_male",
"57yr_woman"))
# full outer join on id=1 -> cartesian product of all scenario blocks
all_counterfactuals = plyr::join_all(list(attitudes, ability_comfort, road_environments, person), by='id', type='full' )
all_counterfactuals$rowID = 1:nrow(all_counterfactuals)
building_blocks = list(attitudes = attitudes, ability_comfort = ability_comfort, road_environments = road_environments)
# NOTE(review): this sapply() result is not assigned -- it only auto-prints
# relabeled copies at the console; building_blocks itself is unchanged.
# Confirm whether assignment was intended.
sapply(building_blocks, function(x) {names(x) = varname_dict[names(x)]; x})
# + Add interactions ----
# Recover interaction terms from the model formula and compute each as the
# row-wise product of the matching main-effect columns (matched by grepl).
interaction.terms = trimws(strsplit(as.character(fit2$formula[[1]][3][1]), " \\+ ")[[1]])
interaction.terms = interaction.terms[grepl(":",interaction.terms)]
interactions = data.frame(do.call("cbind", lapply(interaction.terms, function(term) {
tmp = all_counterfactuals %>% dplyr::select(names(all_counterfactuals)[sapply(names(all_counterfactuals), grepl, x = term)])
apply(tmp, 1, prod)
})))
names(interactions) = interaction.terms
all_counterfactuals = data.frame(all_counterfactuals, interactions)
# simplified plot for me_per_vid for report ------------
# Hold person/attitude/comfort fixed at one profile and vary only the six
# road environments; each row of x is one prediction scenario.
x = rbind(data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_bad)),
data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_mid)),
data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_good)),
data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_bad)),
data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_mid)),
data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_good)))
x$class <- c("bad_attitude.low_comfort.collector_bad",
"bad_attitude.low_comfort.collector_mid",
"bad_attitude.low_comfort.collector_good",
"bad_attitude.low_comfort.arterial_bad",
"bad_attitude.low_comfort.arterial_mid",
"bad_attitude.low_comfort.arterial_good")
x$VideoGroupWithin <- rep(0,nrow(x))
# general predictive plots for interactions -----------------------
# 18 scenarios: the collector_mid row crossed with 3 ages x 3 bike operating
# space levels; then duplicated with comfort_four_no_lane3 flipped to 1
# (36 rows total). *_class columns carry real-unit labels for plotting.
newdata <- rbind(x[rep(2,9),])
newdata$age <- rep(agelevels,3)
newdata$bike_operating_space_ST <- rep(bike_space_levels,each=3)
newdata2 <- newdata
newdata2$comfort_four_no_lane3 <- 1
newdata<- rbind(newdata,newdata2)
newdata$age_class <- newdata$age*(max(d.model$age,na.rm=T) - min(d.model$age,na.rm=T)) + min(d.model$age,na.rm=T)
newdata$comfort_four_no_lane3_class <- c(rep("NOT comfortable on mixed arterial",9),rep("Comfortable on mixed arterial",9))
newdata$bike_operating_space_ST_class <- newdata$bike_operating_space_ST*(max(d.model$bike_operating_space_ST) - min(d.model$bike_operating_space_ST)) + min(d.model$bike_operating_space_ST)
# predict with data to show interactions
# NOTE(review): 'allow_new_level=T' relies on R partial argument matching of
# brms's 'allow_new_levels'; consider spelling it out and using TRUE.
pk <- posterior_epred(fit2,newdata=newdata,allow_new_level=T,sample_new_levels="gaussian")
# pk is draws x observations x response categories; summarize each scenario
# into a posterior mean and 90% interval per comfort-rating category.
# NOTE(review): scenario=1:18 and each=7 assume 18 scenario rows and 7
# response categories -- verify against nrow(newdata) and dim(pk)[3].
d.plot <- data.frame(scenario = rep(1:18,each=7),
age=rep(paste(newdata$age_class,"yo"),each=7),
comfort=rep(newdata$comfort_four_no_lane3_class,each=7),
bike_space=rep(newdata$bike_operating_space_ST_class,each=7),
class = rep(sort(unique(fit2$data$comfort_rating_ordered)),length(newdata$class)),
p.mean = NA,
p.lwr = NA,
p.upr = NA
)
# fill posterior mean and 90% PCI for each scenario's category probabilities
for(s in 1:length(unique(d.plot$scenario))){
d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.mean"] <- apply(pk[,s,],2,mean)
PI <- apply(pk[,s,],2,PCI,.9)
d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.lwr"] <- PI[1,]
d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.upr"] <- PI[2,]
}
# age plot
# Figure 7: predicted rating probabilities by age (facets), for riders NOT
# comfortable on a mixed arterial, coloured by bike operating space.
png(file="output/Figure7.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot[d.plot$comfort=="NOT comfortable on mixed arterial",],aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(bike_space)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(bike_space)))+
  geom_point(aes(color=as.factor(bike_space)))+
  coord_cartesian(xlim=c(0,.8))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_grid(~age)+
  guides(fill=guide_legend(title="Bike operating space (ft)"),
         color=guide_legend(title="Bike operating space (ft)"))+
  theme(legend.position="top")
dev.off()
# comfort plot
# Figure 8: comfort-indicator contrast at a single (middle) age.
# NOTE(review): this subset depends on "33 yo" exactly matching
# paste(newdata$age_class, "yo") -- verify the back-transformed age is
# exactly 33, otherwise this filter selects zero rows.
png(file="output/Figure8.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot[d.plot$age=="33 yo",],aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(bike_space)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(bike_space)))+
  geom_point(aes(color=as.factor(bike_space)))+
  coord_cartesian(xlim=c(0,.8))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_grid(~comfort)+
  guides(fill=guide_legend(title="Bike operating space (ft)"),
         color=guide_legend(title="Bike operating space (ft)"))+
  theme(legend.position="top")
dev.off()
# scenario predictive plots ---------------------
# # predict for each sample the response while considering new videos and new people
# # I.e. include the uncertainty from videos and people in the predictions
# n=10
# pred.cumsum <- array(0, dim = c(3, nrow(x), n))
# for(i in 1:n){
# pred <- predict(fit2, newdata = x,summary=T,
# allow_new_level=T, sample_new_levels="gaussian")
# tmp <- apply(pred,2,table)
# if(is.list(tmp)){
# tmp <- sapply(1:length(tmp),function(x) as.vector(tmp[[x]]))
# tmp <- cbind.fill(tmp,fill=0)
# }
# pred.cumsum[,,i] <- rbind(colSums(tmp[5:7,]),colSums(tmp[6:7,]),tmp[7,])
# }
# pred.cumsum <- apply(pred.cumsum,c(2,3),function(x) x/nrow(pred))
# pred.mean <- apply(pred.cumsum,c(1,2),mean)
# pred.PI <- apply(pred.cumsum,c(1,2),PCI,prob=.95)
#
# pred.plot <- data.frame(class=rep(x$class,each=3),
# road = c(rep("collector",9),rep("arterial",9)),
# scenario = rep(rep(c("poor","average","best"),each=3),2),
# comfort = rep(c("At least slightly comfortable",
# "At least moderatly comfortable",
# "Very comfortable"),3),
# Estimate = as.vector(pred.mean),
# Q2.5 = as.vector(pred.PI[1,,]),
# Q97.5 = as.vector(pred.PI[2,,]))
# pred.plot$comfort <- factor(pred.plot$comfort, levels = c("At least slightly comfortable",
# "At least moderatly comfortable",
# "Very comfortable"))
# pred.plot$scenario <- factor(pred.plot$scenario, levels = c("poor","average","best"))
#
# #Collector
# ggplot( pred.plot, aes(comfort, Estimate)) +
# geom_point(size=.8) +
# #geom_line(aes(group=class)) +
# geom_errorbar(aes(ymin = Q2.5, ymax = Q97.5), width = 0.2)+
# facet_grid(scenario~road)+#, ncol = 3, strip.position = "top") +
# theme_bw() +
# coord_flip()+
# ylab("Predicted proportion of responses")+
# #ggtitle(paste("Collectors, ", model_name)) +
# theme(strip.text = element_text(size = 8),
# axis.title.y = element_blank())
# Alternative (that I like better) ------------
# Posterior expected category probabilities for the six road/design scenarios.
# NOTE(review): brms spells this argument 'allow_new_levels' (plural); as
# written it is absorbed by '...' -- verify new levels are handled as intended.
pk <- posterior_epred(fit2,newdata=x,allow_new_level=T,sample_new_levels="gaussian")
# One row per scenario x response category: 6 scenarios x 7 ordinal levels.
# Rows 1-21 are the collector scenarios, rows 22-42 the arterial ones.
d.plot <- data.frame(scenario=rep(x$class,each=7),
                     road.type=c(rep("Collector",21),rep("Arterial",21)),
                     design=as.factor(rep(rep(c("poor","moderate","good"),each=7),2)),
                     class = rep(sort(unique(fit2$data$comfort_rating_ordered)),length(x$class)),
                     p.mean = NA,
                     p.lwr = NA,
                     p.upr = NA
)
# Reverse the alphabetical factor order so the legend reads poor/moderate/good.
d.plot$design <- factor(d.plot$design,levels=levels(d.plot$design)[c(3,2,1)])
# Posterior mean and 90% percentile interval per scenario/category.
for(s in 1:length(unique(d.plot$scenario))){
  d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.mean"] <- apply(pk[,s,],2,mean)
  PI <- apply(pk[,s,],2,PCI,.9)
  d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.lwr"] <- PI[1,]
  d.plot[d.plot$scenario==unique(d.plot$scenario)[s],"p.upr"] <- PI[2,]
}
# Figure 9: predicted rating probabilities per design class, faceted by road type.
png(file="output/Figure9.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot,aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(design)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(design)))+
  geom_point(aes(color=as.factor(design)))+
  coord_cartesian(xlim=c(0,1))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_wrap(~road.type)+
  guides(fill=guide_legend(title="Design Class"),
         color=guide_legend(title="Design Class"))+
  theme(legend.position="top")
dev.off()
|
/R/pub_analysis/final_model_predictions.R
|
no_license
|
dtfitch/videosurvey
|
R
| false
| false
| 26,910
|
r
|
# final_model_predictions.R
# settled on fit2 for publication.
# libraries -----------------------
library(brms)
library(dplyr)
library(ggplot2)
library(ggridges)
# Functions -----------------------
#' Summarise a fitted brms model as a publication-ready table.
#'
#' Assembles fixed-effect estimates/SEs plus convergence diagnostics, appends
#' group-level (random) SD rows when present, optionally adds a PSIS-LOO row,
#' and optionally writes the table to file via clickR::make_table().
#'
#' @param x A brmsfit object.
#' @param file Optional output path stem for clickR::make_table(); NULL = no file.
#' @param type File format passed to clickR::make_table() (e.g. "word").
#' @param digits Decimal places used when rounding the table entries.
#' @param info If TRUE, append the model call to the written table.
#' @param include_ic If TRUE, compute PSIS-LOO (can be slow) and add a "LOO" row.
#' @return Invisibly, a list of class "reportmodel" whose $output element is
#'   the assembled table as a data.frame.
report.brmsfit <- function(x, file=NULL, type="word", digits=3, info=FALSE,
                           include_ic=FALSE){
  sx <- summary(x)
  # Stack the per-grouping-factor random-effect summaries; NA when there are
  # no group-level terms (do.call(rbind, NULL) errors and is caught here).
  random <- tryCatch(do.call(rbind, sx$random), error=function(e) NA)
  # Prefix each random-effect row with its grouping-factor name so rows from
  # different grouping factors stay distinguishable after rbind().
  if(!any(is.na(random))) rownames(random) <- paste(rownames(random), rep(names(sx$random), sapply(sx$random, nrow)), sep=" ")
  if(include_ic){
    # Fixed: direct namespaced call instead of eval(parse(text = ...)) --
    # the eval/parse construct was an anti-pattern with identical behavior.
    loo <- brms::loo(x)
    obj <- list(coefficients=setNames(sx$fixed[,1], rownames(sx$fixed)), se=sx$fixed[,2],
                random=random, loo=setNames(c(loo$estimates[1,1], loo$estimates[1,2]), c("ELPD (PSIS-LOO)", "ELPD SE")),
                Eff.Sample_min=sx$fixed[,5], Rhat_max=round(sx$fixed[,6],2))
    # Fixed effects, then (if present) random-effect SDs, then the LOO row.
    output <- rbind(cbind(round(obj$coefficients,digits),round(obj$se,digits),obj$Eff.Sample_min,obj$Rhat_max),
                    if(!any(is.na(random))) {
                      cbind(round(random[,1:2, drop=FALSE], digits), round(random[,5:6, drop=FALSE], digits))
                    },
                    c(round(loo$estimates[1,1], digits), round(loo$estimates[1,2], digits),NA,NA))
    rownames(output)[nrow(output)] <- "LOO"
  }else{
    # Without information criteria: report Rhat plus bulk/tail ESS columns.
    obj <- list(coefficients=setNames(sx$fixed[,1], rownames(sx$fixed)), se=sx$fixed[,2], random=random,
                Rhat=round(sx$fixed[,5],3), Bulk_ESS=sx$fixed[,6], Tail_ESS=sx$fixed[,7])
    output <- rbind(cbind(round(obj$coefficients,digits),round(obj$se,digits),obj$Rhat,obj$Bulk_ESS,obj$Tail_ESS),
                    if(!any(is.na(random))) {
                      cbind(round(random[,1:2, drop=FALSE], digits), round(random[,5:7, drop=FALSE], digits))
                    })
  }
  if(!is.null(file)){
    # clickR expects either NULL or a character description of the call.
    info <- if(info) deparse(getCall(x)) else NULL
    suppressWarnings(clickR::make_table(output, file, type, info=info))
  }
  obj$output <- data.frame(output, check.names=FALSE, stringsAsFactors=FALSE)
  class(obj) <- "reportmodel"
  invisible(obj)
}
# Column-bind a list of objects of unequal row counts, padding the shorter
# ones at the bottom with `fill` so every piece has the same number of rows.
cbind.fill <- function(x, fill = NA) {
  mats <- lapply(x, as.matrix)
  target_rows <- max(sapply(mats, nrow))
  padded <- lapply(mats, function(m) {
    pad <- matrix(fill, target_rows - nrow(m), ncol(m))
    rbind(m, pad)
  })
  do.call(cbind, padded)
}
PCI <- function(samples, prob = 0.9) {
  # Percentile interval(s) of `samples` (from the 'rethinking' package).
  # For each interval mass in `prob`, take the symmetric lower/upper
  # quantiles, then arrange all bounds "inside-out": lower bounds of the
  # widest interval first, upper bounds of the widest interval last.
  qmat <- sapply(prob, function(p) {
    tail_mass <- (1 - p) / 2
    quantile(samples, probs = c(tail_mass, 1 - tail_mass))
  })
  n_int <- length(prob)
  out <- numeric(2 * n_int)
  out_names <- character(2 * n_int)
  for (i in seq_len(n_int)) {
    lo <- n_int + 1 - i
    hi <- n_int + i
    out[lo] <- qmat[1, i]
    out[hi] <- qmat[2, i]
    # Label each bound with its percentile, e.g. "5%" / "95%".
    tail_mass <- (1 - prob[i]) / 2
    out_names[lo] <- paste0(round(tail_mass * 100, 0), "%")
    out_names[hi] <- paste0(round((1 - tail_mass) * 100, 0), "%")
  }
  names(out) <- out_names
  out
}
# FUNCTIIONS FOR PLOTTING---------------
# Wrap each string in `x` to at most `len` characters per line, joining the
# wrapped pieces with newlines (used to make multi-line axis labels).
# Fixed: vapply() instead of sapply() so the return type is always
# character(), even for zero-length input (sapply() would return list()).
wrap.it <- function(x, len)
{
  vapply(x, function(y) paste(strwrap(y, len),
                              collapse = "\n"),
         character(1), USE.NAMES = FALSE)
}
# Call this function with a list or vector
# Wrap long labels to `len` characters per line. Accepts either a character
# vector or a list of character vectors and returns the matching shape,
# delegating the actual wrapping to wrap.it().
wrap.labels <- function(x, len)
{
  if (!is.list(x)) {
    return(wrap.it(x, len))
  }
  lapply(x, wrap.it, len)
}
# Main Code -------------------------------------------
# read in model
# fit1 = main-effects model, fit2 = final model (with interactions);
# parname_dict maps raw parameter names to plot labels and a display order.
fit1 <- readRDS("fit1.rds")
fit2 <- readRDS("fit2.rds")
parname_dict <- read.csv("parname_dict.csv")
# write summary table
# Word-format coefficient tables for both models (no information criteria).
report.brmsfit(fit1, file = "./output/fit_main_effects",
               type = "word", digits = 3, info=FALSE, include_ic=FALSE)
report.brmsfit(fit2, file = "./output/fit_final_model",
               type = "word", digits = 3, info=FALSE, include_ic=FALSE)
# - Make variable name dictionary ------------------
# Start from an identity-named character vector (name == value for every
# model variable); values are then replaced with pretty labels below.
varname_dict = names(fit2$data)
names(varname_dict) = varname_dict
# Replace every entry of `varname_dict` equal to `var` with `newname`.
# Returns the (possibly unchanged) dictionary; a no-op when `var` is absent.
tmp.rename.func <- function(varname_dict, var, newname) {
  hits <- which(varname_dict == var)
  if (length(hits) > 0) {
    varname_dict[hits] <- newname
  }
  varname_dict
}
# Replace each raw model variable name with a human-readable label.
varname_dict = tmp.rename.func(varname_dict, "comfort_rating_ordered", "comfort rating")
varname_dict = tmp.rename.func(varname_dict, "female1", "Woman")
varname_dict = tmp.rename.func(varname_dict, "child_u18TRUE", "HH w/ child")
varname_dict = tmp.rename.func(varname_dict, "age", "Age")
varname_dict = tmp.rename.func(varname_dict, "VideoGroupWithin", "'Within' experimental group")
varname_dict = tmp.rename.func(varname_dict, "op_like_biking", "like riding a bike")
varname_dict = tmp.rename.func(varname_dict, "op_need_car", "need car for activities")
varname_dict = tmp.rename.func(varname_dict, "op_feel_safe", "feel safe biking on campus")
varname_dict = tmp.rename.func(varname_dict, "op_like_transit", "like using public transit")
varname_dict = tmp.rename.func(varname_dict, "op_arrive_professional", "job needs professional attire")
varname_dict = tmp.rename.func(varname_dict, "op_travel_stress", "travelling to campus is stressful")
varname_dict = tmp.rename.func(varname_dict, "bike_ability", "confidence level riding a bike")
varname_dict = tmp.rename.func(varname_dict, "comfort_four_no_lane2", "willing to bike on 4-lane road")
varname_dict = tmp.rename.func(varname_dict, "comfort_four_no_lane3", "comfortable biking on 4-lane road")
varname_dict = tmp.rename.func(varname_dict, "usual_mode_4levBike", "usually commute to campus by bike")
varname_dict = tmp.rename.func(varname_dict, "street_parking_ST1", "street parking")
varname_dict = tmp.rename.func(varname_dict, "outside_lane_width_ft_ST", "outside lane width")
varname_dict = tmp.rename.func(varname_dict, "veh_volume2_ST2", "low volume")
varname_dict = tmp.rename.func(varname_dict, "veh_volume2_ST3", "high volume")
varname_dict = tmp.rename.func(varname_dict, "bike_operating_space_ST", "bike operating space")
varname_dict = tmp.rename.func(varname_dict, "bike_lane_SUM_ST1", "bike lane, no buffer")
varname_dict = tmp.rename.func(varname_dict, "bike_lane_SUM_ST2", "bike lane, with buffer")
varname_dict = tmp.rename.func(varname_dict, "speed_prevail_minus_limit_ST", "prevailing minus posted speed")
varname_dict = tmp.rename.func(varname_dict, "speed_limit_mph_ST_3lev.30.40.", "speed limit [30,40)")
varname_dict = tmp.rename.func(varname_dict, "speed_limit_mph_ST_3lev.40.50.", "speed limit [40,50]")
varname_dict = tmp.rename.func(varname_dict, "veh_vol_non0_opspace_0_ST", "bikes share space with cars")
varname_dict = tmp.rename.func(varname_dict, "person_ID", "person ID")
varname_dict = tmp.rename.func(varname_dict, "video_name", "video name")
#more we'll need below
# Labels for derived scenario columns and brms parameter names (the brackets
# in the intercept names are regex-escaped for later pattern matching).
varname_dict2 = setNames(nm = c("ability_comfort", "road_environment", "id", "attitude",
                                "b_Intercept\\[1\\]", "b_Intercept\\[2\\]", "b_Intercept\\[3\\]",
                                "b_Intercept\\[4\\]", "b_Intercept\\[5\\]", "b_Intercept\\[6\\]",
                                "sd_person_ID__Intercept", "sd_video_name__Intercept"),
                         c("biking comfort", "road environment", "id", "transit attitudes",
                           "Intercept 1 ", "Intercept 2", "Intercept 3",
                           "Intercept 4", "Intercept 5", "Intercept 6",
                           "SD person ID Intercept", "SD video name Intercept"))
varname_dict = c(varname_dict, varname_dict2)
rm(varname_dict2)
# reorder varname_dict for plotting
# NOTE(review): hard-coded position indices -- must be revisited whenever
# the model's variable set (and hence the dictionary length) changes.
varname_dict <- varname_dict[c(1,2,20,24,3:19,21:23,25:39)]
# - make parameter plots ----------
# Posterior draws for both models. NOTE(review): the hard-coded column
# counts (1:35 / 1:42) are assumed to select the population-level and SD
# parameters -- re-check if either model specification changes. Also,
# posterior_samples() is deprecated in recent brms (use as_draws_df()).
post1 <-posterior_samples(fit1)[1:35]
post1$model <- "Main Effects"
post2 <-posterior_samples(fit2)[1:42]
post2$model <- "Final"
# Long format: one row per draw x parameter, labelled via parname_dict.
d.plot <- plyr::rbind.fill(post1,post2)
d.plot <- reshape2::melt(d.plot,"model")
d.plot$variable <- plyr::mapvalues(d.plot$variable,from=parname_dict$pname,to=parname_dict$label)
# Reorder parameter levels by the 'order' column of parname_dict (descending).
new.order <-
  dplyr::inner_join(data.frame(label=levels(d.plot$variable),old_order=1:length(unique(d.plot$variable))),
                    parname_dict,by="label") %>%
  arrange(desc(order))
d.plot$variable <- factor(d.plot$variable,levels=levels(d.plot$variable)[new.order$old_order])
# wrap long labels
levels(d.plot$variable) <- wrap.labels(levels(d.plot$variable),35)
d.plot$model <- factor(d.plot$model)
# Put "Main Effects" before "Final" in the facets.
# NOTE(review): 'level' partially matches the 'levels' argument -- works,
# but should be spelled out.
d.plot$model <- factor(d.plot$model,level=levels(d.plot$model)[c(2,1)])
# Figure 5a: density ridges for the upper block of parameters (levels 42:20);
# the horizontal lines visually separate parameter groups.
png(file="output/Figure5a.png",width=6.5,height=9,units="in",res=900,pointsize = 4)
ggplot(d.plot[d.plot$variable %in% levels(d.plot$variable)[42:20],], aes(x = value, y = variable)) +
  coord_cartesian(xlim = c(-2.5,5.5))+
  geom_density_ridges(scale = 1.2, rel_min_height=0.01) +
  geom_hline(yintercept=9)+
  geom_hline(yintercept=14)+
  geom_hline(yintercept=18)+
  geom_vline(xintercept=0,linetype="dashed")+
  labs(x = "Parameter (cumulative logit scale)") +
  facet_wrap(~model,nrow=1,labeller = label_wrap_gen(width = 40, multi_line = TRUE))+
  theme(axis.title.y=element_blank(),
        text = element_text(size=12))
dev.off()
# Figure 5b: density ridges for the remaining parameters (levels 21:1).
png(file="output/Figure5b.png",width=6.5,height=9,units="in",res=900,pointsize = 4)
ggplot(d.plot[d.plot$variable %in% levels(d.plot$variable)[21:1],], aes(x = value, y = variable)) +
  coord_cartesian(xlim = c(-2.5,5.5))+
  geom_density_ridges(scale = 1.2, rel_min_height=0.01) +
  geom_hline(yintercept=8)+
  geom_hline(yintercept=19)+
  geom_vline(xintercept=0,linetype="dashed")+
  labs(x = "Parameter (cumulative logit scale)") +
  facet_wrap(~model,nrow=1,labeller = label_wrap_gen(width = 40, multi_line = TRUE))+
  theme(axis.title.y=element_blank(),
        text = element_text(size=12))
dev.off()
# Setup conditions for predictive plots ---------------------------
# summary
str(fit2$data)
names(fit2$data)
# Unscaled copy of the model data, used to back-transform scaled predictors
# to real units for plot labels.
d.model <- readRDS("data_for_models_nonscaled.RDS")
# create by-person data frame for finding quantiles accurately
# (one row per person, so person-level quantiles are not weighted by how
# many videos each person rated)
d.scenario = fit2$data %>% group_by(person_ID) %>%
  dplyr::select(-c("comfort_rating_ordered","video_name")) %>% summarize_all(first)
# Building blocks ----
# - Individual-level ----
# + attitudes ----
# 10th/50th/90th percentile of each person-level covariate.
op_levels = data.frame(apply(d.scenario[,-1], 2, quantile, probs = c(.1,.5,.9)))
# Bundle pro-biking ("pos") and anti-biking ("neg") opinion items at their
# low/mid/high quantiles.
low_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[1], op_feel_safe = op_levels$op_feel_safe[1], op_like_transit = op_levels$op_like_transit[1])
mid_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[2], op_feel_safe = op_levels$op_feel_safe[2], op_like_transit = op_levels$op_like_transit[2])
high_pos_attitudes = c(op_like_biking = op_levels$op_like_biking[3], op_feel_safe = op_levels$op_feel_safe[3], op_like_transit = op_levels$op_like_transit[3])
low_neg_attitudes = c(op_need_car = op_levels$op_need_car[1], op_arrive_professional = op_levels$op_arrive_professional[1], op_travel_stress = op_levels$op_travel_stress[1])
mid_neg_attitudes = c(op_need_car = op_levels$op_need_car[2], op_arrive_professional = op_levels$op_arrive_professional[2], op_travel_stress = op_levels$op_travel_stress[2])
high_neg_attitudes = c(op_need_car = op_levels$op_need_car[3], op_arrive_professional = op_levels$op_arrive_professional[3], op_travel_stress = op_levels$op_travel_stress[3])
# "bad" = low pro-biking + high anti-biking attitudes, etc.
bad_attitudes = c(low_pos_attitudes, high_neg_attitudes)
mid_attitudes = c(mid_pos_attitudes, mid_neg_attitudes)
good_attitudes = c(high_pos_attitudes, low_neg_attitudes)
# + ability + comfort ----
low_ability_comfort = data.frame(comfort_four_no_lane2=0, comfort_four_no_lane3=0, bike_ability=.5, usual_mode_4levBike = 0) #somewhat confident, low comfort
mid_ability_comfort = data.frame(comfort_four_no_lane2=1, comfort_four_no_lane3=0, bike_ability=.5, usual_mode_4levBike = 0) #somewaht confident, moderate comfort
high_ability_comfort = data.frame(comfort_four_no_lane2=0, comfort_four_no_lane3=1, bike_ability=1, usual_mode_4levBike = 1) #very confident, high comfort
# + demographic ----
agelevels = quantile(d.scenario$age , c(.1,.8,.95))
# On real scale
agelevels*(max(d.model$age,na.rm=T) - min(d.model$age,na.rm=T)) + min(d.model$age,na.rm=T)
# 10%  80%  95%
#  20   34   57
young_childless_male = data.frame(age = agelevels[1], child_u18TRUE = 0, female1 = 0)
midage_child_female = data.frame(age = agelevels[2], child_u18TRUE = 1, female1 = 1)
old_childless_male = data.frame(age = agelevels[3], child_u18TRUE = 0, female1 = 0)
old_childless_female = data.frame(age = agelevels[3], child_u18TRUE = 0, female1 = 1)
# - Road environment ----
# 5th/50th/95th percentile of each (scaled) road covariate, with the real
# units echoed in the comments below each back-transformation.
speed_prevail_levels = quantile(fit2$data$speed_prevail_minus_limit_ST, c(.05,.5,.95))
speed_prevail_levels*(max(d.model$speed_prevail_minus_limit_ST) - min(d.model$speed_prevail_minus_limit_ST)) + min(d.model$speed_prevail_minus_limit_ST)
#  5% 50% 95%
# -10   0   5
outside_lane_levels = quantile(fit2$data$outside_lane_width_ft_ST, c(.05,.5,.95))
outside_lane_levels*(max(d.model$outside_lane_width_ft_ST) - min(d.model$outside_lane_width_ft_ST)) + min(d.model$outside_lane_width_ft_ST)
#  5% 50% 95%
#   9  11  13
bike_space_levels = quantile(fit2$data$bike_operating_space_ST, c(.05,.5,.95))
bike_space_levels*(max(d.model$bike_operating_space_ST) - min(d.model$bike_operating_space_ST)) + min(d.model$bike_operating_space_ST)
#  5% 50% 95%
#   0   5  11
# Road-environment scenarios: dummy-coded volume / speed-limit / bike-lane
# indicators plus the continuous covariates fixed at the quantiles computed
# above. street_parking_ST1 is always 1 (all videos had street parking).
collector_good = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 0,
                            speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 0,
                            bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 1,
                            speed_prevail_minus_limit_ST = speed_prevail_levels[1],
                            street_parking_ST1 = 1,
                            outside_lane_width_ft_ST = outside_lane_levels[1],
                            bike_operating_space_ST = bike_space_levels[3],
                            veh_vol_non0_opspace_0_ST = 0)
collector_mid = data.frame(veh_volume2_ST2 = 1, veh_volume2_ST3 = 0,
                           speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
                           bike_lane_SUM_ST1 = 1, bike_lane_SUM_ST2 = 0,
                           speed_prevail_minus_limit_ST = speed_prevail_levels[2],
                           street_parking_ST1 = 1,
                           outside_lane_width_ft_ST = outside_lane_levels[2],
                           bike_operating_space_ST = bike_space_levels[2],
                           veh_vol_non0_opspace_0_ST = 0)
collector_bad = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
                           speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
                           bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 0,
                           speed_prevail_minus_limit_ST = speed_prevail_levels[3],
                           street_parking_ST1 = 1,
                           outside_lane_width_ft_ST = outside_lane_levels[3],
                           bike_operating_space_ST = bike_space_levels[1],
                           veh_vol_non0_opspace_0_ST = 1)
# Arterials are always high volume (veh_volume2_ST3 = 1); "good" designs use
# the low quantile of outside lane width (its effect is negative) and the
# high quantile of bike operating space.
arterial_good = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
                           speed_limit_mph_ST_3lev.30.40. = 1, speed_limit_mph_ST_3lev.40.50. = 0,
                           bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 1,
                           speed_prevail_minus_limit_ST = speed_prevail_levels[1],
                           street_parking_ST1 = 1, # all streets have street parking
                           outside_lane_width_ft_ST = outside_lane_levels[1], #<- outside lane width effect is neg
                           bike_operating_space_ST = bike_space_levels[3],
                           veh_vol_non0_opspace_0_ST = 0)
arterial_mid = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
                          speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 1,
                          bike_lane_SUM_ST1 = 1, bike_lane_SUM_ST2 = 0,
                          speed_prevail_minus_limit_ST = speed_prevail_levels[2],
                          street_parking_ST1 = 1, # all streets have street parking
                          outside_lane_width_ft_ST = outside_lane_levels[2],
                          bike_operating_space_ST = bike_space_levels[2],
                          veh_vol_non0_opspace_0_ST = 0)
arterial_bad = data.frame(veh_volume2_ST2 = 0, veh_volume2_ST3 = 1,
                          speed_limit_mph_ST_3lev.30.40. = 0, speed_limit_mph_ST_3lev.40.50. = 1,
                          bike_lane_SUM_ST1 = 0, bike_lane_SUM_ST2 = 0,
                          speed_prevail_minus_limit_ST = speed_prevail_levels[3],
                          street_parking_ST1 = 1, # all streets have street parking
                          outside_lane_width_ft_ST = outside_lane_levels[3],
                          bike_operating_space_ST = bike_space_levels[1],
                          veh_vol_non0_opspace_0_ST = 1)
# Stack each set of building blocks into a labelled data frame; the shared
# id = 1 column lets join_all() below form the full factorial crossing.
attitudes = data.frame(rbind(bad_attitudes, mid_attitudes, good_attitudes),
                       id = 1, attitude = as.factor(c("bad_attitude", "mid_attitude", "good_attitude")))
# Order factor levels bad < good < mid, etc., for display.
attitudes$attitude = ordered(attitudes$attitude, levels(attitudes$attitude)[c(1,3,2)])
ability_comfort = data.frame(rbind(low_ability_comfort, mid_ability_comfort, high_ability_comfort),
                             id = 1, ability_comfort = as.factor(c("low_comfort", "mid_comfort", "high_comfort")))
ability_comfort$ability_comfort = ordered(ability_comfort$ability_comfort, levels(ability_comfort$ability_comfort)[c(2,3,1)])
road_environments = data.frame(rbind(collector_bad, collector_mid, collector_good,
                                     arterial_bad, arterial_mid, arterial_good),
                               id = 1, road_environment = c("collector_bad", "collector_mid", "collector_good",
                                                            "arterial_bad", "arterial_mid", "arterial_good"))
road_environments$road_environment = ordered(road_environments$road_environment,
                                             levels = c("arterial_bad", "arterial_mid", "arterial_good", "collector_bad", "collector_mid", "collector_good"))
person = data.frame(rbind(young_childless_male, #midage_child_female, old_childless_female,
                          old_childless_male),
                    id = 1, person = c("20yr_man", #"midage_child_female", "old_childless_male",
                                       "57yr_woman"))
# Full (cartesian, via id = 1) crossing of all building blocks.
all_counterfactuals = plyr::join_all(list(attitudes, ability_comfort, road_environments, person), by='id', type='full' )
all_counterfactuals$rowID = 1:nrow(all_counterfactuals)
building_blocks = list(attitudes = attitudes, ability_comfort = ability_comfort, road_environments = road_environments)
# NOTE(review): the result of this sapply() is never assigned, so it is a
# no-op -- building_blocks keeps its raw column names. Confirm whether the
# relabelled copy was meant to be kept.
sapply(building_blocks, function(x) {names(x) = varname_dict[names(x)]; x})
# + Add interactions ----
# Pull the interaction terms out of the model formula's RHS and build each
# interaction column as the row-wise product of its component columns
# (components are matched by grepl on the term string).
interaction.terms = trimws(strsplit(as.character(fit2$formula[[1]][3][1]), " \\+ ")[[1]])
interaction.terms = interaction.terms[grepl(":",interaction.terms)]
interactions = data.frame(do.call("cbind", lapply(interaction.terms, function(term) {
  tmp = all_counterfactuals %>% dplyr::select(names(all_counterfactuals)[sapply(names(all_counterfactuals), grepl, x = term)])
  apply(tmp, 1, prod)
})))
names(interactions) = interaction.terms
# Attach the interaction-term columns onto the counterfactual design matrix.
all_counterfactuals = data.frame(all_counterfactuals, interactions)
# simplified plot for me_per_vid for report ------------
# Six prediction scenarios for one fixed "hard case" rider (bad attitudes,
# low ability/comfort, older childless woman) across road designs.
x = rbind(data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_bad)),
          data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_mid)),
          data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, collector_good)),
          data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_bad)),
          data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_mid)),
          data.frame(c(bad_attitudes, low_ability_comfort, old_childless_female, arterial_good)))
# Scenario labels, in the same row order as the rbind above.
x$class <- c("bad_attitude.low_comfort.collector_bad",
             "bad_attitude.low_comfort.collector_mid",
             "bad_attitude.low_comfort.collector_good",
             "bad_attitude.low_comfort.arterial_bad",
             "bad_attitude.low_comfort.arterial_mid",
             "bad_attitude.low_comfort.arterial_good")
# Predict for the "between" experimental group (indicator fixed at 0).
x$VideoGroupWithin <- rep(0,nrow(x))
# general predictive plots for interactions -----------------------
# Start from scenario row 2 (collector_mid) and vary age and bike operating
# space: 3 ages x 3 bike-space levels = 9 rows; then duplicate the grid with
# the "comfortable on mixed arterial" indicator set to 1 (18 rows total).
newdata <- rbind(x[rep(2,9),])
newdata$age <- rep(agelevels,3)
newdata$bike_operating_space_ST <- rep(bike_space_levels,each=3)
newdata2 <- newdata
newdata2$comfort_four_no_lane3 <- 1
newdata<- rbind(newdata,newdata2)
# Back-transform the scaled predictors to real units for plot labels.
newdata$age_class <- newdata$age*(max(d.model$age,na.rm=T) - min(d.model$age,na.rm=T)) + min(d.model$age,na.rm=T)
newdata$comfort_four_no_lane3_class <- c(rep("NOT comfortable on mixed arterial",9),rep("Comfortable on mixed arterial",9))
newdata$bike_operating_space_ST_class <- newdata$bike_operating_space_ST*(max(d.model$bike_operating_space_ST) - min(d.model$bike_operating_space_ST)) + min(d.model$bike_operating_space_ST)
# predict with data to show interactions
# Posterior expected category probabilities for the 18 interaction scenarios.
# Fixed: brms spells the argument 'allow_new_levels' (plural); the previous
# misspelling 'allow_new_level' was passed through '...' and never reached
# brms, so it silently fell back to the default. Also TRUE instead of T.
pk <- posterior_epred(fit2, newdata = newdata, allow_new_levels = TRUE,
                      sample_new_levels = "gaussian")
# One row per scenario x ordinal response category (18 scenarios, 7 levels).
d.plot <- data.frame(scenario = rep(1:18,each=7),
                     age=rep(paste(newdata$age_class,"yo"),each=7),
                     comfort=rep(newdata$comfort_four_no_lane3_class,each=7),
                     bike_space=rep(newdata$bike_operating_space_ST_class,each=7),
                     class = rep(sort(unique(fit2$data$comfort_rating_ordered)),length(newdata$class)),
                     p.mean = NA,
                     p.lwr = NA,
                     p.upr = NA
)
# Posterior mean and 90% percentile interval per scenario/category
# (pk is indexed draws x scenario x category). unique() is hoisted out of
# the loop instead of being recomputed four times per iteration.
scen_ids <- unique(d.plot$scenario)
for (s in seq_along(scen_ids)) {
  rows <- d.plot$scenario == scen_ids[s]
  d.plot[rows, "p.mean"] <- apply(pk[, s, ], 2, mean)
  PI <- apply(pk[, s, ], 2, PCI, .9)
  d.plot[rows, "p.lwr"] <- PI[1, ]
  d.plot[rows, "p.upr"] <- PI[2, ]
}
# age plot
# Figure 7: predicted rating probabilities by age (facets), for riders NOT
# comfortable on a mixed arterial, coloured by bike operating space.
png(file="output/Figure7.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot[d.plot$comfort=="NOT comfortable on mixed arterial",],aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(bike_space)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(bike_space)))+
  geom_point(aes(color=as.factor(bike_space)))+
  coord_cartesian(xlim=c(0,.8))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_grid(~age)+
  guides(fill=guide_legend(title="Bike operating space (ft)"),
         color=guide_legend(title="Bike operating space (ft)"))+
  theme(legend.position="top")
dev.off()
# comfort plot
# Figure 8: comfort-indicator contrast at a single (middle) age.
# NOTE(review): this subset depends on "33 yo" exactly matching
# paste(newdata$age_class, "yo") -- verify the back-transformed age is
# exactly 33, otherwise this filter selects zero rows.
png(file="output/Figure8.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot[d.plot$age=="33 yo",],aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(bike_space)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(bike_space)))+
  geom_point(aes(color=as.factor(bike_space)))+
  coord_cartesian(xlim=c(0,.8))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_grid(~comfort)+
  guides(fill=guide_legend(title="Bike operating space (ft)"),
         color=guide_legend(title="Bike operating space (ft)"))+
  theme(legend.position="top")
dev.off()
# scenario predictive plots ---------------------
# # predict for each sample the response while considering new videos and new people
# # I.e. include the uncertainty from videos and people in the predictions
# n=10
# pred.cumsum <- array(0, dim = c(3, nrow(x), n))
# for(i in 1:n){
# pred <- predict(fit2, newdata = x,summary=T,
# allow_new_level=T, sample_new_levels="gaussian")
# tmp <- apply(pred,2,table)
# if(is.list(tmp)){
# tmp <- sapply(1:length(tmp),function(x) as.vector(tmp[[x]]))
# tmp <- cbind.fill(tmp,fill=0)
# }
# pred.cumsum[,,i] <- rbind(colSums(tmp[5:7,]),colSums(tmp[6:7,]),tmp[7,])
# }
# pred.cumsum <- apply(pred.cumsum,c(2,3),function(x) x/nrow(pred))
# pred.mean <- apply(pred.cumsum,c(1,2),mean)
# pred.PI <- apply(pred.cumsum,c(1,2),PCI,prob=.95)
#
# pred.plot <- data.frame(class=rep(x$class,each=3),
# road = c(rep("collector",9),rep("arterial",9)),
# scenario = rep(rep(c("poor","average","best"),each=3),2),
# comfort = rep(c("At least slightly comfortable",
# "At least moderatly comfortable",
# "Very comfortable"),3),
# Estimate = as.vector(pred.mean),
# Q2.5 = as.vector(pred.PI[1,,]),
# Q97.5 = as.vector(pred.PI[2,,]))
# pred.plot$comfort <- factor(pred.plot$comfort, levels = c("At least slightly comfortable",
# "At least moderatly comfortable",
# "Very comfortable"))
# pred.plot$scenario <- factor(pred.plot$scenario, levels = c("poor","average","best"))
#
# #Collector
# ggplot( pred.plot, aes(comfort, Estimate)) +
# geom_point(size=.8) +
# #geom_line(aes(group=class)) +
# geom_errorbar(aes(ymin = Q2.5, ymax = Q97.5), width = 0.2)+
# facet_grid(scenario~road)+#, ncol = 3, strip.position = "top") +
# theme_bw() +
# coord_flip()+
# ylab("Predicted proportion of responses")+
# #ggtitle(paste("Collectors, ", model_name)) +
# theme(strip.text = element_text(size = 8),
# axis.title.y = element_blank())
# Alternative (that I like better) ------------
# Posterior expected category probabilities for the six road/design scenarios.
# Fixed: brms spells the argument 'allow_new_levels' (plural); the previous
# misspelling 'allow_new_level' was passed through '...' and never reached
# brms, so it silently fell back to the default. Also TRUE instead of T.
pk <- posterior_epred(fit2, newdata = x, allow_new_levels = TRUE,
                      sample_new_levels = "gaussian")
# One row per scenario x response category: 6 scenarios x 7 ordinal levels.
# Rows 1-21 are the collector scenarios, rows 22-42 the arterial ones.
d.plot <- data.frame(scenario=rep(x$class,each=7),
                     road.type=c(rep("Collector",21),rep("Arterial",21)),
                     design=as.factor(rep(rep(c("poor","moderate","good"),each=7),2)),
                     class = rep(sort(unique(fit2$data$comfort_rating_ordered)),length(x$class)),
                     p.mean = NA,
                     p.lwr = NA,
                     p.upr = NA
)
# Reverse the alphabetical level order so the legend reads poor/moderate/good.
d.plot$design <- factor(d.plot$design,levels=levels(d.plot$design)[c(3,2,1)])
# Posterior mean and 90% percentile interval per scenario/category; unique()
# is hoisted out of the loop instead of being recomputed every iteration.
scen_ids <- unique(d.plot$scenario)
for (s in seq_along(scen_ids)) {
  rows <- d.plot$scenario == scen_ids[s]
  d.plot[rows, "p.mean"] <- apply(pk[, s, ], 2, mean)
  PI <- apply(pk[, s, ], 2, PCI, .9)
  d.plot[rows, "p.lwr"] <- PI[1, ]
  d.plot[rows, "p.upr"] <- PI[2, ]
}
# Figure 9: predicted rating probabilities per design class (colour),
# faceted by road type, for the fixed "hard case" rider scenarios.
png(file="output/Figure9.png",width=6.5,height=3,units="in",res=900,pointsize = 8)
ggplot(d.plot,aes(x=p.mean,y=class))+
  geom_ribbon(aes(xmin=p.lwr,xmax=p.upr,group=scenario,fill=as.factor(design)),alpha=.2)+
  geom_path(aes(group=scenario,color=as.factor(design)))+
  geom_point(aes(color=as.factor(design)))+
  coord_cartesian(xlim=c(0,1))+
  xlab("Predicted Probability")+
  ylab("")+
  facet_wrap(~road.type)+
  guides(fill=guide_legend(title="Design Class"),
         color=guide_legend(title="Design Class"))+
  theme(legend.position="top")
dev.off()
|
#Courtship and Copulation changes under predation threat
## Set up files and packages needed
#install.packages("dplyr")
library(dplyr)
library(lme4)
library(effects)
library(ggplot2)
# Bring in the data for mature females (copulation) and immature females
# (courtship). Fixed: spell out header = TRUE instead of relying on partial
# argument matching ('h=T'), which is fragile and flagged by lintr.
copulation <- read.csv("Mature.csv", header = TRUE)
courtship <- read.csv("Immature.csv", header = TRUE)
##Courtship Analysis
# Create a unique Fly_ID for each individual from Box (treatment), date,
# replicate, and vial number.
courtship$Fly_ID <- with(courtship, paste0(Box, Date, Replicate, Vial_number))
courtship$Fly_ID
# Barometric pressure change (midnight reading minus 8am reading).
courtship$deltaBP <- courtship$BP.12.00.am - courtship$BP.8.00.Am
# Courtship start time in seconds. One observation had a value of -1; clamp
# all negative start times to zero with pmax() instead of patching a
# hard-coded row index (the original set row 1339 directly, which silently
# breaks if the CSV row order ever changes). pmax() leaves NA as NA.
courtship$startTimeSeconds <- courtship$trial_latency_behav_end - courtship$court_duration
courtship$startTimeSeconds <- pmax(courtship$startTimeSeconds, 0)
# Create new column of relative values for courtship start times (i.e so
# every observation starts at Time = 0).
courtship$relativeStartTimeSeconds <- courtship$startTimeSeconds - courtship$Observation.Initiation
courtship$relativeStartTimeSeconds
# Relative trial time at which each courtship bout ended.
courtship$relativeTrial_latency_end <- courtship$relativeStartTimeSeconds + courtship$court_duration
courtship$relativeTrial_latency_end
# Cap courtship at the 900-second observation window:
# first, find bouts whose relative end time runs past 900 s ...
courtship$nineHundredTransition <- 900 - courtship$relativeTrial_latency_end
# ... then truncate those bouts to end exactly at 900 s.
courtship$relativeTrialLatency900 <- ifelse(courtship$nineHundredTransition < 0, 900, courtship$relativeTrial_latency_end)
# Relative courtship duration within the (possibly truncated) window.
courtship$relativeCourtDuration <- courtship$relativeTrialLatency900 - courtship$relativeStartTimeSeconds
# Keep only bouts with positive duration inside the window.
startLess900 <- subset(courtship, relativeCourtDuration > 0)
summary(startLess900)
# Create a data frame for the predictor variables and for Fly_ID
pred_var_dat <- subset(courtship, select = c(Box, Date, Replicate, Vial_number, Temp, Humidity, BP.12.00.am, BP.8.00.Am, BP.Room,
                                             Observation.Initiation, Fly_ID, deltaBP))
# Make data frame only include unique values (i.e. remove duplicate rows --
# the raw data appears to hold one row per behavioural bout, so the
# per-fly predictors repeat).
pred_var_dat <- unique(pred_var_dat)
#Rename Box to treatment type, and make characters to factor (to run later for plot(effect()))
# Box "A" housed the predator in this (courtship) assay.
pred_var_dat$Box <- ifelse(pred_var_dat$Box=="A", "Predator", "Control")
pred_var_dat$Box
pred_var_dat$Box <- factor(pred_var_dat$Box)
# Create a group by Fly_Id
FlyID <- group_by(startLess900,Fly_ID)
head(FlyID)
# Per-fly total courtship time ('sum') and number of bouts ('count')
# within the 900 s observation window.
courtSum <- summarise(FlyID, sum = sum(relativeCourtDuration), count = n())
head(courtSum)
courtSum
# Join the per-fly summaries back onto the predictor table.
courtship_for_analysis <- merge(x = pred_var_dat, y = courtSum, by.x="Fly_ID", by.y="Fly_ID")
# Quick visual check of treatment and date effects on total courtship time.
with(courtship_for_analysis,
     boxplot(sum ~ Box))
with(courtship_for_analysis,
     boxplot(sum ~ Date))
### A simple version of the analysis
#Scale tempature, humidity and deltaBP
# scale(, scale = F) centers only; note it returns a one-column matrix.
courtship_for_analysis$TempCent <- scale(courtship_for_analysis$Temp, scale=F)
courtship_for_analysis$HumCent <- scale(courtship_for_analysis$Humidity, scale = F)
courtship_for_analysis$BPCent <- scale(courtship_for_analysis$deltaBP, scale = F)
# Model for sum of time courting in 900 seconds
#redone below for proportions of time spent courting
#courtship_model1 <- lmer(sum ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
#Change time courting to proportion in 900 seconds
courtship_for_analysis$court_prop <- courtship_for_analysis$sum/900
# Proportion of the window spent courting ~ treatment + covariates, with a
# random intercept per observation date.
courtship_model1 <- lmer(court_prop ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
summary(courtship_model1)
# Model for number of courtship bouts in the 900 second observation window
# NOTE(review): 'count' is a non-negative integer; a Poisson glmer may be
# more appropriate than a Gaussian lmer here -- verify model assumptions.
courtship_model2 <- lmer(count ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
summary(courtship_model2)
# Effects plots for both models, plus focused treatment-effect plots.
plot(allEffects(courtship_model1))
plot(allEffects(courtship_model2))
plot(effect("Box", courtship_model1), main = "Male Time Courting of Immature Female in 900 Seconds",
     ylab = "Proportion of Time courting (sec)", xlab = "Treatment")
plot(effect("Box", courtship_model2), main = "Number of Male Courtship Bouts to Immature Female in 900 Seconds",
     ylab = "Courtship Bouts", xlab = "Treatment")
#### Copulation Analysis
#Similar to above for copulation now
summary(copulation)
dim(copulation)
head(copulation)
copulation$deltaBP <- (copulation$BP.12.00.am - copulation$BP.8.00.Am)
cop_data <- subset(copulation, select = c(Box, Date, Replicate, Vial.., Temp, Humidity, BP.12.00.am, BP.8.00.Am, BP.Room, Fly_ID, deltaBP))
cop_data <- unique(cop_data)
head(cop_data)
cop_data$Box <- ifelse(cop_data$Box=="C", "Predator", "Control")
cop_data$Box <- factor(cop_data$Box)
LatCop <- distinct(select(copulation, Cop_latency, Cop_Duration, Fly_ID))
head(LatCop)
copul_for_analysis <- merge(x = cop_data, y = LatCop, by.x="Fly_ID", by.y="Fly_ID")
copul_for_analysis$TempCent <- scale(copul_for_analysis$Temp, scale=F)
copul_for_analysis$HumCent <- scale(copul_for_analysis$Humidity, scale = F)
copul_for_analysis$BPCent <- scale(copul_for_analysis$deltaBP, scale = F)
### Simple linear models
copul_model1 <- lmer(Cop_latency ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis)
summary(copul_model1)
copul_model2 <- lmer(Cop_Duration ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis)
summary(copul_model2)
with(copul_for_analysis, boxplot(Cop_latency ~ Box))
with(copul_for_analysis, boxplot(Cop_Duration ~ Box))
plot(allEffects(copul_model1))
plot(allEffects(copul_model2))
plot(effect("Box", copul_model1), main = "Mature Female Copulation Latency Rates with/without predator",
ylab = "Copulation Latency (Sec)", xlab = "Treatment")
plot(effect("Box", copul_model2), main = "Mature Female Copulation Duration Rates with/without predator",
ylab = "Copulation Duration (Sec)", xlab="Treatment")
#Removing all values with no copulation
LatCop2 <- distinct(select(copulation, Cop_latency, Cop_Duration, Fly_ID))
head(LatCop2)
LatCop2 <- LatCop2[!(LatCop2$Cop_latency==0),]
#LatCop2
#dim(LatCop2)
copul_for_analysis2 <- merge(x = cop_data, y = LatCop2, by.x="Fly_ID", by.y="Fly_ID")
copul_for_analysis2$TempCent <- scale(copul_for_analysis2$Temp, scale=F)
copul_for_analysis2$HumCent <- scale(copul_for_analysis2$Humidity, scale = F)
copul_for_analysis2$BPCent <- scale(copul_for_analysis2$deltaBP, scale = F)
copul_model12 <- lmer(Cop_latency ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis2)
copul_model22 <- lmer(Cop_Duration ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis2)
summary(copul_model12)
summary(copul_model22)
plot(effect("Box", copul_model12), main = "Mature Female Copulation Latency Rates with/without predator, 0 removed",
ylab = "Copulation Latency (Sec)", xlab = "Treatment")
plot(effect("Box", copul_model22), main = "Mature Female Copulation Duration Rates with/without predator, 0's removed",
ylab = "Copulation Duration (Sec)", xlab="Treatment")
with(copul_for_analysis2, boxplot(Cop_latency ~ Box))
with(copul_for_analysis2, boxplot(Cop_Duration ~ Box))
# Copulation Count
#Count of mating for each
#byBox <- group_by(copul_for_analysis2, Box)
#byBox
#copCount <- summarise(byBox, count=n())
#copCount
#pie(copCount$count, labels = copCount$Box, radius = 1.0)
#Copulation Proportion:
head(copul_for_analysis)
copul_for_analysis$copulationSuccess <- ifelse(copul_for_analysis$Cop_latency==0, 0,1)
length(copul_for_analysis$Cop_Duration)
copprop_mod <- glm(copulationSuccess ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis, family = "binomial")
summary(copprop_mod)
plot(allEffects(copprop_mod))
plot(effect("Box", copprop_mod), main = "Copulation Proportions",
ylab = "Copulation Proportion", xlab = "Treatment")
|
/Mating_and_Predation_RCode.R
|
no_license
|
PaulKnoops/mating_under_predation
|
R
| false
| false
| 8,277
|
r
|
# Courtship and copulation changes under predation threat.
#
# Reads trial data for immature females (courtship observations) and mature
# females (copulation observations), builds per-fly summaries, and fits
# mixed-effects models with Date as a random intercept.

## Set up files and packages needed
#install.packages("dplyr")
library(dplyr)
library(lme4)
library(effects)
library(ggplot2)

# Bring in the data for mature females (copulation) and immature females (courtship)
copulation <- read.csv("Mature.csv", header = TRUE)
courtship <- read.csv("Immature.csv", header = TRUE)

## Courtship analysis ----

# Create a unique Fly_ID for each individual from Box (treatment), date,
# replicate, and vial number.
courtship$Fly_ID <- with(courtship, paste0(courtship$Box,courtship$Date,courtship$Replicate, courtship$Vial_number))
courtship$Fly_ID

# Barometric-pressure change (12:00am reading minus 8:00am reading).
courtship$deltaBP <- (courtship$BP.12.00.am - courtship$BP.8.00.Am)

# Behaviour start time in seconds (end latency minus bout duration).
# One record (row 1339) had a value of -1 (cause unknown); set it to 0.
courtship$startTimeSeconds <- (courtship$trial_latency_behav_end - courtship$court_duration)
courtship$startTimeSeconds[1339]
courtship$startTimeSeconds[1339] <- 0

# Relative start times so every observation starts at time = 0.
courtship$relativeStartTimeSeconds <- (courtship$startTimeSeconds - courtship$Observation.Initiation)
courtship$relativeStartTimeSeconds

# Relative time at which each courtship bout ended.
courtship$relativeTrial_latency_end <- (courtship$relativeStartTimeSeconds + courtship$court_duration)
courtship$relativeTrial_latency_end

# Cap all courtship at the 900-second observation window (relative court duration).
# First, transition step for finding bouts ending above 900 s.
courtship$nineHundredTransition <- (900 - courtship$relativeTrial_latency_end)
# Second, if nineHundredTransition is negative the bout ran past the window,
# so truncate its end to 900; otherwise keep relativeTrial_latency_end.
courtship$relativeTrialLatency900 <- ifelse(courtship$nineHundredTransition < 0, 900, courtship$relativeTrial_latency_end)
# Third, relative courtship duration (clipped to the 900-second window).
courtship$relativeCourtDuration <- (courtship$relativeTrialLatency900 - courtship$relativeStartTimeSeconds)
startLess900 <- subset(courtship, relativeCourtDuration > 0)
summary(startLess900)

# Predictor variables plus Fly_ID, one row per fly after de-duplication
# (the raw data has one row per bout).
pred_var_dat <- subset(courtship, select = c(Box, Date, Replicate, Vial_number, Temp, Humidity, BP.12.00.am, BP.8.00.Am, BP.Room,
                                             Observation.Initiation, Fly_ID, deltaBP))
pred_var_dat <- unique(pred_var_dat)

# Recode Box to the treatment label and convert to factor
# (factor levels are needed later for plot(effect())).
pred_var_dat$Box <- ifelse(pred_var_dat$Box == "A", "Predator", "Control")
pred_var_dat$Box
pred_var_dat$Box <- factor(pred_var_dat$Box)

# Per-fly totals: summed courtship time and number of bouts within 900 s.
FlyID <- group_by(startLess900, Fly_ID)
head(FlyID)
courtSum <- summarise(FlyID, sum = sum(relativeCourtDuration), count = n())
head(courtSum)
courtSum
courtship_for_analysis <- merge(x = pred_var_dat, y = courtSum, by.x = "Fly_ID", by.y = "Fly_ID")
with(courtship_for_analysis,
     boxplot(sum ~ Box))
with(courtship_for_analysis,
     boxplot(sum ~ Date))

### A simple version of the analysis
# Mean-centre temperature, humidity and deltaBP.
courtship_for_analysis$TempCent <- scale(courtship_for_analysis$Temp, scale = FALSE)
courtship_for_analysis$HumCent <- scale(courtship_for_analysis$Humidity, scale = FALSE)
courtship_for_analysis$BPCent <- scale(courtship_for_analysis$deltaBP, scale = FALSE)

# Model for sum of time courting in 900 seconds
# (redone below as a proportion of time spent courting).
#courtship_model1 <- lmer(sum ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
# Proportion of the 900-second window spent courting.
courtship_for_analysis$court_prop <- courtship_for_analysis$sum / 900
courtship_model1 <- lmer(court_prop ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
summary(courtship_model1)
# Model for number of courtship bouts in the 900 second observation window.
courtship_model2 <- lmer(count ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = courtship_for_analysis)
summary(courtship_model2)
plot(allEffects(courtship_model1))
plot(allEffects(courtship_model2))
plot(effect("Box", courtship_model1), main = "Male Time Courting of Immature Female in 900 Seconds",
     ylab = "Proportion of Time courting (sec)", xlab = "Treatment")
plot(effect("Box", courtship_model2), main = "Number of Male Courtship Bouts to Immature Female in 900 Seconds",
     ylab = "Courtship Bouts", xlab = "Treatment")

#### Copulation analysis ----
# Same workflow as above, for the mature-female copulation data.
summary(copulation)
dim(copulation)
head(copulation)
copulation$deltaBP <- (copulation$BP.12.00.am - copulation$BP.8.00.Am)
cop_data <- subset(copulation, select = c(Box, Date, Replicate, Vial.., Temp, Humidity, BP.12.00.am, BP.8.00.Am, BP.Room, Fly_ID, deltaBP))
cop_data <- unique(cop_data)
head(cop_data)
# NOTE(review): here "C" is mapped to "Predator", whereas the courtship data
# mapped "A" to "Predator" -- confirm the treatment coding really differs
# between the two datasets.
cop_data$Box <- ifelse(cop_data$Box == "C", "Predator", "Control")
cop_data$Box <- factor(cop_data$Box)
# One latency/duration record per fly.
LatCop <- distinct(select(copulation, Cop_latency, Cop_Duration, Fly_ID))
head(LatCop)
copul_for_analysis <- merge(x = cop_data, y = LatCop, by.x = "Fly_ID", by.y = "Fly_ID")
copul_for_analysis$TempCent <- scale(copul_for_analysis$Temp, scale = FALSE)
copul_for_analysis$HumCent <- scale(copul_for_analysis$Humidity, scale = FALSE)
copul_for_analysis$BPCent <- scale(copul_for_analysis$deltaBP, scale = FALSE)

### Simple linear mixed models for copulation latency and duration.
copul_model1 <- lmer(Cop_latency ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis)
summary(copul_model1)
copul_model2 <- lmer(Cop_Duration ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis)
summary(copul_model2)
with(copul_for_analysis, boxplot(Cop_latency ~ Box))
with(copul_for_analysis, boxplot(Cop_Duration ~ Box))
plot(allEffects(copul_model1))
plot(allEffects(copul_model2))
plot(effect("Box", copul_model1), main = "Mature Female Copulation Latency Rates with/without predator",
     ylab = "Copulation Latency (Sec)", xlab = "Treatment")
plot(effect("Box", copul_model2), main = "Mature Female Copulation Duration Rates with/without predator",
     ylab = "Copulation Duration (Sec)", xlab="Treatment")

# Repeat with flies that never copulated (latency == 0) removed.
LatCop2 <- distinct(select(copulation, Cop_latency, Cop_Duration, Fly_ID))
head(LatCop2)
# NOTE(review): this drops rows where Cop_latency == 0; if Cop_latency can be
# NA the comparison yields NA rows -- verify the data has no missing latencies.
LatCop2 <- LatCop2[!(LatCop2$Cop_latency == 0), ]
#LatCop2
#dim(LatCop2)
copul_for_analysis2 <- merge(x = cop_data, y = LatCop2, by.x = "Fly_ID", by.y = "Fly_ID")
copul_for_analysis2$TempCent <- scale(copul_for_analysis2$Temp, scale = FALSE)
copul_for_analysis2$HumCent <- scale(copul_for_analysis2$Humidity, scale = FALSE)
copul_for_analysis2$BPCent <- scale(copul_for_analysis2$deltaBP, scale = FALSE)
copul_model12 <- lmer(Cop_latency ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis2)
copul_model22 <- lmer(Cop_Duration ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis2)
summary(copul_model12)
summary(copul_model22)
plot(effect("Box", copul_model12), main = "Mature Female Copulation Latency Rates with/without predator, 0 removed",
     ylab = "Copulation Latency (Sec)", xlab = "Treatment")
plot(effect("Box", copul_model22), main = "Mature Female Copulation Duration Rates with/without predator, 0's removed",
     ylab = "Copulation Duration (Sec)", xlab="Treatment")
with(copul_for_analysis2, boxplot(Cop_latency ~ Box))
with(copul_for_analysis2, boxplot(Cop_Duration ~ Box))

# Copulation count (exploratory, kept for reference):
#byBox <- group_by(copul_for_analysis2, Box)
#byBox
#copCount <- summarise(byBox, count=n())
#copCount
#pie(copCount$count, labels = copCount$Box, radius = 1.0)

# Copulation proportion: did the pair copulate at all (latency > 0)?
head(copul_for_analysis)
copul_for_analysis$copulationSuccess <- ifelse(copul_for_analysis$Cop_latency == 0, 0, 1)
length(copul_for_analysis$Cop_Duration)
# BUG FIX: this model was previously fit with glm(), but glm() does not
# understand the lme4 random-effect term (1|Date) -- that syntax either
# errors or is misread as a logical expression. Use glmer() so Date enters
# as a random intercept, consistent with the lmer() models above.
copprop_mod <- glmer(copulationSuccess ~ Box + Replicate + TempCent + HumCent + BPCent + (1|Date), data = copul_for_analysis, family = "binomial")
summary(copprop_mod)
plot(allEffects(copprop_mod))
plot(effect("Box", copprop_mod), main = "Copulation Proportions",
     ylab = "Copulation Proportion", xlab = "Treatment")
|
library(shiny)

# ui.R for the "Estimating Natural Mortality (M)" Shiny tool.
# Sidebar: life-history inputs feeding the empirical M estimators, plus
# per-method weights used to build the composite M distribution.
# Main panel: per-method estimates, value tables, the composite density
# plot, and download buttons for each output (all rendered server-side).
shinyUI(
  fluidPage(
    titlePanel("Estimating Natural Mortality (M)"),
    # Introductory text shown above the inputs.
    h5(p(em("This tool employs various empirical estimators of natural mortality."))),
    h5(p(em("As the user enters values for the below input parameters,"))),
    h5(p(em("estimates will be displayed in the main panel."))),
    br(),
    # Opens the method-reference page in a small popup window.
    h4(p("References for each method can be found",tags$a(href="javascript:window.open('References_M.html', '_blank','width=600,height=400')", "here"))),
    headerPanel("Input parameters"),
    sidebarLayout(
      sidebarPanel
      (
        # Life-history inputs. All default to NA ("not provided");
        # presumably estimators needing a missing input are skipped by the
        # server -- that logic is not visible in this file.
        numericInput("Amax", "Maximum age (years):", value=NA,min=1, max=300, step=0.1),
        numericInput("Linf","Linf (in cm):", value=NA,min=1, max=1000, step=0.01),
        numericInput("k", "VBGF Growth coeff. k:", value=NA,min = 0.001, max = 1,step=0.01),
        numericInput("t0", "VBGF age at size 0 (t_0)", value=NA,min = -15, max = 15,step=0.01),
        numericInput("Amat","Age at maturity (years)", value=NA,min = 0.01, max = 100,step=0.01),
        numericInput("Winf","Asym. weight (Winf, in g):", value=NA,min = 0, max = 100000,step=0.1),
        numericInput("kw","VBGF Growth coeff. wt. (kw, in g): ", value=NA,min = 0.001, max = 5,step=0.01),
        numericInput("Temp","Water temperature (in C):" , value=NA,min = 0.001, max = 60,step=0.01),
        numericInput("Wdry","Total dry weight (in g):" ,value=NA,min = 0.01, max = 1000000,step=0.01),
        numericInput("Wwet","Total wet weight (in g):" ,value=NA,min = 0.01, max = 1000000,step=0.01),
        numericInput("Bl","Body length (cm):",value=NA,min = 0.01, max = 10000,step=0.01),
        numericInput("GSI","Gonadosomatic index:",value=NA,min = 0, max = 1,step=0.001),
        numericInput("User_M","User M input:",value=NA,min = 0, max = 10,step=0.001),
        br(),
        br(),
        # Weights in [0, 1] controlling each method's contribution to the
        # composite M distribution; defaults down-weight redundant methods.
        h3("Composite M: method weighting"),
        h5(p(em("Allows for weighting of the contribution of each method in the composite M distribution"))),
        h5("Values range from 0 to 1. A value of 0 removes the contribution; a value of 1 is full weighting."),
        h5("Default values are based on redundancies of methods using similar information."),
        h5("For instance,the four max. age methods are given a weight of 0.25, so all weighted together equal 1"),
        wellPanel(
          # Maximum-age based methods (four variants, weighted 0.25 each).
          fluidRow(
            column(4,numericInput("Then_Amax_1","Then_Amax 1",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_Amax_2","Then_Amax 2",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_Amax_3","Then_Amax 3",value=0.25,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("Hamel_Amax","Hamel_Amax",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("AnC","AnC",value=0,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_VBGF","Then_VBGF",value=0.34,min = 0, max = 1,step=0.001))
          ),
          # Growth-based (VBGF) methods.
          fluidRow(
            column(4,numericInput("Jensen_VBGF_1","Jensen_VBGF 1",value=0.33,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Jensen_VBGF_2","Jensen_VBGF 2",value=0.33,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Pauly_lt","Pauly_lt",value=0.5,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("Gislason","Gislason",value=1,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Chen_Wat","Chen-Wat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Roff","Roff",value=0.5,min = 0, max = 1,step=0.001))
          ),
          # Maturity- and weight-based methods.
          fluidRow(
            column(4,numericInput("Jensen_Amat","Jensen_Amat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Ri_Ef_Amat","Ri_Ef_Amat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Pauly_wt","Pauly_wt",value=0.5,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("PnW","PnW",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Lorenzen","Lorenzen",value=1,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Gonosoma","GSI",value=1,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("UserM","User M",value=1,min = 0, max = 1,step=0.001)))
        )
      ),
      mainPanel(
        h4("Natural mortality (M) estimates by method"),
        plotOutput("Mplot"),
        h4("Natural mortality (M) values"),
        # NOTE(review): the download buttons and the composite-M outputs
        # below all sit inside this fluidRow alongside the two table
        # columns -- it renders, but confirm the layout is intentional.
        fluidRow(
          column(6,tableOutput("Mtable")),
          column(6,tableOutput("Mtable2")),
          downloadButton('downloadMs', 'Download M values'),
          downloadButton('downloadCW_M_a', 'Download Chen-Wat. age-specific M values'),
          br(),
          br(),
          br(),
          h4("Composite natural mortality"),
          h5(p(em("Blue vertical line indicates median value"))),
          plotOutput("Mcomposite"),
          downloadButton('downloadMcompositedensityplot', 'Download composite M density plot'),
          downloadButton('downloadMcompositedist', 'Download composite M for resampling')
        )
      )
    )
  )
)
|
/ui.R
|
no_license
|
mkapur/Natural-Mortality-Tool
|
R
| false
| false
| 5,283
|
r
|
library(shiny)

# ui.R for the "Estimating Natural Mortality (M)" Shiny tool.
# Sidebar: life-history inputs feeding the empirical M estimators, plus
# per-method weights used to build the composite M distribution.
# Main panel: per-method estimates, value tables, the composite density
# plot, and download buttons for each output (all rendered server-side).
shinyUI(
  fluidPage(
    titlePanel("Estimating Natural Mortality (M)"),
    # Introductory text shown above the inputs.
    h5(p(em("This tool employs various empirical estimators of natural mortality."))),
    h5(p(em("As the user enters values for the below input parameters,"))),
    h5(p(em("estimates will be displayed in the main panel."))),
    br(),
    # Opens the method-reference page in a small popup window.
    h4(p("References for each method can be found",tags$a(href="javascript:window.open('References_M.html', '_blank','width=600,height=400')", "here"))),
    headerPanel("Input parameters"),
    sidebarLayout(
      sidebarPanel
      (
        # Life-history inputs. All default to NA ("not provided");
        # presumably estimators needing a missing input are skipped by the
        # server -- that logic is not visible in this file.
        numericInput("Amax", "Maximum age (years):", value=NA,min=1, max=300, step=0.1),
        numericInput("Linf","Linf (in cm):", value=NA,min=1, max=1000, step=0.01),
        numericInput("k", "VBGF Growth coeff. k:", value=NA,min = 0.001, max = 1,step=0.01),
        numericInput("t0", "VBGF age at size 0 (t_0)", value=NA,min = -15, max = 15,step=0.01),
        numericInput("Amat","Age at maturity (years)", value=NA,min = 0.01, max = 100,step=0.01),
        numericInput("Winf","Asym. weight (Winf, in g):", value=NA,min = 0, max = 100000,step=0.1),
        numericInput("kw","VBGF Growth coeff. wt. (kw, in g): ", value=NA,min = 0.001, max = 5,step=0.01),
        numericInput("Temp","Water temperature (in C):" , value=NA,min = 0.001, max = 60,step=0.01),
        numericInput("Wdry","Total dry weight (in g):" ,value=NA,min = 0.01, max = 1000000,step=0.01),
        numericInput("Wwet","Total wet weight (in g):" ,value=NA,min = 0.01, max = 1000000,step=0.01),
        numericInput("Bl","Body length (cm):",value=NA,min = 0.01, max = 10000,step=0.01),
        numericInput("GSI","Gonadosomatic index:",value=NA,min = 0, max = 1,step=0.001),
        numericInput("User_M","User M input:",value=NA,min = 0, max = 10,step=0.001),
        br(),
        br(),
        # Weights in [0, 1] controlling each method's contribution to the
        # composite M distribution; defaults down-weight redundant methods.
        h3("Composite M: method weighting"),
        h5(p(em("Allows for weighting of the contribution of each method in the composite M distribution"))),
        h5("Values range from 0 to 1. A value of 0 removes the contribution; a value of 1 is full weighting."),
        h5("Default values are based on redundancies of methods using similar information."),
        h5("For instance,the four max. age methods are given a weight of 0.25, so all weighted together equal 1"),
        wellPanel(
          # Maximum-age based methods (four variants, weighted 0.25 each).
          fluidRow(
            column(4,numericInput("Then_Amax_1","Then_Amax 1",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_Amax_2","Then_Amax 2",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_Amax_3","Then_Amax 3",value=0.25,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("Hamel_Amax","Hamel_Amax",value=0.25,min = 0, max = 1,step=0.001)),
            column(4,numericInput("AnC","AnC",value=0,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Then_VBGF","Then_VBGF",value=0.34,min = 0, max = 1,step=0.001))
          ),
          # Growth-based (VBGF) methods.
          fluidRow(
            column(4,numericInput("Jensen_VBGF_1","Jensen_VBGF 1",value=0.33,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Jensen_VBGF_2","Jensen_VBGF 2",value=0.33,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Pauly_lt","Pauly_lt",value=0.5,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("Gislason","Gislason",value=1,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Chen_Wat","Chen-Wat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Roff","Roff",value=0.5,min = 0, max = 1,step=0.001))
          ),
          # Maturity- and weight-based methods.
          fluidRow(
            column(4,numericInput("Jensen_Amat","Jensen_Amat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Ri_Ef_Amat","Ri_Ef_Amat",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Pauly_wt","Pauly_wt",value=0.5,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("PnW","PnW",value=0.5,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Lorenzen","Lorenzen",value=1,min = 0, max = 1,step=0.001)),
            column(4,numericInput("Gonosoma","GSI",value=1,min = 0, max = 1,step=0.001))
          ),
          fluidRow(
            column(4,numericInput("UserM","User M",value=1,min = 0, max = 1,step=0.001)))
        )
      ),
      mainPanel(
        h4("Natural mortality (M) estimates by method"),
        plotOutput("Mplot"),
        h4("Natural mortality (M) values"),
        # NOTE(review): the download buttons and the composite-M outputs
        # below all sit inside this fluidRow alongside the two table
        # columns -- it renders, but confirm the layout is intentional.
        fluidRow(
          column(6,tableOutput("Mtable")),
          column(6,tableOutput("Mtable2")),
          downloadButton('downloadMs', 'Download M values'),
          downloadButton('downloadCW_M_a', 'Download Chen-Wat. age-specific M values'),
          br(),
          br(),
          br(),
          h4("Composite natural mortality"),
          h5(p(em("Blue vertical line indicates median value"))),
          plotOutput("Mcomposite"),
          downloadButton('downloadMcompositedensityplot', 'Download composite M density plot'),
          downloadButton('downloadMcompositedist', 'Download composite M for resampling')
        )
      )
    )
  )
)
|
# Goodness-of-fit exploration: compare an observed sample to a random gamma
# sample via Q-Q plot, then run Kolmogorov-Smirnov tests.

# Observed sample (n = 41).
a <- c(1.55, 3.18, 1.92, 2.83, 2.84, 2.98, 4.20, 1.05, 3.69, 0.74, 1.84,
       3.22, 3.77, 2.91, 4.72, 1.90, 2.03, 3.70, 4.10, 4.05, 5.54, 3.18,
       2.89, 4.31, 4.62, 5.45, 1.88, 2.79, 4.14, 1.02, 7.95, 7.22, 4.68,
       2.26, 2.38, 2.12, 4.25, 1.94, 2.03, 3.70, 2.01)
# Random gamma sample of the same size (shape = 4.472, rate = 1.3726 --
# presumably matched to `a`; confirm how these parameters were derived).
b <- rgamma(41, 4.472, 1.3726)
# If `a` follows this gamma distribution, points should fall near y = x.
qqplot(a, b)

# KS test of a standard-normal sample against its theoretical CDF.
# (Was `norms = rnorm(1000)`; use `<-` for assignment.)
norms <- rnorm(1000)
ks.test(norms, 'pnorm')

x <- rnorm(50)
y <- runif(30)
# Do x and y come from the same distribution?
ks.test(x, y)
|
/test_project/test/qqplot.R
|
no_license
|
yuanqingye/R_Projects
|
R
| false
| false
| 430
|
r
|
# Goodness-of-fit exploration: compare an observed sample to a random gamma
# sample via Q-Q plot, then run Kolmogorov-Smirnov tests.

# Observed sample (n = 41).
a <- c(1.55, 3.18, 1.92, 2.83, 2.84, 2.98, 4.20, 1.05, 3.69, 0.74, 1.84,
       3.22, 3.77, 2.91, 4.72, 1.90, 2.03, 3.70, 4.10, 4.05, 5.54, 3.18,
       2.89, 4.31, 4.62, 5.45, 1.88, 2.79, 4.14, 1.02, 7.95, 7.22, 4.68,
       2.26, 2.38, 2.12, 4.25, 1.94, 2.03, 3.70, 2.01)
# Random gamma sample of the same size (shape = 4.472, rate = 1.3726 --
# presumably matched to `a`; confirm how these parameters were derived).
b <- rgamma(41, 4.472, 1.3726)
# If `a` follows this gamma distribution, points should fall near y = x.
qqplot(a, b)

# KS test of a standard-normal sample against its theoretical CDF.
# (Was `norms = rnorm(1000)`; use `<-` for assignment.)
norms <- rnorm(1000)
ks.test(norms, 'pnorm')

x <- rnorm(50)
y <- runif(30)
# Do x and y come from the same distribution?
ks.test(x, y)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extendedisolationforest.R
\name{h2o.extendedIsolationForest}
\alias{h2o.extendedIsolationForest}
\title{Trains an Extended Isolation Forest model}
\usage{
h2o.extendedIsolationForest(
training_frame,
x,
model_id = NULL,
ignore_const_cols = TRUE,
categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
"Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
ntrees = 100,
sample_size = 256,
extension_level = 0,
seed = -1
)
}
\arguments{
\item{training_frame}{Id of the training data frame.}
\item{x}{A vector containing the \code{character} names of the predictors in the model.}
\item{model_id}{Destination id for this model; auto-generated if not specified.}
\item{ignore_const_cols}{\code{Logical}. Ignore constant columns. Defaults to TRUE.}
\item{categorical_encoding}{Encoding scheme for categorical features Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
"Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.}
\item{ntrees}{Number of Extended Isolation Forest trees. Defaults to 100.}
\item{sample_size}{Number of randomly sampled observations used to train each Extended Isolation Forest tree. Defaults to 256.}
\item{extension_level}{Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with extension_Level = 0 behaves like
Isolation Forest. Defaults to 0.}
\item{seed}{Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default).
Defaults to -1 (time-based random number).}
}
\description{
Trains an Extended Isolation Forest model
}
\examples{
\dontrun{
library(h2o)
h2o.init()
# Import the prostate dataset
p <- h2o.importFile(path="https://raw.github.com/h2oai/h2o/master/smalldata/logreg/prostate.csv")
# Set the predictors
predictors <- c("AGE","RACE","DPROS","DCAPS","PSA","VOL","GLEASON")
# Build an Extended Isolation forest model
model <- h2o.extendedIsolationForest(x = predictors,
training_frame = p,
model_id = "eif.hex",
ntrees = 100,
sample_size = 256,
extension_level = length(predictors) - 1)
# Calculate score
score <- h2o.predict(model, p)
anomaly_score <- score$anomaly_score
# Number in [0, 1] explicitly defined in Equation (1) from Extended Isolation Forest paper
# or in paragraph '2 Isolation and Isolation Trees' of Isolation Forest paper
anomaly_score <- score$anomaly_score
# Average path length of the point in Isolation Trees from root to the leaf
mean_length <- score$mean_length
}
}
|
/man/h2o.extendedIsolationForest.Rd
|
no_license
|
cran/h2o
|
R
| false
| true
| 2,802
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extendedisolationforest.R
\name{h2o.extendedIsolationForest}
\alias{h2o.extendedIsolationForest}
\title{Trains an Extended Isolation Forest model}
\usage{
h2o.extendedIsolationForest(
training_frame,
x,
model_id = NULL,
ignore_const_cols = TRUE,
categorical_encoding = c("AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
"Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited"),
ntrees = 100,
sample_size = 256,
extension_level = 0,
seed = -1
)
}
\arguments{
\item{training_frame}{Id of the training data frame.}
\item{x}{A vector containing the \code{character} names of the predictors in the model.}
\item{model_id}{Destination id for this model; auto-generated if not specified.}
\item{ignore_const_cols}{\code{Logical}. Ignore constant columns. Defaults to TRUE.}
\item{categorical_encoding}{Encoding scheme for categorical features Must be one of: "AUTO", "Enum", "OneHotInternal", "OneHotExplicit",
"Binary", "Eigen", "LabelEncoder", "SortByResponse", "EnumLimited". Defaults to AUTO.}
\item{ntrees}{Number of Extended Isolation Forest trees. Defaults to 100.}
\item{sample_size}{Number of randomly sampled observations used to train each Extended Isolation Forest tree. Defaults to 256.}
\item{extension_level}{Maximum is N - 1 (N = numCols). Minimum is 0. Extended Isolation Forest with extension_Level = 0 behaves like
Isolation Forest. Defaults to 0.}
\item{seed}{Seed for random numbers (affects certain parts of the algo that are stochastic and those might or might not be enabled by default).
Defaults to -1 (time-based random number).}
}
\description{
Trains an Extended Isolation Forest model
}
\examples{
\dontrun{
library(h2o)
h2o.init()
# Import the prostate dataset
p <- h2o.importFile(path="https://raw.github.com/h2oai/h2o/master/smalldata/logreg/prostate.csv")
# Set the predictors
predictors <- c("AGE","RACE","DPROS","DCAPS","PSA","VOL","GLEASON")
# Build an Extended Isolation forest model
model <- h2o.extendedIsolationForest(x = predictors,
training_frame = p,
model_id = "eif.hex",
ntrees = 100,
sample_size = 256,
extension_level = length(predictors) - 1)
# Calculate score
score <- h2o.predict(model, p)
anomaly_score <- score$anomaly_score
# Number in [0, 1] explicitly defined in Equation (1) from Extended Isolation Forest paper
# or in paragraph '2 Isolation and Isolation Trees' of Isolation Forest paper
anomaly_score <- score$anomaly_score
# Average path length of the point in Isolation Trees from root to the leaf
mean_length <- score$mean_length
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scClassifyTrainClass.R
\name{cellTypeTrain}
\alias{cellTypeTrain}
\alias{cellTypeTrain,scClassifyTrainModel-method}
\title{Accessors of cellTypeTrain for scClassifyTrainModel}
\usage{
cellTypeTrain(x)
}
\arguments{
\item{x}{A `scClassifyTrainModel` object.}
}
\value{
cellTypeTrain of the scClassifyTrainModel slot
}
\description{
Methods to access various components of the `scClassifyTrainModel` object.
}
\examples{
data(trainClassExample_xin)
cellTypeTrain(trainClassExample_xin)
}
|
/man/cellTypeTrain.Rd
|
no_license
|
SydneyBioX/scClassify
|
R
| false
| true
| 566
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scClassifyTrainClass.R
\name{cellTypeTrain}
\alias{cellTypeTrain}
\alias{cellTypeTrain,scClassifyTrainModel-method}
\title{Accessors of cellTypeTrain for scClassifyTrainModel}
\usage{
cellTypeTrain(x)
}
\arguments{
\item{x}{A `scClassifyTrainModel` object.}
}
\value{
cellTypeTrain of the scClassifyTrainModel slot
}
\description{
Methods to access various components of the `scClassifyTrainModel` object.
}
\examples{
data(trainClassExample_xin)
cellTypeTrain(trainClassExample_xin)
}
|
#' Separate a collapsed column into multiple rows.
#'
#' If a variable contains observations with multiple delimited values, this
#' separates the values and places each one in its own row.
#'
#' @inheritSection gather Rules for selection
#' @inheritParams gather
#' @inheritParams separate
#' @param sep Separator delimiting collapsed values.
#' @return A data frame of the same type as `data`, with one row per
#'   delimited value.
#' @export
#' @examples
#'
#' df <- data.frame(
#'   x = 1:3,
#'   y = c("a", "d,e,f", "g,h"),
#'   z = c("1", "2,3,4", "5,6"),
#'   stringsAsFactors = FALSE
#' )
#' separate_rows(df, y, z, convert = TRUE)
separate_rows <- function(data, ..., sep = "[^[:alnum:].]+",
                          convert = FALSE) {
  # S3 generic: dispatch on the class of `data`.
  UseMethod("separate_rows")
}
#' @export
separate_rows.default <- function(data, ..., sep = "[^[:alnum:].]+",
                                  convert = FALSE) {
  # Fall back to the standard-evaluation variant for non-data-frame inputs.
  cols <- compat_as_lazy_dots(...)
  # BUG FIX: `convert` was previously dropped when delegating, so
  # `separate_rows(x, ..., convert = TRUE)` silently ignored `convert` for
  # any class handled by this default method. Forward it explicitly.
  separate_rows_(data, cols = cols, sep = sep, convert = convert)
}
#' @export
separate_rows.data.frame <- function(data, ..., sep = "[^[:alnum:].]+",
                                     convert = FALSE) {
  # Keep the original input so grouping/class attributes can be restored.
  orig <- data
  # Resolve the tidyselect expressions in `...` to plain column names.
  vars <- unname(tidyselect::vars_select(names(data), ...))
  # Split each selected column on the separator regex; every cell becomes a
  # character vector of pieces (a list-column per selected variable).
  data[vars] <- map(data[vars], stringi::stri_split_regex, sep)
  # Unnest the list-columns in parallel, producing one row per piece.
  data <- unnest(data, !!! syms(vars))
  if (convert) {
    # Reparse the split strings into their natural types (integer, double, ...).
    data[vars] <- map(data[vars], type.convert, as.is = TRUE)
  }
  # Restore tibble/grouped-df class and attributes from the original input.
  reconstruct_tibble(orig, data, vars)
}
#' @rdname deprecated-se
#' @inheritParams separate_rows
#' @export
separate_rows_ <- function(data, cols, sep = "[^[:alnum:].]+",
                           convert = FALSE) {
  # Deprecated standard-evaluation generic: takes column names as strings.
  UseMethod("separate_rows_")
}
#' @export
separate_rows_.data.frame <- function(data, cols, sep = "[^[:alnum:].]+",
                                      convert = FALSE) {
  # Deprecated SE method: turn the character column names into symbols and
  # splice them straight into the tidy-eval interface.
  separate_rows(data, !!! syms(cols), sep = sep, convert = convert)
}
|
/R/separate-rows.R
|
permissive
|
iamjoshbinder/tidyr
|
R
| false
| false
| 1,785
|
r
|
#' Separate a collapsed column into multiple rows.
#'
#' If a variable contains observations with multiple delimited values, this
#' separates the values and places each one in its own row.
#'
#' @inheritSection gather Rules for selection
#' @inheritParams gather
#' @inheritParams separate
#' @param sep Separator delimiting collapsed values.
#' @return A data frame of the same type as `data`, with one row per
#'   delimited value.
#' @export
#' @examples
#'
#' df <- data.frame(
#'   x = 1:3,
#'   y = c("a", "d,e,f", "g,h"),
#'   z = c("1", "2,3,4", "5,6"),
#'   stringsAsFactors = FALSE
#' )
#' separate_rows(df, y, z, convert = TRUE)
separate_rows <- function(data, ..., sep = "[^[:alnum:].]+",
                          convert = FALSE) {
  # S3 generic: dispatch on the class of `data`.
  UseMethod("separate_rows")
}
#' @export
separate_rows.default <- function(data, ..., sep = "[^[:alnum:].]+",
                                  convert = FALSE) {
  # Fall back to the standard-evaluation variant for non-data-frame inputs.
  cols <- compat_as_lazy_dots(...)
  # BUG FIX: `convert` was previously dropped when delegating, so
  # `separate_rows(x, ..., convert = TRUE)` silently ignored `convert` for
  # any class handled by this default method. Forward it explicitly.
  separate_rows_(data, cols = cols, sep = sep, convert = convert)
}
#' @export
separate_rows.data.frame <- function(data, ..., sep = "[^[:alnum:].]+",
                                     convert = FALSE) {
  # Keep the original input so grouping/class attributes can be restored.
  orig <- data
  # Resolve the tidyselect expressions in `...` to plain column names.
  vars <- unname(tidyselect::vars_select(names(data), ...))
  # Split each selected column on the separator regex; every cell becomes a
  # character vector of pieces (a list-column per selected variable).
  data[vars] <- map(data[vars], stringi::stri_split_regex, sep)
  # Unnest the list-columns in parallel, producing one row per piece.
  data <- unnest(data, !!! syms(vars))
  if (convert) {
    # Reparse the split strings into their natural types (integer, double, ...).
    data[vars] <- map(data[vars], type.convert, as.is = TRUE)
  }
  # Restore tibble/grouped-df class and attributes from the original input.
  reconstruct_tibble(orig, data, vars)
}
#' @rdname deprecated-se
#' @inheritParams separate_rows
#' @export
separate_rows_ <- function(data, cols, sep = "[^[:alnum:].]+",
                           convert = FALSE) {
  # Deprecated standard-evaluation generic: takes column names as strings.
  UseMethod("separate_rows_")
}
#' @export
separate_rows_.data.frame <- function(data, cols, sep = "[^[:alnum:].]+",
                                      convert = FALSE) {
  # Deprecated SE method: turn the character column names into symbols and
  # splice them straight into the tidy-eval interface.
  separate_rows(data, !!! syms(cols), sep = sep, convert = convert)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspect-product.R
\name{inspect_product}
\alias{inspect_product}
\title{Inspect product}
\usage{
inspect_product(res_global, dimension = c(1, 2))
}
\arguments{
\item{res_global}{output of global analysis}
\item{dimension}{dimension to focus, integer vector of length 2}
}
\description{
Evaluate product in global analysis.
}
|
/man/inspect_product.Rd
|
permissive
|
isoletslicer/sensehubr
|
R
| false
| true
| 404
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspect-product.R
\name{inspect_product}
\alias{inspect_product}
\title{Inspect product}
\usage{
inspect_product(res_global, dimension = c(1, 2))
}
\arguments{
\item{res_global}{output of global analysis}
\item{dimension}{dimension to focus, integer vector of length 2}
}
\description{
Evaluate product in global analysis.
}
|
library(shiny)
library(shinydashboard)

# Run-once code: executed when the app starts, before the UI is built.
# Load the slider demo data and extract one column per training phase.
slider_data <- read.csv("slider_data.csv", header=TRUE, sep=",")
# Columns 2-5 are assumed to hold the four phase series (column 1 is
# presumably an index/year column) -- TODO confirm against slider_data.csv.
Phase1 <- slider_data[,2]
Phase2 <- slider_data[,3]
Phase3 <- slider_data[,4]
Phase4 <- slider_data[,5]
# Year range shown in the dashboard's "Years of Data" value box.
startYear <- 2013
endYear <- 2016
# Define UI for dashboard
# Three tabs: a pie/bar demo, a slider-driven bar chart over academic
# years, and a CSV upload that renders a table plus a histogram.
ui <- dashboardPage(
  dashboardHeader(title = "Data Programming and the Cloud"),
  dashboardSidebar(
    sidebarMenu(
      menuItem(
        "Pie Charts!",
        tabName = "PieChart_Time",
        icon = icon("pie-chart")
      ),
      menuItem(
        "Use a Slider",
        tabName = "slider_time",
        icon = icon("sliders"),
        badgeLabel = "New"
      ),
      menuItem(
        "Upload a Histogram File",
        tabName = "Upload_hist",
        icon = icon("bar-chart"),
        badgeLabel = "Live"
      )
    ),
    # Static image shown beneath the sidebar menu (served from www/).
    img(
      src = "R4_DataProgandCloud.JPG",
      height = 200,
      width = 150
    )
  ),
  dashboardBody(tabItems(
    # PieChart_Time Content
    tabItem(tabName = "PieChart_Time",
            fluidRow(
              box(
                title="PieChart_Time",
                status = "warning",
                numericInput(
                  "pie",
                  "Percent of Pie Chart",
                  min =0,
                  max = 100,
                  value = 50
                ),
                textInput(
                  "pietext",
                  "Text Input",
                  value = "Default Title",
                  placeholder = "Enter Your Title Here"
                ),
                checkboxInput("pieChoice",
                              " I want a Pie Chart instead.",
                              value = FALSE)
              ),
              box(
                title = "Graphical Output",
                solidHeader = TRUE,
                status = "warning",
                plotOutput("piePlot")
              )
            )),
    # Slider Tab Content
    tabItem(
      tabName = "slider_time",
      h2("Training Programme Results"),
      fluidRow(
        box(
          title="Control the Academic Year",
          status = "primary",
          solidHeader = TRUE,
          sliderInput(
            "ayear",
            "Academic Year:",
            min = 2014,
            max = 2017,
            value = 2014,
            step =1,
            # Auto-advance the slider every 600 ms when played.
            animate = animationOptions(interval = 600, loop = T)
          )
        ),
        box(plotOutput("barPlot"))
      ),
      fluidRow(
        valueBox(
          endYear - startYear,
          "Years of Data",
          icon = icon("band-aid"),
          width =2
        )
      )
    ),
    # Histogram for an UPloaded CSV
    tabItem(tabName = "Upload_hist",
            fluidRow(
              box(title="File Input",
                  # Copy the line below to make a file upload manager
                  fileInput(
                    "file",
                    label = h3("histogram Data File input:"),
                    multiple = FALSE
                  )),
              box(
                title = "Data from file input",
                collapsible = TRUE,
                tableOutput("tabledf")
              )
            ),
            fluidRow(
              box(tableOutput("tabledf2")),
              box(background="blue",
                  plotOutput("histPlot1"))
            ))
  )),
  title = "Dashboard Sampler",
  skin = "yellow"
)
##### Server logic to draw histogram
server <- shinyServer(function(input, output) {
  # Pie/bar demo: plot input$pie against its complement to 100.
  output$piePlot <- renderPlot({
    # generate pie chart ratios based on input$pie from user
    y <- c(input$pie, 100-input$pie)
    #draw the pie chart or barplot with specified ratio and label
    if(input$pieChoice == FALSE) {
      barplot (y, ylim = c(0,100),
               names.arg = c(input$pietext, paste0("Complement of ", input$pietext)))
    } else {
      pie(y, labels = c(input$pietext, paste0("Complement of ", input$pietext)))
    }
  })
  output$barPlot <- renderPlot({
    # count values in each phase which mat the correct date
    # (cap scales the selected year onto the same units as the phase data)
    cap <- input$ayear * 100
    x <- c(sum(Phase1<cap), sum(Phase2<cap),sum(Phase3<cap),sum(Phase4<cap))
    #draw barplot for correct year
    barplot( x, names.arg = c("Phase I", "Phase II", "Phase III", "Fellows"),
             col = c("deeppink1", "deeppink2", "deeppink3", "deeppink4"),
             ylim = c(0,50)
    )
  })
  #### Where input of file happens
  # Show the upload metadata (name, size, type, datapath) as a table.
  output$tabledf <- renderTable({
    input$file
  })
  # Re-read the uploaded CSV whenever a new file arrives.
  histData <- reactive ({
    file1 <- input$file
    read.csv(file1$datapath, header = TRUE, sep=",")
  })
  output$tabledf2 <- renderTable({
    histData()
  })
  # NOTE(review): assumes the uploaded CSV has a column named X1 — confirm.
  output$histPlot1 <- renderPlot({
    hist(as.numeric(histData()$X1))
  })
}) # end server
# run application
shinyApp(ui=ui, server=server)
|
/app.R
|
no_license
|
juschan/r_shiny_dashboard
|
R
| false
| false
| 4,894
|
r
|
library(shiny)
library(shinydashboard)
#run-once code
# Load the slider demo data once at app start-up; columns 2-5 hold the
# four training-phase series plotted in the "Use a Slider" tab.
slider_data <- read.csv("slider_data.csv", header=TRUE, sep=",")
Phase1 <- slider_data[,2]
Phase2 <- slider_data[,3]
Phase3 <- slider_data[,4]
Phase4 <- slider_data[,5]
# Year span reported by the "Years of Data" value box.
startYear <- 2013
endYear <- 2016
# Define UI for dashboard
# Three tabs: a pie/bar demo, a slider-driven bar chart over academic
# years, and a CSV upload that renders a table plus a histogram.
ui <- dashboardPage(
  dashboardHeader(title = "Data Programming and the Cloud"),
  dashboardSidebar(
    sidebarMenu(
      menuItem(
        "Pie Charts!",
        tabName = "PieChart_Time",
        icon = icon("pie-chart")
      ),
      menuItem(
        "Use a Slider",
        tabName = "slider_time",
        icon = icon("sliders"),
        badgeLabel = "New"
      ),
      menuItem(
        "Upload a Histogram File",
        tabName = "Upload_hist",
        icon = icon("bar-chart"),
        badgeLabel = "Live"
      )
    ),
    # Static image shown beneath the sidebar menu (served from www/).
    img(
      src = "R4_DataProgandCloud.JPG",
      height = 200,
      width = 150
    )
  ),
  dashboardBody(tabItems(
    # PieChart_Time Content
    tabItem(tabName = "PieChart_Time",
            fluidRow(
              box(
                title="PieChart_Time",
                status = "warning",
                numericInput(
                  "pie",
                  "Percent of Pie Chart",
                  min =0,
                  max = 100,
                  value = 50
                ),
                textInput(
                  "pietext",
                  "Text Input",
                  value = "Default Title",
                  placeholder = "Enter Your Title Here"
                ),
                checkboxInput("pieChoice",
                              " I want a Pie Chart instead.",
                              value = FALSE)
              ),
              box(
                title = "Graphical Output",
                solidHeader = TRUE,
                status = "warning",
                plotOutput("piePlot")
              )
            )),
    # Slider Tab Content
    tabItem(
      tabName = "slider_time",
      h2("Training Programme Results"),
      fluidRow(
        box(
          title="Control the Academic Year",
          status = "primary",
          solidHeader = TRUE,
          sliderInput(
            "ayear",
            "Academic Year:",
            min = 2014,
            max = 2017,
            value = 2014,
            step =1,
            # Auto-advance the slider every 600 ms when played.
            animate = animationOptions(interval = 600, loop = T)
          )
        ),
        box(plotOutput("barPlot"))
      ),
      fluidRow(
        valueBox(
          endYear - startYear,
          "Years of Data",
          icon = icon("band-aid"),
          width =2
        )
      )
    ),
    # Histogram for an UPloaded CSV
    tabItem(tabName = "Upload_hist",
            fluidRow(
              box(title="File Input",
                  # Copy the line below to make a file upload manager
                  fileInput(
                    "file",
                    label = h3("histogram Data File input:"),
                    multiple = FALSE
                  )),
              box(
                title = "Data from file input",
                collapsible = TRUE,
                tableOutput("tabledf")
              )
            ),
            fluidRow(
              box(tableOutput("tabledf2")),
              box(background="blue",
                  plotOutput("histPlot1"))
            ))
  )),
  title = "Dashboard Sampler",
  skin = "yellow"
)
##### Server logic to draw histogram
server <- shinyServer(function(input, output) {
  # Pie/bar demo: plot input$pie against its complement to 100.
  output$piePlot <- renderPlot({
    # generate pie chart ratios based on input$pie from user
    y <- c(input$pie, 100-input$pie)
    #draw the pie chart or barplot with specified ratio and label
    if(input$pieChoice == FALSE) {
      barplot (y, ylim = c(0,100),
               names.arg = c(input$pietext, paste0("Complement of ", input$pietext)))
    } else {
      pie(y, labels = c(input$pietext, paste0("Complement of ", input$pietext)))
    }
  })
  output$barPlot <- renderPlot({
    # count values in each phase which mat the correct date
    # (cap scales the selected year onto the same units as the phase data)
    cap <- input$ayear * 100
    x <- c(sum(Phase1<cap), sum(Phase2<cap),sum(Phase3<cap),sum(Phase4<cap))
    #draw barplot for correct year
    barplot( x, names.arg = c("Phase I", "Phase II", "Phase III", "Fellows"),
             col = c("deeppink1", "deeppink2", "deeppink3", "deeppink4"),
             ylim = c(0,50)
    )
  })
  #### Where input of file happens
  # Show the upload metadata (name, size, type, datapath) as a table.
  output$tabledf <- renderTable({
    input$file
  })
  # Re-read the uploaded CSV whenever a new file arrives.
  histData <- reactive ({
    file1 <- input$file
    read.csv(file1$datapath, header = TRUE, sep=",")
  })
  output$tabledf2 <- renderTable({
    histData()
  })
  # NOTE(review): assumes the uploaded CSV has a column named X1 — confirm.
  output$histPlot1 <- renderPlot({
    hist(as.numeric(histData()$X1))
  })
}) # end server
# run application
shinyApp(ui=ui, server=server)
|
/Análisis Datos 4-12-2018/r_tesis_sebas.R
|
no_license
|
DarthEduro/tesis
|
R
| false
| false
| 2,057
|
r
| ||
# Example script from the cwhmisc package: draws confidence ellipses for a
# bivariate mean at several confidence levels, then a fan of rotated
# ellipses cycling through three colours.
library(cwhmisc)
### Name: ellipse
### Title: Generate ellipses
### Aliases: ellipseC ellipse1 conf.ellipse
### Keywords: multivariate dplot
### ** Examples
opar <- par(mfrow=c(1,1))
# Ellipse shape: k points, centre m, semi-axes a/b, rotation phi;
# F-distribution degrees of freedom df1/df2 set the confidence regions.
k <- 60; m <- c(0,0); a <- 2; b <- 1; phi <- pi/7
df1 <- 2; df2 <- 20
# show F for different confidence levels:
p <- c(0.5, 0.75, 0.8, 0.95)
qf(p, df1, df2) # 0.717735 1.486984 1.746189 3.492828
el7 <- conf.ellipse(a,b,phi,df1,df2,p[2], k) + m
# Set up the plot region from the 75% ellipse scaled up, then overlay one
# ellipse per confidence level.
plot(el7*1.8,type="n",xlab="Different confidence ellipses",ylab="")
lines(conf.ellipse(a,b,phi,df1,df2,p[1],60) + m,lty=2,col="red")
lines(conf.ellipse(a,b,phi,df1,df2,p[3],60) + m,lty=2,col="green")
lines(conf.ellipse(a,b,phi,df1,df2,p[4],60) + m,lty=2,col="blue")
lines(el7,lty=2,col="orange")
leg1 <- paste(as.character(p*100),rep("percent",length(p)),sep="")
# leg1 <- paste(as.character(p*100),rep("%",length(p)),sep="")
col1 <- c("red", "orange","green","blue")
legend(x="bottom",leg1,col=col1,
       text.col="black",lty=c(2,2,2,2), merge=TRUE, bg='white', cex=0.9)
par(opar)
for(ii in 0:15){ x <- ellipseC(40,1,2,phi=pi/15*ii);lines(x,col=ii%%3+1)}
|
/data/genthat_extracted_code/cwhmisc/examples/ellipse.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,115
|
r
|
# Example script from the cwhmisc package: draws confidence ellipses for a
# bivariate mean at several confidence levels, then a fan of rotated
# ellipses cycling through three colours.
library(cwhmisc)
### Name: ellipse
### Title: Generate ellipses
### Aliases: ellipseC ellipse1 conf.ellipse
### Keywords: multivariate dplot
### ** Examples
opar <- par(mfrow=c(1,1))
# Ellipse shape: k points, centre m, semi-axes a/b, rotation phi;
# F-distribution degrees of freedom df1/df2 set the confidence regions.
k <- 60; m <- c(0,0); a <- 2; b <- 1; phi <- pi/7
df1 <- 2; df2 <- 20
# show F for different confidence levels:
p <- c(0.5, 0.75, 0.8, 0.95)
qf(p, df1, df2) # 0.717735 1.486984 1.746189 3.492828
el7 <- conf.ellipse(a,b,phi,df1,df2,p[2], k) + m
# Set up the plot region from the 75% ellipse scaled up, then overlay one
# ellipse per confidence level.
plot(el7*1.8,type="n",xlab="Different confidence ellipses",ylab="")
lines(conf.ellipse(a,b,phi,df1,df2,p[1],60) + m,lty=2,col="red")
lines(conf.ellipse(a,b,phi,df1,df2,p[3],60) + m,lty=2,col="green")
lines(conf.ellipse(a,b,phi,df1,df2,p[4],60) + m,lty=2,col="blue")
lines(el7,lty=2,col="orange")
leg1 <- paste(as.character(p*100),rep("percent",length(p)),sep="")
# leg1 <- paste(as.character(p*100),rep("%",length(p)),sep="")
col1 <- c("red", "orange","green","blue")
legend(x="bottom",leg1,col=col1,
       text.col="black",lty=c(2,2,2,2), merge=TRUE, bg='white', cex=0.9)
par(opar)
for(ii in 0:15){ x <- ellipseC(40,1,2,phi=pi/15*ii);lines(x,col=ii%%3+1)}
|
# Recursively delete cells that are generated by points already contained
# in an earlier cell: for each cell i, every index listed after the first
# one identifies a redundant cell and is removed from the list.
cell_cnct <- function(i, cell) {
  if (length(cell[[i]]) > 1) {
    # cell[[i]][-1] are indices of cells subsumed by cell i; drop them.
    cell <- cell[-cell[[i]][-1]]
  }
  # Bug fix: the result of the recursive call was previously discarded, so
  # only the first level of removals ever took effect.
  if (i + 1 < length(cell)) {
    cell <- cell_cnct(i + 1, cell)
  }
  return(cell)
}
#-------------------------------------------------------------
# Adapt the automatic Betti-number estimation helpers to the modified
# distance matrix (derived from proposedMethodOnly).
# NOTE(review): still calls bootstrap.homology.mk2, which itself does not
# use the modified distance matrix.
distance_change_method <- function(X,maxdim,maxscale,samples, const.size=0){
  aggr1 <- matrix(0,length(X),1)
  aggr2 <- matrix(0,length(X),1)
  dimnames(aggr1) <- list(paste0("data-set", 1:length(X)),"proposed")
  dimnames(aggr2) <- dimnames(aggr1)
  for(t in 1:length(X)){
    cat("data set", t, "calculating\n")
    # Bootstrap sample size: 4/5 of the data unless fixed by const.size.
    # Bug fix: `size` was never assigned when const.size != 0, causing an
    # "object 'size' not found" error (cf. maxmin_distance_change_method).
    if(const.size==0){size<-X[[t]]$nsample*(4/5)}
    else{size<-const.size}
    B <- bootstrapper(X[[t]]$noizyX,size,samples)
    speak <- bootstrap.homology.mk2(B,maxdim,maxscale)
    m5 <- sapply(1:maxdim,function(d)speak[[paste0("dim",d,"dhole")]])
    aggr1[t,1] <- m5[1]
    aggr2[t,1] <- m5[2]
  }
  aggrs <- list(aggr1,aggr2)
  Xsize<-sapply(1:length(X), function(l){return(nrow(X[[l]][["noizyX"]]))})
  if(const.size==0){Bsize<-sapply(1:length(X), function(l){return(nrow(X[[l]][["noizyX"]])*(4/5))})}
  else{Bsize<-const.size}
  aggrs <- append(aggrs,list(Xsize=Xsize,Xsamples=length(X),
                             Bsize=Bsize,Bsamples=samples,
                             maxdim=maxdim,maxscale=maxscale))
  class(aggrs) <- "bettiComp"
  return(aggrs)
}
#------------------------------------------------------
# Compute persistent homology after the distance-matrix modification and
# count persistence-landscape peaks per dimension.
# Adapted from bootstrap.homology.mk2.
dist_changed_pl_peak_count <-function(X,maxdim,maxscale,const.band=0,maximum.thresh = F){
  require(TDA)
  # require(pracma)
  if(!("bootsSamples" %in% class(X))) stop("input must be bootsSamples")
  peak <- matrix(0,maxdim,length(X))
  # band <- ifelse(const.band > 0,const.band,hausdInterval(X, m=sample.size, B=times, alpha = (1-confidence)))
  tseq <- seq(0,maxscale,length.out = 1000)
  # One persistence diagram per bootstrap sample, using the modified metric.
  diags <- lapply(X,function(x)calc_dist_changed_pd(x,maxdim,maxscale))
  print(sapply(diags,function(diag)calcDiagCentroid.mk2(diag)[1]))
  # Noise band: largest diagram centroid unless a constant band is given.
  band <- ifelse(const.band==0,max(sapply(diags,function(diag)calcDiagCentroid.mk2(diag)[1])),const.band)
  print(band)
  for (t in 1:length(X)) {
    land <- lapply(1:maxdim,function(d)landscape(diags[[t]][[1]],dimension = d,KK = 1,tseq = tseq))
    # Optionally derive the threshold from the landscape maxima instead.
    if(maximum.thresh) band <- max(sapply(land,max))/4
    for(d in 1:maxdim){
      peak[d,t] <- calc.landscape.peak(X=land[[d]], thresh = (band*(2*pi)/surface_nshpere(d)), tseq=tseq)
    }
  }
  dimnames(peak) <- list(paste0("dim",1:maxdim),paste0("sample",1:length(X)))
  bootstrap.summary <- list(peak=peak)
  bootstrap.summary <- append(bootstrap.summary,c(band=band,show.hole.density(peak)))
  class(bootstrap.summary) <- "smoothPhom"
  return(bootstrap.summary)
}
#--------------------------------------------------------------
# Return the persistence diagram computed after modifying the distance
# function: distances involving the retained points (`idx`) are shrunk by
# `thresh` before a Rips filtration is built.
# th_rate: quantile used to derive `thresh` when const_th == 0;
# idx: indices whose distances are modified (derived from the cell
#      decomposition when left at the default 0).
calc_dist_changed_pd<-function(X, maxdim, maxscale, th_rate=0.8, const_th=0, idx=0){
  require(TDA)
  require(tidyverse)
  # Bug fix: `thresh` was left undefined whenever const_th != 0.
  if(const_th==0){thresh<-quantile_threshold(th_rate, X)}
  else{thresh<-const_th}
  if(idx==0){
    cell<-cell_set2(X, thresh)
    cnct<-connect2(1, cell, all = 1:nrow(X))
    red<-reduce_points(X, cnct)
    idx<-1:nrow(X) %>% .[-red[[2]]]
  }
  X_dist<-dist(X) %>% as.matrix()
  # Shrink all distances touching `idx` (their rows plus the remaining
  # columns), clamping at zero.
  X_dist[idx, ]<-X_dist[idx, ]-thresh
  X_dist[-idx, idx]<-X_dist[-idx, idx]-thresh
  X_dist[X_dist < 0]<-0
  # Bug fix: maxdimension/maxscale were previously hard-coded to 2 and 3,
  # silently ignoring the function's own arguments.
  filt<-ripsFiltration(X = X_dist, maxdimension = maxdim, maxscale = maxscale, dist = "arbitrary", library = "Dionysus",
                       printProgress = T)
  pd<-filtrationDiag(filtration = filt, maxdimension = maxdim, library = "Dionysus", printProgress = T)
  return(pd)
}
#--------------------------------------------------------------
# Shift selected entries of a distance matrix downward by `thresh`.
# Rows indexed by `idx` are reduced once, columns `idx` of the remaining
# rows are reduced once, and the idx-by-idx block is reduced a second time
# (so distances between two selected points shrink by 2 * thresh overall,
# matching the original update order). Negative results are clamped to 0.
dist_mat_change <- function(X_dist, idx, thresh) {
  shifted <- X_dist
  shifted[idx, ] <- shifted[idx, ] - thresh
  shifted[-idx, idx] <- shifted[-idx, idx] - thresh
  shifted[idx, idx] <- shifted[idx, idx] - thresh
  pmax(shifted, 0)
}
#-----------------------------------------------------------------
# Compute PDs while varying the fraction of points whose distances are
# manipulated (manipulation amount fixed at `thresh`).
# `rates` is the set of fractions; repeating the same rate keeps the
# fraction fixed while varying the manipulated point set.
# Returns one PD (with its index set appended) per element of `rates`.
select_rate_change_pd<-function(X, rates, thresh){
  idx_list<-lapply(rates, function(rate)sample(nrow(X), nrow(X)*rate))
  X_dist<-dist(X) %>% as.matrix()
  X_dists<-lapply(idx_list, function(idx)dist_mat_change(X_dist = X_dist, idx = idx, thresh = thresh))
  # Bug fix: the index set was previously referenced through an undefined
  # variable `idx`; iterate over positions so each PD is paired with the
  # index set that actually produced its distance matrix.
  pds<-lapply(seq_along(X_dists), function(i){
    pd<-ripsFiltration(X = X_dists[[i]], maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(c(pd, list(idx_list[[i]])))
  })
  return(pds)
}
#-----------------------------------------------------------------
# Compute PDs for a fixed set of manipulated points while varying the
# manipulation amount. `thes` is the set of amounts to apply, one PD each.
manupilate_dist_change_pd<-function(X, idx, thes){
  X_dist<-dist(X) %>% as.matrix()
  # Bug fix: the lambda argument was misnamed `idx` (shadowing the real
  # point set) and the whole vector `thes` was passed as the threshold;
  # apply each single amount to the fixed point set instead.
  X_dists<-lapply(thes, function(th)dist_mat_change(X_dist = X_dist, idx = idx, thresh = th))
  pds<-lapply(X_dists, function(dist){
    pd<-ripsFiltration(X = dist, maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(pd)
  })
  return(pds)
}
#-----------------------------------------------------------------------
# Extract subsamples from the modified distance matrix and compute one PD
# per subsample (manipulation amount fixed at `threth`).
# sub_rate: subsample fraction; n_pd: number of PDs to compute.
manupulated_dist_mat_subs_pd<-function(X, threth, sub_rate, n_pd){
  X_red<-cell_set2(x = X, thresh = threth) %>%
    connect2(i = 1, cell_p = ., all = 1:nrow(X)) %>%
    reduce_points(X, .)
  X_rme<-1:nrow(X) %>% .[-X_red[[2]]]
  # Bug fix: the threshold parameter is `threth`, but the undefined name
  # `thresh` was being passed to dist_mat_change().
  X_ched_dist<-dist(X) %>% as.matrix() %>%
    dist_mat_change(X_dist = ., idx = X_rme, thresh = threth)
  pds<-lapply(1:n_pd, function(k){
    idx<-sample(nrow(X), nrow(X)*sub_rate)
    pd<-ripsFiltration(X = X_ched_dist[idx, idx], maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(pd)
  })
  return(pds)
}
#---------------------------------------------
# Select landmark points via the maxmin strategy (as used for witness
# complexes): start from a random point, then repeatedly add the point
# farthest from the current landmark set.
# X: point cloud, or a distance matrix when d_mat = TRUE;
# n_land: number of landmarks. Returns the landmark row indices.
landmark_points<-function(X, n_land, d_mat=F){
  n_land<-as.integer(n_land)
  if(d_mat){X_dist<-X}else{X_dist<-dist(X) %>% as.matrix()}
  if(n_land == 0){return(numeric(0))}
  # First landmark: a uniformly random point.
  l_idx<-sample(nrow(X), 1)
  # Second landmark: the point farthest from the first.
  if(n_land >= 2){l_idx<-which.max(X_dist[l_idx, ]) %>% c(., l_idx)}
  if(n_land > 2){
    for (i in 1:(n_land-2)) {
      # Maxmin step: among non-landmarks, take the point whose minimum
      # distance to the landmark set is largest; the index is recovered
      # from the names attribute of which.max()'s result.
      # NOTE(review): this relies on X_dist having numeric row names
      # ("1", "2", ...), as produced by as.matrix(dist(X)); an unnamed
      # distance matrix passed with d_mat = TRUE would break here.
      l_idx<-apply(X_dist[-l_idx, l_idx], 1, min) %>% which.max() %>% attributes() %$% as.integer(.) %>% c(., l_idx)
    }
  }
  return(l_idx)
}
#-------------------------------------------------------------
# Betti-number estimation adapted to the modified distance matrix, using
# witness-complex landmark points (derived from proposedMethodOnly).
maxmin_distance_change_method <- function(X,maxdim,maxscale,samples, const.size=0, l_rate=0.15, n_vic=10, spar = seq(0,1,0.1)){
  # One result matrix per homology dimension.
  aggrs<-lapply(1:maxdim, function(k){
    aggr<-matrix(0,length(X),1)
    dimnames(aggr) <- list(paste0("data-set", 1:length(X)), "proposed")
    return(aggr)
  })
  for(t in 1:length(X)){
    cat("data set", t, "calculating\n")
    # Bootstrap sample size: 4/5 of the data unless fixed by const.size.
    if(const.size==0){size<-X[[t]]$nsample*(4/5)}
    else{size<-const.size}
    B <- usephacm:::bootstrapper(X[[t]]$noizyX,size,samples)
    speak <- maxmin_dist_changed_pl_peak_count(X = B, maxdim = maxdim, maxscale = maxscale, l_rate = l_rate, n_vic = n_vic, spar = spar)
    m5 <- sapply(1:maxdim,function(d)speak[[paste0("dim",d,"mhole")]])
    for (i in 1:maxdim) {
      aggrs[[i]][t,1]<-m5[i]
    }
  }
  aggrs <- append(aggrs,list(Xsize=sapply(1:length(X), function(l)nrow(X[[l]][["noizyX"]])),Xsamples=length(X),
                             Bsize=size,Bsamples=samples,
                             maxdim=maxdim,maxscale=maxscale))
  class(aggrs) <- "bettiComp"
  return(aggrs)
}
#------------------------------------------------
# Compute PH after the landmark-based distance modification and count
# persistence-landscape local maxima per dimension (adapted from
# bootstrap.homology.mk2; uses witness-complex landmark points).
# TODO: calc.landscape.peak (BootstrapHomology-mk1.R) should be packaged
# and substituted here.
# Band changed from usephacm:::calc_diag_centroid(diag) to
# persistence_weighted_mean(diag).
maxmin_dist_changed_pl_peak_count <-function(X, maxdim, maxscale, const.band=0, maximum.thresh = F, l_rate=0.15, n_vic=10, spar = seq(0,1,0.1)){
  require(TDA)
  if(!("bootsSamples" %in% class(X))) stop("input must be bootsSamples")
  peak <- matrix(0,maxdim,length(X))
  tseq <- seq(0,maxscale,length.out = 1000)
  # One persistence diagram per bootstrap sample under the modified metric.
  diags <- lapply(X,function(x)maxmin_dist_changed_pd(x, maxdim, maxscale, l_rate, n_vic)[[1]])
  bands<-sapply(diags,function(diag)persistence_weighted_mean(diag))
  print(bands)
  # Noise band: largest persistence-weighted mean unless fixed by caller.
  band <- ifelse(const.band==0, max(bands),const.band)
  print(band)
  for (t in 1:length(X)) {
    land <- lapply(1:maxdim,function(d)landscape(diags[[t]],dimension = d,KK = 1,tseq = tseq))
    # Optionally derive the threshold from the landscape maxima instead.
    if(maximum.thresh) band <- max(sapply(land,max))/4
    for(d in 1:maxdim){
      peak[d,t] <- calc.landscape.peak(X=land[[d]], thresh = (band*(2*pi)/surface_nshpere(d)), tseq=tseq, spar = spar)
    }
  }
  dimnames(peak) <- list(paste0("dim",1:maxdim),paste0("sample",1:length(X)))
  bootstrap.summary <- list(peak=peak)
  bootstrap.summary <- append(bootstrap.summary,c(band=band,show.hole.density(peak)))
  class(bootstrap.summary) <- "smoothPhom"
  return(bootstrap.summary)
}
#--------------------------------------------------------------
# Return the persistence diagram after modifying the distance matrix
# around witness-complex landmark points.
# l_rate: fraction of points used as landmarks; n_vic: number of nearest
# neighbours whose mean distance sets each landmark's shrink amount.
# Returns the PD plus the landmark indices (rewritten on TDAstats/ripser).
maxmin_dist_changed_pd<-function(X, maxdim, maxscale, l_rate=0.15, n_vic=10){
  require(TDA)
  require(tidyverse)
  require(TDAstats)
  X_dist<-dist(X) %>% as.matrix()
  # Landmark selection; l_idx holds the landmark indices.
  l_idx<-landmark_points(X = X_dist, n_land = nrow(X)*l_rate, d_mat = T)
  # Mean distance to the n_vic nearest neighbours of each landmark
  # (position 1 of the sorted row is the point itself, hence 2:(n_vic+1)).
  vics_dmean<-sapply(l_idx, function(k){
    # Bug fix: the original referenced the undefined name `nvic`.
    vic_dmean<-X_dist[k, ] %>% sort() %>% .[2:(n_vic+1)] %>% mean()
    names(vic_dmean)<-k
    return(vic_dmean)
  })
  # Shrink the landmark rows/columns by half the local mean distance.
  for (i in 1:length(l_idx)) {
    X_dist[l_idx[i], ]<-X_dist[l_idx[i], ]-vics_dmean[i]/2
    X_dist[, l_idx[i]]<-X_dist[, l_idx[i]]-vics_dmean[i]/2
  }
  X_dist[X_dist < 0]<-0
  pd<-TDAstats::calculate_homology(mat = X_dist, dim = maxdim, threshold = maxscale, format = "distmat")
  class(pd)<-"diagram"
  return(list(pd=pd, l_idx=l_idx))
}
#-----------------------------------------------
# Convert a TDAstats persistence matrix with columns (dimension, birth,
# death) into a TDA-style "diagram" object: attach the class, the maximum
# dimension, the scale range [0, max death] and conventional column names.
as_diag <- function(pd) {
  diag <- structure(
    pd,
    class = "diagram",
    maxdimension = max(pd[, 1]),
    scale = c(0, max(pd[, 3]))
  )
  colnames(diag) <- c("dimension", "Birth", "Death")
  diag
}
#------------------------------------------------
# Compute a multi-resolution PH (MPH): for landmark rows/columns the
# normalized distance is replaced by 1 - exp(-(d/a)^2), where d is the
# original distance and a is a hyper-parameter.
# l_rate: landmark fraction. Returns the PD, the landmark indices and the
# elapsed computation time.
multiresolut_homology<-function(X, maxdim, l_rate=0.3, a=1){
  X_dist<-dist(X) %>% as.matrix()
  # Landmark selection; l_idx holds the landmark indices.
  l_idx<-landmark_points(X = X_dist, n_land = nrow(X)*l_rate, d_mat = T)
  normed_Xdist<-X_dist/max(X_dist)
  # NOTE(review): the replacement uses the raw X_dist, not the normalized
  # matrix, so landmark entries live on a different scale than the rest —
  # confirm this mixing of scales is intentional.
  for (i in l_idx) {
    normed_Xdist[i, ]<-1-exp(-(X_dist[i, ]/a)^2)
    normed_Xdist[, i]<-1-exp(-(X_dist[, i]/a)^2)
  }
  time<-system.time(pd<-TDAstats::calculate_homology(mat = normed_Xdist, dim = maxdim, threshold = 1, format = "distmat"))
  return(list(pd=pd, l_idx=l_idx, time=time))
}
#------------------------------------------------------
# Draw a straight segment between the points x = (x1, y1) and y = (x2, y2)
# on the current plot; a thin wrapper around graphics::lines() that
# forwards any extra graphical parameters.
draw_line <- function(x, y, ...) {
  seg_x <- c(x[1], y[1])
  seg_y <- c(x[2], y[2])
  lines(seg_x, seg_y, ...)
}
#--------------------------------------------------------------
# Rescale a distance matrix to [0, 1] by its maximum, then pass the rows
# and columns of the landmark indices through the FRI kernel
# 1 - exp(-(d/eta)^2). Updates are applied in place, row then column per
# landmark, so entries shared by the row and column sweeps (and by several
# landmarks) are transformed repeatedly — mirroring the original order.
# X_dist: distance matrix; lands: landmark indices; eta: FRI decay.
dist_fri_change <- function(X_dist, lands, eta) {
  d <- X_dist / max(X_dist)
  for (lm in lands) {
    d[lm, ] <- 1 - exp(-(d[lm, ] / eta)^2)
    d[, lm] <- 1 - exp(-(d[, lm] / eta)^2)
  }
  d[d < 0] <- 0
  d
}
#--------------------------------------------------------------
# Weighted variant: for each landmark index, replace its row and column by
# d * (1 - exp(-(d/eta)^2)), where d is always the ORIGINAL distance (the
# transform is never compounded; the last write to a shared entry wins).
# Returns the input unchanged when no landmarks are supplied; negative
# results are clamped to zero.
dist_wvr_change <- function(X_dist, lands, eta) {
  if (length(lands) == 0) {
    return(X_dist)
  }
  damp <- function(d) d * (1 - exp(-(d / eta)^2))
  out <- X_dist
  for (lm in lands) {
    out[lm, ] <- damp(X_dist[lm, ])
    out[, lm] <- damp(X_dist[, lm])
  }
  out[out < 0] <- 0
  out
}
#------------------------------------------------
# Compute a weighted PH (WPH): distances touching landmark points are
# multiplied by 1 - exp(-(d/eta)^2) before running ripser.
# eta: decay hyper-parameter; l_rate: landmark fraction. Both may also be
# supplied through `...` when the named arguments are missing.
# Returns the PD, the landmark indices and the elapsed time, with
# l_rate/eta recorded as attributes of the result.
weighted_homology<-function(X, maxdim, maxscale, l_rate, eta, ...){
  extra_v<-list(...)
  # Allow l_rate/eta to arrive through `...` instead of by name.
  if(missing(l_rate)){l_rate<-extra_v$l_rate}
  if(missing(eta)){eta<-extra_v$eta}
  X_dist<-dist(X) %>% as.matrix()
  # Landmark selection; l_idx holds the landmark indices.
  l_idx<-landmark_points(X = X_dist, n_land = nrow(X)*l_rate, d_mat = T)
  X_chng_dist<-dist_wvr_change(X_dist = X_dist, lands = l_idx, eta = eta)
  time<-system.time(pd<-TDAstats::calculate_homology(mat = X_chng_dist, dim = maxdim, threshold = maxscale, format = "distmat"))
  pds<-list(pd=pd, l_idx=l_idx, time=time)
  attr(pds, "l_rate")<-l_rate
  attr(pds, "eta")<-eta
  return(pds)
}
#----------------------------------
# Persistence (death - birth) of every feature of a given dimension in a
# persistence diagram (fixed version of the usephacm helper).
# pd: diagram-like matrix with columns (dimension, birth, death);
# dim: a single numeric homology dimension. The queried dimension is
# attached to the result as the "pers_dim" attribute.
calc_per <- function (pd, dim){
  # Bug fix: the original called the undefined/defunct `is_numeric()`;
  # stopifnot() with base is.numeric() keeps the fail-fast behaviour.
  stopifnot(length(dim) == 1, is.numeric(dim))
  pers <- pd[pd[, 1] == dim, 3] - pd[pd[, 1] == dim, 2]
  attr(pers, "pers_dim") <- dim
  return(pers)
}
#---------------------------------------
# Experimental helpers ----
# Replace distances feature-by-feature from the top of the diagram ----
dist_element_replace1<-function(pd, dim, distmat){
  # Rows of pd belonging to the requested homology dimension.
  h2_rows<-which(pd[,1]==dim)
  dist_cp<-distmat
  # Zero the upper triangle so each pairwise distance is matched only once
  # when searching for the entry equal to a birth time.
  dist_cp[upper.tri(dist_cp)]<-0
  # NOTE(review): exact float equality between birth times and matrix
  # entries — this only works when pd was built from this very distmat,
  # and sapply() returns a ragged result if a birth value matches more
  # than one entry. Confirm inputs before reuse.
  birth_e<-sapply(h2_rows, function(i)which(dist_cp == pd[i,2], arr.ind = T))
  dist_cp<-distmat
  for (i in 1:ncol(birth_e)) {
    # Decay chosen so 1 - exp(-(d/c_eta)^2) reaches 0.9 at the feature's
    # death time (since (death/c_eta)^2 = log(10)).
    c_eta<-pd[h2_rows[i], 3]/sqrt(log(10))
    dist_cp[birth_e[1,i], ]<-distmat[birth_e[1,i],]*( 1-exp( -(distmat[birth_e[1,i], ]/c_eta)^2 ) )
    dist_cp[, birth_e[1,i]]<-distmat[, birth_e[1,i]]*( 1-exp( -(distmat[, birth_e[1,i]]/c_eta)^2 ) )
    dist_cp[birth_e[2,i], ]<-distmat[birth_e[2,i], ]*( 1-exp( -(distmat[birth_e[2,i], ]/c_eta)^2 ) )
    dist_cp[, birth_e[2,i]]<-distmat[, birth_e[2,i]]*( 1-exp( -(distmat[, birth_e[2,i]]/c_eta)^2 ) )
  }
  return(dist_cp)
}
#---------------------------------------
# Distance manipulation with duplicates removed: each matrix index keeps
# only its most persistent feature before the damping is applied ----
dist_element_replace_nodupl<-function(pd, dim, distmat){
  h2_rows<-which(pd[,1]==dim)
  dist_cp<-distmat
  # Zero the upper triangle so each pairwise distance is matched once.
  dist_cp[upper.tri(dist_cp)]<-0
  # NOTE(review): exact float match of birth times against matrix entries;
  # pd must originate from this distmat.
  birth_e<-sapply(h2_rows, function(i)which(dist_cp == pd[i,2], arr.ind = T))
  dist_cp<-distmat
  # Bug fix: p_set was initialised as NULL via c(), so the very first
  # `p_set[, 1]` subscript raised "incorrect number of dimensions"; an
  # empty 3-column matrix makes the %in% checks start out FALSE instead.
  p_set<-matrix(numeric(0), ncol = 3)
  for (j in 1:ncol(birth_e)) {
    pers<-pd[h2_rows[j], 3] - pd[h2_rows[j], 2]
    # Keep, per point index, only the feature with the largest persistence.
    if(birth_e[1, j] %in% p_set[, 1]){
      if(pers > p_set[p_set[, 1]==birth_e[1, j], 3]){
        death<-pd[h2_rows[j], 3]
        p_set[p_set[, 1]==birth_e[1, j], ]<-c(birth_e[1, j], death, pers)
      }
    }else{
      death<-pd[h2_rows[j], 3]
      p_set<-rbind(p_set, c(birth_e[1, j], death, pers))
    }
    if(birth_e[2, j] %in% p_set[, 1]){
      if(pers > p_set[p_set[, 1]==birth_e[2, j], 3]){
        death<-pd[h2_rows[j], 3]
        p_set[p_set[, 1]==birth_e[2, j], ]<-c(birth_e[2, j], death, pers)
      }
    }else{
      death<-pd[h2_rows[j], 3]
      p_set<-rbind(p_set, c(birth_e[2, j], death, pers))
    }
  }
  colnames(p_set)<-c("p_idx", "death", "persistence")
  p_set %<>% as_tibble()
  for (i in 1:nrow(p_set)) {
    # 90%-at-death decay, as in dist_element_replace1.
    c_eta<-p_set$death[i]/sqrt(log(10))
    dist_cp[p_set$p_idx[i], ]<-distmat[p_set$p_idx[i], ]*( 1-exp( -(distmat[p_set$p_idx[i], ]/c_eta)^2 ) )
    dist_cp[, p_set$p_idx[i]]<-distmat[, p_set$p_idx[i]]*( 1-exp( -(distmat[, p_set$p_idx[i]]/c_eta)^2 ) )
  }
  return(dist_cp)
}
#------------------------------
# Distance histogram of the data with the cells containing birth times
# (magenta) and death times (cyan) shaded; optional vertical reference
# lines (eta, inflection point, 90% point, per-feature birth/death pairs)
# and an overlaid persistence barcode.
# breaks: histogram break points (required).
colored_birth_death_cell_hist<-
  function(data, pd, dim, breaks, m_title, distmat = F, eta_line = T, eta, barcode = F, 
           inflec_line = F, inflec = eta*sqrt(3/2), tile_line = F, ninty_tile=eta*sqrt(log(10)), pd_line = F){
    if(inherits(data, "DistmatPD")){
      pd<-data$get_pd()
      data<-data$distmat
      distmat<-T
    }
    if(missing(breaks)){stop("breaks is missing.")}
    if( !("dimension" %in% colnames(pd)) ){stop("pd isn't diagram.")}
    if(missing(m_title)){m_title<-substitute(data)}
    # Extract the persistent homology features of dimension `dim`.
    pd_Hd<-pd[pd[,1]==dim, ]
    # A single matching feature drops to a vector; promote to a 1-row matrix.
    if( !(is.matrix(pd_Hd)) ){pd_Hd<-as.matrix(pd_Hd) %>% t()}
    # Find the histogram cells containing at least one birth time.
    birth_cell<-map_lgl(seq_along(breaks[-1]), function(i){some(pd_Hd[, 2], ~{(.x > breaks[i]) & (.x <= breaks[i+1])})}) %>% which()
    # Find the histogram cells containing at least one death time.
    death_cell<-map_lgl(seq_along(breaks[-1]), function(i){some(pd_Hd[, 3], ~{(.x > breaks[i]) & (.x <= breaks[i+1])})}) %>% which()
    # Fill colours for birth cells; NA leaves a cell unfilled.
    # "#e4007f" is magenta; the trailing 4d is 30% alpha.
    birth_cell_col<-rep(NA, length = (length(breaks)-1))
    birth_cell_col[birth_cell]<-"#e4007f4d"
    # Fill colours for death cells; NA leaves a cell unfilled.
    # "#00a0e9" is cyan; the trailing 4d is 30% alpha.
    death_cell_col<-rep(NA, length = (length(breaks)-1))
    death_cell_col[death_cell]<-"#00a0e94d"
    # Draw the histogram (data may already be a distance matrix).
    if(distmat){
      # Shade the cells containing birth times in magenta.
      hist_birth<-data %>% as.dist() %>% hist(breaks = breaks, col = birth_cell_col, main = m_title)
      # Overlay the cells containing death times in cyan.
      hist_death<-data %>% as.dist() %>% hist(breaks = breaks, col = death_cell_col, main = "", add = T)
    }
    else{
      # Shade the cells containing birth times in magenta.
      hist_birth<-data %>% dist() %>% hist(breaks = breaks, col = birth_cell_col, main = m_title)
      # Overlay the cells containing death times in cyan.
      hist_death<-data %>% dist() %>% hist(breaks = breaks, col = death_cell_col, main = "", add = T)
    }
    # Vertical line where distance equals eta.
    if(eta_line && !missing(eta)){
      abline(v = eta, col = "green3", lwd = 2)
      text(x = eta*1.1, y = max(hist_birth$counts)*0.9, labels = expression(plain(distance) == eta), pos = 3)
    }
    # Vertical line at the kernel's inflection point.
    if(inflec_line){
      abline(v = inflec, col = "deeppink", lwd = 2)
      text(x = inflec*1.1, y = max(hist_birth$counts)*0.8, labels = "inflection point", pos = 3)
    }
    # Vertical line at the kernel's 90% point.
    if(tile_line){
      abline(v = ninty_tile, col = "darkviolet", lwd = 2)
      text(x = ninty_tile*1.1, y = max(hist_birth$counts)*0.7, labels = "90% point", pos = 3)
    }
    # Plot birth/death times as paired vertical lines, one colour per feature.
    if(pd_line){
      for (i in seq_len(nrow(pd_Hd))) {
        draw_line(x = c(pd_Hd[i, 2], 0), y = c(pd_Hd[i, 2], max(hist_birth$counts)*0.6), col = rainbow(nrow(pd_Hd))[i] )
        draw_line(x = c(pd_Hd[i, 3], 0), y = c(pd_Hd[i, 3], max(hist_birth$counts)*0.6), col = rainbow(nrow(pd_Hd))[i] )
      }
    }
    if(barcode){
      par(new = T)
      plot_per_barc(pd = pd, dim = dim, xlim = range(breaks), col = "red")
    }
    return(lst(hist_birth, hist_death))
  }
#---------------------------------------
# Draw a persistence barcode with base graphics so further annotations can
# be layered on afterwards.
# dim may hold several dimensions (defaults to all present in pd); a
# single `col` value is recycled across dimensions.
plot_per_barc<-function(pd, dim, xlim, ylim, col, lwd = 2, ...){
  if( !("dimension" %in% colnames(pd)) ){stop("pd mayn't be persistence diagram.")}
  if(missing(dim)){dim<-unique(pd[, 1])}
  if(!all(dim %in% pd[, 1])){stop("dim isn't correct dimension in persistence diagram.")}
  pd_Hd<-pd[(pd[, 1] %in% dim), ]
  # A single matching feature drops to a vector; promote to a 1-row matrix.
  if( !(is.matrix(pd_Hd)) ){pd_Hd<-as.matrix(pd_Hd) %>% t()}
  # if(missing(xlim)){xlim <- c(min(pd_Hd[, 2]), max(pd_Hd[, 3]))}
  # if(missing(ylim)){ylim <- c(0, nrow(pd_Hd)+1)}
  # NOTE(review): fill_ifmissing presumably assigns xlim/ylim/col defaults
  # in this frame when the caller omitted them (helper defined elsewhere)
  # — confirm its semantics before relying on them.
  fill_ifmissing(xlim = c(min(pd_Hd[, 2]), max(pd_Hd[, 3])), ylim = c(0, nrow(pd_Hd)+1),
                 col = c(1, 2, 4, 3, 5:(5+max(0, max(dim)-3)) )[1:(max(dim)+1)] )
  plot(x = pd_Hd[, 2:3], xlim = xlim, ylim = ylim, type = "n", xlab = "", ylab = "",
       xaxt = "n", yaxt = "n")
  graphics::axis(1)
  graphics::title(xlab = "time")
  if(length(col) == 1){col<-rep(col, max(dim)+1)}
  # One horizontal bar per feature, from birth to death, at height j,
  # coloured by the feature's homology dimension.
  for (j in seq_len(nrow(pd_Hd))) {
    draw_line(x = c(pd_Hd[j, 2], j), y = c(pd_Hd[j, 3], j), col = col[pd_Hd[j, 1]+1], lwd = lwd, ...)
  }
}
#-----------------------------------------------
# Modify a distance matrix with the decay eta derived from the features of
# one homology dimension: the median or mean of the birth-death midpoints,
# or the mean birth/death time, depending on `type`.
# dim: a single homology dimension.
# Returns the altered matrix together with all candidate eta values.
mid_median_attenu<-function(pd, dim, distmat, type = c("median", "mean", "birth", "death")){
  assertthat::assert_that((length(dim)==1) && is.numeric(dim))
  pd_Hd<-pd[pd[,1]==dim, ]
  # Bug fix: with a single feature of this dimension the subset drops to a
  # vector and apply() fails; promote it back to a one-row matrix (the same
  # guard used by colored_birth_death_cell_hist).
  if(!is.matrix(pd_Hd)){pd_Hd<-t(as.matrix(pd_Hd))}
  pd_Hd_mid<-apply(pd_Hd, 1, function(x){(x[2]+x[3])/2})
  pd_Hd_mid_med<-median(pd_Hd_mid)
  pd_Hd_mid_mean<-mean(pd_Hd_mid)
  pd_Hd_birth_mean<-mean(pd_Hd[, 2])
  pd_Hd_death_mean<-mean(pd_Hd[, 3])
  type<-match.arg(type)
  eta<-switch(type,
              median = pd_Hd_mid_med,
              mean = pd_Hd_mid_mean,
              birth = pd_Hd_birth_mean,
              death = pd_Hd_death_mean
  )
  # Only distances not exceeding eta are damped by the WPH kernel.
  distmat[distmat <= eta] <- distmat[distmat <= eta]*( 1-exp(-(distmat[distmat <= eta]/eta)^2) )
  return(lst(altdist=distmat, median=pd_Hd_mid_med, mean=pd_Hd_mid_mean, birth = pd_Hd_birth_mean, death = pd_Hd_death_mean, type=type))
}
#---------------------------------------
# Kernel used to reshape filtration distances:
# mph_exp(d, eta) = d * (1 - exp(-(d/eta)^2)); vectorized in d.
mph_exp <- function(d, eta) {
  damping <- 1 - exp(-(d / eta)^2)
  d * damping
}
#-----------------------------------------------
# Modify a distance matrix using eta = mean of the birth-death midpoints
# (or the mean birth time): distances up to eta follow the mph_exp kernel,
# while distances between eta and the mean death time are mapped by the
# line joining (eta, mph_exp(eta, eta)) to the fixed point at the mean
# death time.
# dim: a single homology dimension.
mid_mean_attenu_slope<-function(pd, dim, distmat, type = c("mean", "birth")){
  assertthat::assert_that((length(dim)==1) && is.numeric(dim))
  # NOTE(review): if only one feature has this dimension, pd_Hd drops to a
  # vector and apply() below fails — confirm inputs always have >= 2 rows.
  pd_Hd<-pd[pd[,1]==dim, ]
  pd_Hd_mid<-apply(pd_Hd, 1, function(x){(x[2]+x[3])/2})
  pd_Hd_death_mean<-mean(pd_Hd[, 3])
  pd_Hd_birth_mean<-mean(pd_Hd[, 2])
  type<-match.arg(type)
  eta<-switch (type,
               mean = mean(pd_Hd_mid),
               birth = pd_Hd_birth_mean
  )
  # Solve the 2x2 linear system for the slope and intercept of the
  # connecting segment.
  slp_seg<-solve(matrix(c(eta, pd_Hd_death_mean, 1, 1), 2, 2), matrix(c(mph_exp(eta, eta), pd_Hd_death_mean)))
  distmat[distmat <= eta] %<>% mph_exp(eta)
  distmat[(distmat > eta) & (distmat <= pd_Hd_death_mean)] %<>% multiply_by(slp_seg[1]) %>% add(slp_seg[2])
  return(lst(altdist=distmat, mid_mean=mean(pd_Hd_mid), birth_mean=pd_Hd_birth_mean, death_mean=pd_Hd_death_mean, type=type))
}
|
/functions_scripts/dist_ch_func.R
|
no_license
|
jetstreamokayasu/distance_ph
|
R
| false
| false
| 26,003
|
r
|
# Recursively delete cells that are generated by points already contained
# in an earlier cell: for each cell i, every index listed after the first
# one identifies a redundant cell and is removed from the list.
cell_cnct <- function(i, cell) {
  if (length(cell[[i]]) > 1) {
    # cell[[i]][-1] are indices of cells subsumed by cell i; drop them.
    cell <- cell[-cell[[i]][-1]]
  }
  # Bug fix: the result of the recursive call was previously discarded, so
  # only the first level of removals ever took effect.
  if (i + 1 < length(cell)) {
    cell <- cell_cnct(i + 1, cell)
  }
  return(cell)
}
#-------------------------------------------------------------
# Adapt the automatic Betti-number estimation helpers to the modified
# distance matrix (derived from proposedMethodOnly).
# NOTE(review): still calls bootstrap.homology.mk2, which itself does not
# use the modified distance matrix.
distance_change_method <- function(X,maxdim,maxscale,samples, const.size=0){
  aggr1 <- matrix(0,length(X),1)
  aggr2 <- matrix(0,length(X),1)
  dimnames(aggr1) <- list(paste0("data-set", 1:length(X)),"proposed")
  dimnames(aggr2) <- dimnames(aggr1)
  for(t in 1:length(X)){
    cat("data set", t, "calculating\n")
    # Bootstrap sample size: 4/5 of the data unless fixed by const.size.
    # Bug fix: `size` was never assigned when const.size != 0, causing an
    # "object 'size' not found" error (cf. maxmin_distance_change_method).
    if(const.size==0){size<-X[[t]]$nsample*(4/5)}
    else{size<-const.size}
    B <- bootstrapper(X[[t]]$noizyX,size,samples)
    speak <- bootstrap.homology.mk2(B,maxdim,maxscale)
    m5 <- sapply(1:maxdim,function(d)speak[[paste0("dim",d,"dhole")]])
    aggr1[t,1] <- m5[1]
    aggr2[t,1] <- m5[2]
  }
  aggrs <- list(aggr1,aggr2)
  Xsize<-sapply(1:length(X), function(l){return(nrow(X[[l]][["noizyX"]]))})
  if(const.size==0){Bsize<-sapply(1:length(X), function(l){return(nrow(X[[l]][["noizyX"]])*(4/5))})}
  else{Bsize<-const.size}
  aggrs <- append(aggrs,list(Xsize=Xsize,Xsamples=length(X),
                             Bsize=Bsize,Bsamples=samples,
                             maxdim=maxdim,maxscale=maxscale))
  class(aggrs) <- "bettiComp"
  return(aggrs)
}
#------------------------------------------------------
#距離行列変更後、PH計算--------------------------------
#bootstrap.homology.mk2から変形
# Compute PH after the distance-matrix change and count persistence-landscape
# peaks (adapted from bootstrap.homology.mk2).
# X: list of bootstrap subsamples; must carry class "bootsSamples"
# maxdim: highest homology dimension to count
# maxscale: upper filtration bound; the landscape is sampled on [0, maxscale]
# const.band: if > 0, use this fixed band instead of the estimated one
# maximum.thresh: if TRUE, re-derive the band per sample from the landscape maxima
# Returns a "smoothPhom" object with per-dimension peak counts and the band.
dist_changed_pl_peak_count <-function(X,maxdim,maxscale,const.band=0,maximum.thresh = F){
  require(TDA)
  # require(pracma)
  if(!("bootsSamples" %in% class(X))) stop("input must be bootsSamples")
  peak <- matrix(0,maxdim,length(X))
  # band <- ifelse(const.band > 0,const.band,hausdInterval(X, m=sample.size, B=times, alpha = (1-confidence)))
  tseq <- seq(0,maxscale,length.out = 1000)
  # one persistence diagram per subsample, computed on the altered distances
  diags <- lapply(X,function(x)calc_dist_changed_pd(x,maxdim,maxscale))
  print(sapply(diags,function(diag)calcDiagCentroid.mk2(diag)[1]))
  # band = largest centroid statistic across subsamples unless a constant was given
  band <- ifelse(const.band==0,max(sapply(diags,function(diag)calcDiagCentroid.mk2(diag)[1])),const.band)
  print(band)
  for (t in 1:length(X)) {
    land <- lapply(1:maxdim,function(d)landscape(diags[[t]][[1]],dimension = d,KK = 1,tseq = tseq))
    if(maximum.thresh) band <- max(sapply(land,max))/4
    for(d in 1:maxdim){
      # peak threshold scales the band by the d-sphere surface -- TODO confirm rationale
      peak[d,t] <- calc.landscape.peak(X=land[[d]], thresh = (band*(2*pi)/surface_nshpere(d)), tseq=tseq)
    }
  }
  dimnames(peak) <- list(paste0("dim",1:maxdim),paste0("sample",1:length(X)))
  bootstrap.summary <- list(peak=peak)
  bootstrap.summary <- append(bootstrap.summary,c(band=band,show.hole.density(peak)))
  class(bootstrap.summary) <- "smoothPhom"
  return(bootstrap.summary)
}
#--------------------------------------------------------------
#距離関数変更後のパーシステント図を返す------------------------
# Return the persistence diagram computed on the altered distance matrix.
# X: point cloud (rows = points)
# maxdim: highest homology dimension to compute
# maxscale: upper filtration bound
# th_rate: quantile rate for deriving the shrink threshold when const_th == 0
# const_th: fixed threshold; 0 means "estimate via quantile_threshold()"
# idx: indices of points whose distances are shrunk; 0 means "derive them from
#      the cell decomposition (cell_set2/connect2/reduce_points)"
# Returns the diagram produced by TDA::filtrationDiag.
calc_dist_changed_pd<-function(X, maxdim, maxscale, th_rate=0.8, const_th=0, idx=0){
  require(TDA)
  require(tidyverse)
  # bug fix: 'thresh' was undefined whenever a constant threshold was supplied
  if(const_th==0){thresh<-quantile_threshold(th_rate, X)}
  else{thresh<-const_th}
  if(idx==0){
    cell<-cell_set2(X, thresh)
    cnct<-connect2(1, cell, all = 1:nrow(X))
    red<-reduce_points(X, cnct)
    idx<-1:nrow(X) %>% .[-red[[2]]]
  }
  X_dist<-dist(X) %>% as.matrix()
  # shrink every distance involving a selected point, clamping at zero
  X_dist[idx, ]<-X_dist[idx, ]-thresh
  X_dist[-idx, idx]<-X_dist[-idx, idx]-thresh
  X_dist[X_dist < 0]<-0
  # bug fix: the maxdim/maxscale arguments were ignored (hard-coded 2 and 3)
  filt<-ripsFiltration(X = X_dist, maxdimension = maxdim, maxscale = maxscale, dist = "arbitrary", library = "Dionysus",
                       printProgress = T)
  pd<-filtrationDiag(filtration = filt, maxdimension = maxdim, library = "Dionysus", printProgress = T)
  return(pd)
}
#--------------------------------------------------------------
#距離行列において指定したインデックスの値を変化させる
# Shrink the distance-matrix entries involving the given indices.
# Every entry with its row OR column in idx loses thresh once; entries with
# both row AND column in idx therefore lose 2*thresh, matching the original
# three-step update. Results are clamped at zero.
# X_dist: distance matrix; idx: indices to alter; thresh: amount subtracted
# Returns the altered matrix.
dist_mat_change<-function(X_dist, idx, thresh){
  altered <- X_dist
  altered[idx, ] <- altered[idx, ] - thresh
  altered[, idx] <- altered[, idx] - thresh
  altered[altered < 0] <- 0
  return(altered)
}
#-----------------------------------------------------------------
#距離操作量固定、操作点数の割合を変えてPDを計算する関数-----------
#ratesは操作点数の割合の集合。すべて同一の割合にすれば割合を固定し、操作対象点を変えて計算できる。
#ratesの要素数分PDを計算
# Compute PDs with the attenuation amount fixed, varying the FRACTION of
# manipulated points. 'rates' is the set of fractions; using the same value
# repeatedly recomputes with different randomly chosen target points.
# X: point cloud; rates: fractions of points to manipulate; thresh: attenuation
# Returns one element per rate: the diagram components followed by the list of
# manipulated indices.
select_rate_change_pd<-function(X, rates, thresh){
  idx_list<-lapply(rates, function(rate)sample(nrow(X), nrow(X)*rate))
  X_dist<-dist(X) %>% as.matrix()
  X_dists<-lapply(idx_list, function(idx)dist_mat_change(X_dist = X_dist, idx = idx, thresh = thresh))
  # bug fix: the inner function referenced 'idx', which was not in scope;
  # iterate the altered matrices and their index sets together instead
  pds<-Map(function(dist, idx){
    pd<-ripsFiltration(X = dist, maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(c(pd, list(idx)))
  }, X_dists, idx_list)
  return(pds)
}
#-----------------------------------------------------------------
#操作対象点固定、操作量を変化させてPH計算する関数----------------
#thesは操作量の集合
# Compute PHs with the manipulated points fixed, varying the attenuation amount.
# X: point cloud
# idx: indices of the points whose distances are attenuated
# thes: vector of attenuation amounts (one PD per element)
# Returns a list of persistence diagrams.
manupilate_dist_change_pd<-function(X, idx, thes){
  X_dist<-dist(X) %>% as.matrix()
  # bug fix: the lambda shadowed 'idx' with a threshold value and passed the
  # whole 'thes' vector as the threshold; iterate thresholds explicitly and
  # keep the caller-supplied 'idx'
  X_dists<-lapply(thes, function(th)dist_mat_change(X_dist = X_dist, idx = idx, thresh = th))
  pds<-lapply(X_dists, function(dist){
    pd<-ripsFiltration(X = dist, maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(pd)
  })
  return(pds)
}
#-----------------------------------------------------------------------
#変化後の距離行列からサブサンプルを抽出。PDを計算する関数--------------------
#操作量固定
#sub_rateはサブサンプルの割合、n_pdは計算するPDの数
# Extract subsamples from the altered distance matrix and compute one PD per
# subsample (attenuation amount fixed).
# X: point cloud
# threth: attenuation threshold applied to the reduced-point rows/columns
# sub_rate: fraction of points per subsample
# n_pd: number of persistence diagrams to compute
# Returns a list of n_pd diagrams.
manupulated_dist_mat_subs_pd<-function(X, threth, sub_rate, n_pd){
  X_red<-cell_set2(x = X, thresh = threth) %>%
    connect2(i = 1, cell_p = ., all = 1:nrow(X)) %>%
    reduce_points(X, .)
  X_rme<-1:nrow(X) %>% .[-X_red[[2]]]
  # bug fix: 'thresh' was undefined -- the function's parameter is 'threth'
  X_ched_dist<-dist(X) %>% as.matrix() %>%
    dist_mat_change(X_dist = ., idx = X_rme, thresh = threth)
  pds<-lapply(1:n_pd, function(k){
    idx<-sample(nrow(X), nrow(X)*sub_rate)
    pd<-ripsFiltration(X = X_ched_dist[idx, idx], maxdimension = 2, maxscale = 3, dist = "arbitrary", library = "Dionysus",
                       printProgress = T) %>%
      filtrationDiag(filtration = ., maxdimension = 2, library = "Dionysus", printProgress = T)
    return(pd)
  })
  return(pds)
}
#---------------------------------------------
#ランドマーク点を決定する関数-----------------
#Wittness複体を参考に
#Xはポイントクラウドデータ、n_landはランドマーク点の数
#d_mat=TならXに距離行列を入れられる
# Choose landmark points by maxmin (farthest-point) sampling, as used for
# witness complexes.
# X: point cloud (rows = points), or a distance matrix when d_mat = TRUE
# n_land: number of landmarks to pick (coerced to integer)
# d_mat: set TRUE when X already is a distance matrix
# Returns the integer indices of the selected landmarks; numeric(0) if n_land == 0.
# NOTE(review): the first landmark is drawn at random, so results are stochastic.
landmark_points<-function(X, n_land, d_mat=F){
  n_land<-as.integer(n_land)
  if(d_mat){X_dist<-X}else{X_dist<-dist(X) %>% as.matrix()}
  if(n_land == 0){return(numeric(0))}
  # random seed point; the point farthest from it becomes the second landmark
  l_idx<-sample(nrow(X), 1)
  if(n_land >= 2){l_idx<-which.max(X_dist[l_idx, ]) %>% c(., l_idx)}
  if(n_land > 2){
    for (i in 1:(n_land-2)) {
      # next landmark = non-landmark point maximising its minimum distance to the
      # current landmark set; the index is recovered from the names attribute of
      # which.max() -- fragile, presumably requires a dimnamed distance matrix;
      # TODO confirm it behaves on unnamed input
      l_idx<-apply(X_dist[-l_idx, l_idx], 1, min) %>% which.max() %>% attributes() %$% as.integer(.) %>% c(., l_idx)
    }
  }
  return(l_idx)
}
#-------------------------------------------------------------
#ベッチ数自動推定関数群を距離行列変更に対応させる------------
#proposedMethodOnlyから変形
#witness複体のランドマーク点を使用
# Run the automatic Betti-number estimation over a list of data sets, altering
# each subsample's distance matrix at witness-complex landmark points
# (adapted from proposedMethodOnly).
# X: list of data sets; each element needs $noizyX (point cloud) and $nsample
# maxdim/maxscale: homology dimension limit and filtration bound
# samples: number of bootstrap subsamples per data set
# const.size: fixed subsample size; 0 means "use 4/5 of each data set"
# l_rate, n_vic, spar: forwarded to maxmin_dist_changed_pl_peak_count
# Returns a "bettiComp" object with one estimate matrix per dimension.
maxmin_distance_change_method <- function(X,maxdim,maxscale,samples, const.size=0, l_rate=0.15, n_vic=10, spar = seq(0,1,0.1)){
  # one single-column result matrix per homology dimension
  aggrs<-lapply(1:maxdim, function(k){
    aggr<-matrix(0,length(X),1)
    dimnames(aggr) <- list(paste0("data-set", 1:length(X)), "proposed")
    return(aggr)
  })
  for(t in 1:length(X)){
    cat("data set", t, "calculating\n")
    if(const.size==0){size<-X[[t]]$nsample*(4/5)}
    else{size<-const.size}
    B <- usephacm:::bootstrapper(X[[t]]$noizyX,size,samples)
    speak <- maxmin_dist_changed_pl_peak_count(X = B, maxdim = maxdim, maxscale = maxscale, l_rate = l_rate, n_vic = n_vic, spar = spar)
    m5 <- sapply(1:maxdim,function(d)speak[[paste0("dim",d,"mhole")]])
    for (i in 1:maxdim) {
      aggrs[[i]][t,1]<-m5[i]
    }
  }
  # NOTE(review): Bsize records 'size' from the LAST loop iteration only; with
  # const.size == 0 and data sets of differing sizes this is just the final one
  aggrs <- append(aggrs,list(Xsize=sapply(1:length(X), function(l)nrow(X[[l]][["noizyX"]])),Xsamples=length(X),
                             Bsize=size,Bsamples=samples,
                             maxdim=maxdim,maxscale=maxscale))
  class(aggrs) <- "bettiComp"
  return(aggrs)
}
#------------------------------------------------
#距離行列変更後、PH計算・PLの局所最大値をカウント
#bootstrap.homology.mk2から変形
#witness複体のランドマーク点を使用
#calc.landscape.peak(BootstrapHomology-mk1.R)をパッケージ化して置き換えるべし
#usephacm:::calc_diag_centroid(diag)からpersistence_weighted_mean(diag)へ変更
# Compute PH after altering the landmark rows/columns of each subsample's
# distance matrix, then count persistence-landscape local maxima
# (adapted from bootstrap.homology.mk2; uses witness-complex landmarks).
# calc.landscape.peak (BootstrapHomology-mk1.R) should eventually be replaced
# by the packaged version; the band statistic was switched from
# usephacm:::calc_diag_centroid(diag) to persistence_weighted_mean(diag).
# X: list of bootstrap subsamples; must carry class "bootsSamples"
# maxdim: highest homology dimension to count
# maxscale: upper filtration bound; the landscape is sampled on [0, maxscale]
# const.band: if > 0, use this fixed band instead of the estimated one
# maximum.thresh: if TRUE, re-derive the band per sample from the landscape maxima
# l_rate: fraction of points used as landmarks; n_vic: neighbourhood size
# spar: smoothing parameters forwarded to calc.landscape.peak
# Returns a "smoothPhom" object with per-dimension peak counts and the band.
maxmin_dist_changed_pl_peak_count <-function(X, maxdim, maxscale, const.band=0, maximum.thresh = F, l_rate=0.15, n_vic=10, spar = seq(0,1,0.1)){
  require(TDA)
  if(!("bootsSamples" %in% class(X))) stop("input must be bootsSamples")
  peak <- matrix(0,maxdim,length(X))
  tseq <- seq(0,maxscale,length.out = 1000)
  # one diagram per subsample, computed on the altered distance matrix
  diags <- lapply(X,function(x)maxmin_dist_changed_pd(x, maxdim, maxscale, l_rate, n_vic)[[1]])
  # band = persistence-weighted mean per diagram; the largest is used by default
  bands<-sapply(diags,function(diag)persistence_weighted_mean(diag))
  print(bands)
  band <- ifelse(const.band==0, max(bands),const.band)
  print(band)
  for (t in 1:length(X)) {
    land <- lapply(1:maxdim,function(d)landscape(diags[[t]],dimension = d,KK = 1,tseq = tseq))
    if(maximum.thresh) band <- max(sapply(land,max))/4
    for(d in 1:maxdim){
      # peak threshold scales the band by the d-sphere surface -- TODO confirm rationale
      peak[d,t] <- calc.landscape.peak(X=land[[d]], thresh = (band*(2*pi)/surface_nshpere(d)), tseq=tseq, spar = spar)
    }
  }
  dimnames(peak) <- list(paste0("dim",1:maxdim),paste0("sample",1:length(X)))
  bootstrap.summary <- list(peak=peak)
  bootstrap.summary <- append(bootstrap.summary,c(band=band,show.hole.density(peak)))
  class(bootstrap.summary) <- "smoothPhom"
  return(bootstrap.summary)
}
#--------------------------------------------------------------
#距離関数変更後のパーシステント図を返す
#witness複体のランドマーク点に関する距離行列の要素を変化させる
#l_rate=ランドマーク点の割合、n_vics=近傍点の数
#PDとランドマーク点のインデックスを返す
#TDAstats(ripser)で書き換えた
# Persistence diagram after altering the distance-matrix entries of the
# witness-complex landmark points (rewritten with TDAstats/ripser).
# X: point cloud (rows = points)
# maxdim: highest homology dimension
# maxscale: upper filtration bound
# l_rate: fraction of points used as landmarks
# n_vic: number of nearest neighbours used to estimate each landmark's scale
# Returns list(pd = diagram, l_idx = landmark indices).
maxmin_dist_changed_pd<-function(X, maxdim, maxscale, l_rate=0.15, n_vic=10){
  require(TDA)
  require(tidyverse)
  require(TDAstats)
  X_dist<-dist(X) %>% as.matrix()
  # landmark indices via maxmin sampling
  l_idx<-landmark_points(X = X_dist, n_land = nrow(X)*l_rate, d_mat = T)
  # mean distance from each landmark to its n_vic nearest neighbours
  # (index 1 of the sorted row is the zero self-distance, hence 2:(n_vic+1))
  vics_dmean<-sapply(l_idx, function(k){
    # bug fix: 'nvic' was a typo for the n_vic argument
    vic_dmean<-X_dist[k, ] %>% sort() %>% .[2:(n_vic+1)] %>% mean()
    names(vic_dmean)<-k
    return(vic_dmean)
  })
  # shrink each landmark's row and column by half its neighbourhood scale
  for (i in 1:length(l_idx)) {
    X_dist[l_idx[i], ]<-X_dist[l_idx[i], ]-vics_dmean[i]/2
    X_dist[, l_idx[i]]<-X_dist[, l_idx[i]]-vics_dmean[i]/2
  }
  X_dist[X_dist < 0]<-0
  pd<-TDAstats::calculate_homology(mat = X_dist, dim = maxdim, threshold = maxscale, format = "distmat")
  class(pd)<-"diagram"
  return(list(pd=pd, l_idx=l_idx))
}
#-----------------------------------------------
#TDAstasのPDからTDAのPD(diagramクラス)へ変換
# Convert a TDAstats persistence matrix into a TDA-style "diagram" object.
# pd: matrix whose columns are (dimension, birth, death)
# Returns the same matrix carrying the class, attributes and column names
# expected by TDA's diagram-consuming functions.
as_diag<-function(pd){
  out <- pd
  colnames(out) <- c("dimension", "Birth", "Death")
  # maxdimension / scale mirror what TDA records on its own diagrams
  attr(out, "maxdimension") <- max(out[, 1])
  attr(out, "scale") <- c(0, max(out[, 3]))
  class(out) <- "diagram"
  return(out)
}
#------------------------------------------------
#MPH(?)を計算する関数----------------------------
#ランドマーク点に関する要素において、元々の距離rを使って1-exp(-(x/a)^2)に置き換える
#aはハイパラ
#l_rate=ランドマーク点の割合
#PDとランドマーク点のインデックス、計算時間を返す
# Compute "multi-resolution" persistent homology (MPH): the distance matrix is
# normalised to [0, 1], then the rows/columns of the landmark points are
# replaced by 1 - exp(-(d/a)^2) of the ORIGINAL (un-normalised) distances.
# X: point cloud; maxdim: highest homology dimension
# l_rate: fraction of points used as landmarks; a: decay hyper-parameter
# Returns list(pd = diagram, l_idx = landmark indices, time = timing).
multiresolut_homology<-function(X, maxdim, l_rate=0.3, a=1){
  dmat <- as.matrix(dist(X))
  # landmark indices via maxmin sampling
  lands <- landmark_points(X = dmat, n_land = nrow(X)*l_rate, d_mat = T)
  altered <- dmat/max(dmat)
  for (land in lands) {
    altered[land, ] <- 1 - exp(-(dmat[land, ]/a)^2)
    altered[, land] <- 1 - exp(-(dmat[, land]/a)^2)
  }
  elapsed <- system.time(
    pd <- TDAstats::calculate_homology(mat = altered, dim = maxdim, threshold = 1, format = "distmat")
  )
  return(list(pd=pd, l_idx=lands, time=elapsed))
}
#------------------------------------------------------
#図中のx-y点間に直線を引く関数-------------------------
#lines関数を書き換えただけ
# Draw a straight segment between two points in the current plot.
# x, y: each a (x-coordinate, y-coordinate) pair for one endpoint
# ...: forwarded to graphics::lines (col, lwd, lty, ...)
draw_line<-function(x, y, ...){
  xs <- c(x[1], y[1])
  ys <- c(x[2], y[2])
  lines(xs, ys, ...)
}
#--------------------------------------------------------------
#距離行列において指定したインデックスの値を変化させる----------
#全体は正規化。変化はFRIによる
#X_dist=距離行列, lands=ランドマーク点, eta=FRIのハイパラ
# Alter the given indices of a distance matrix via the FRI map after
# normalising the whole matrix by its maximum.
# Landmarks are processed sequentially IN PLACE, so a later landmark sees the
# values already rewritten by earlier ones (matches the original behaviour;
# contrast with dist_wvr_change, which reads from the untouched source).
# X_dist: distance matrix; lands: landmark indices; eta: FRI hyper-parameter
# Returns the altered matrix, clamped at zero.
dist_fri_change<-function(X_dist, lands, eta){
  d_norm <- X_dist/max(X_dist)
  for (land in lands) {
    d_norm[land, ] <- 1 - exp(-(d_norm[land, ]/eta)^2)
    d_norm[, land] <- 1 - exp(-(d_norm[, land]/eta)^2)
  }
  d_norm[d_norm < 0] <- 0
  return(d_norm)
}
#--------------------------------------------------------------
#距離行列において指定したインデックスの値を変化させる
#変化はもとの距離に1-exp(-(d_ij/eta)^2)を掛ける(d_ij=元の距離)
#X_dist=距離行列, lands=ランドマーク点, eta=FRIのハイパラ
# Alter the given indices of a distance matrix by multiplying the original
# distance d_ij by 1 - exp(-(d_ij/eta)^2).
# All weights are computed from the UNMODIFIED input matrix, so the result
# does not depend on the order of the landmarks' row updates (only overlapping
# row/column writes follow the row-then-column order of the original).
# X_dist: distance matrix; lands: landmark indices; eta: hyper-parameter
# Returns the altered matrix, clamped at zero; the input unchanged if no lands.
dist_wvr_change<-function(X_dist, lands, eta){
  if (length(lands) == 0) {
    return(X_dist)
  }
  weighted <- X_dist
  for (land in lands) {
    weighted[land, ] <- X_dist[land, ] * (1 - exp(-(X_dist[land, ]/eta)^2))
    weighted[, land] <- X_dist[, land] * (1 - exp(-(X_dist[, land]/eta)^2))
  }
  weighted[weighted < 0] <- 0
  return(weighted)
}
#------------------------------------------------
#WPH(?)を計算する関数----------------------------
#ランドマーク点に関する要素において、元々の距離dに1-exp(-(d/eta)^2)を掛ける
#etaはハイパラ、l_rate=ランドマーク点の割合
#PDとランドマーク点のインデックス、計算時間を返す
# Compute "weighted" persistent homology (WPH): the rows/columns of the
# landmark points are reweighted by d * (1 - exp(-(d/eta)^2)).
# X: point cloud; maxdim: highest homology dimension; maxscale: filtration bound
# l_rate: fraction of points used as landmarks; eta: decay hyper-parameter
# l_rate and eta may alternatively be supplied through ... as named arguments.
# Returns list(pd, l_idx, time) with l_rate/eta recorded as attributes.
weighted_homology<-function(X, maxdim, maxscale, l_rate, eta, ...){
  dots <- list(...)
  # fall back to ...-supplied values when the named arguments were omitted
  if (missing(l_rate)) l_rate <- dots$l_rate
  if (missing(eta)) eta <- dots$eta
  dmat <- as.matrix(dist(X))
  # landmark indices via maxmin sampling
  lands <- landmark_points(X = dmat, n_land = nrow(X)*l_rate, d_mat = T)
  wmat <- dist_wvr_change(X_dist = dmat, lands = lands, eta = eta)
  elapsed <- system.time(
    pd <- TDAstats::calculate_homology(mat = wmat, dim = maxdim, threshold = maxscale, format = "distmat")
  )
  out <- list(pd = pd, l_idx = lands, time = elapsed)
  attr(out, "l_rate") <- l_rate
  attr(out, "eta") <- eta
  return(out)
}
#----------------------------------
#パーシステンス計算関数----
#usephacmの修正版
# Persistence (death - birth) of every pair of the given dimension
# (corrected version of the usephacm helper).
# pd: diagram-like matrix whose columns are (dimension, birth, death)
# dim: single numeric homology dimension to extract
# Returns the persistence values with the dimension stored in "pers_dim".
calc_per<-function (pd, dim){
  # bug fix: is_numeric() does not exist in base R (and was removed from
  # purrr); validate with base predicates via stopifnot instead
  stopifnot(length(dim) == 1, is.numeric(dim))
  pers <- pd[pd[, 1] == dim, 3] - pd[pd[, 1] == dim, 2]
  attr(pers, "pers_dim") <- dim
  return(pers)
}
#---------------------------------------
#試験的な関数-------------
#上から順に距離を入れ替える----------
# Experimental: attenuate the rows/columns of the points realising each
# dimension-'dim' birth time, in diagram order (duplicated point indices are
# processed repeatedly -- see dist_element_replace_nodupl for the
# deduplicated variant).
# pd: diagram-like matrix with columns (dimension, birth, death)
# dim: homology dimension whose pairs drive the alteration
# distmat: distance matrix to alter
# NOTE(review): relies on exact floating-point equality between pd births and
# distmat entries, so it is only safe when pd was computed from this very
# matrix; also assumes each birth value matches exactly one entry.
dist_element_replace1<-function(pd, dim, distmat){
  h2_rows<-which(pd[,1]==dim)
  dist_cp<-distmat
  # zero the upper triangle so each birth distance is located exactly once
  dist_cp[upper.tri(dist_cp)]<-0
  birth_e<-sapply(h2_rows, function(i)which(dist_cp == pd[i,2], arr.ind = T))
  dist_cp<-distmat
  for (i in 1:ncol(birth_e)) {
    # eta chosen so the factor 1 - exp(-(d/eta)^2) reaches 0.9 at the death time
    c_eta<-pd[h2_rows[i], 3]/sqrt(log(10))
    dist_cp[birth_e[1,i], ]<-distmat[birth_e[1,i],]*( 1-exp( -(distmat[birth_e[1,i], ]/c_eta)^2 ) )
    dist_cp[, birth_e[1,i]]<-distmat[, birth_e[1,i]]*( 1-exp( -(distmat[, birth_e[1,i]]/c_eta)^2 ) )
    dist_cp[birth_e[2,i], ]<-distmat[birth_e[2,i], ]*( 1-exp( -(distmat[birth_e[2,i], ]/c_eta)^2 ) )
    dist_cp[, birth_e[2,i]]<-distmat[, birth_e[2,i]]*( 1-exp( -(distmat[, birth_e[2,i]]/c_eta)^2 ) )
  }
  return(dist_cp)
}
#---------------------------------------
#重複を取り除いて距離操作----
# Attenuate the rows/columns of the points realising the dimension-'dim' birth
# times, with duplicates removed: when a point index appears for several
# pairs, only its longest-lived pair is kept.
# pd: diagram-like matrix with columns (dimension, birth, death)
# dim: homology dimension whose pairs drive the alteration
# distmat: distance matrix to alter
# NOTE(review): relies on exact floating-point equality between pd births and
# distmat entries -- only safe when pd was computed from this very matrix.
dist_element_replace_nodupl<-function(pd, dim, distmat){
  h2_rows<-which(pd[,1]==dim)
  dist_cp<-distmat
  # zero the upper triangle so each birth distance is located exactly once
  dist_cp[upper.tri(dist_cp)]<-0
  # (row, col) index pair realising each birth-time distance
  birth_e<-sapply(h2_rows, function(i)which(dist_cp == pd[i,2], arr.ind = T))
  dist_cp<-distmat
  p_set<-c()
  # collect one (point index, death, persistence) row per point, keeping the
  # pair with the largest persistence when a point recurs
  for (j in 1:ncol(birth_e)) {
    pers<-pd[h2_rows[j], 3] - pd[h2_rows[j], 2]
    if(birth_e[1, j] %in% p_set[, 1]){
      if(pers > p_set[p_set[, 1]==birth_e[1, j], 3]){
        death<-pd[h2_rows[j], 3]
        p_set[p_set[, 1]==birth_e[1, j], ]<-c(birth_e[1, j], death, pers)
      }
    }else{
      death<-pd[h2_rows[j], 3]
      p_set<-rbind(p_set, c(birth_e[1, j], death, pers))
    }
    if(birth_e[2, j] %in% p_set[, 1]){
      if(pers > p_set[p_set[, 1]==birth_e[2, j], 3]){
        death<-pd[h2_rows[j], 3]
        p_set[p_set[, 1]==birth_e[2, j], ]<-c(birth_e[2, j], death, pers)
      }
    }else{
      death<-pd[h2_rows[j], 3]
      p_set<-rbind(p_set, c(birth_e[2, j], death, pers))
    }
  }
  colnames(p_set)<-c("p_idx", "death", "persistence")
  p_set %<>% as_tibble()
  # attenuate each retained point's row/column; eta chosen so the factor
  # 1 - exp(-(d/eta)^2) reaches 0.9 at the pair's death time
  for (i in 1:nrow(p_set)) {
    c_eta<-p_set$death[i]/sqrt(log(10))
    dist_cp[p_set$p_idx[i], ]<-distmat[p_set$p_idx[i], ]*( 1-exp( -(distmat[p_set$p_idx[i], ]/c_eta)^2 ) )
    dist_cp[, p_set$p_idx[i]]<-distmat[, p_set$p_idx[i]]*( 1-exp( -(distmat[, p_set$p_idx[i]]/c_eta)^2 ) )
  }
  return(dist_cp)
}
#------------------------------
#発生時刻と消滅時刻が入ったセルが塗られた、データ点間距離のヒストグラムを描画----
#breaks=ヒストグラムの
# Draw a histogram of pairwise distances with the cells containing persistence
# birth times (magenta) and death times (cyan) filled in.
# data: point cloud, distance matrix (with distmat = TRUE), or a "DistmatPD"
#       object carrying both the matrix and its diagram
# pd: persistence diagram (matrix with a "dimension" column)
# dim: homology dimension whose pairs are highlighted
# breaks: histogram breaks (required)
# m_title: main title; defaults to the expression passed as 'data'
# eta_line / inflec_line / tile_line / pd_line: optional vertical reference lines
# barcode: if TRUE, overlay the persistence barcode via plot_per_barc
# Returns a list with both histogram objects (birth and death).
colored_birth_death_cell_hist<-
  function(data, pd, dim, breaks, m_title, distmat = F, eta_line = T, eta, barcode = F,
           inflec_line = F, inflec = eta*sqrt(3/2), tile_line = F, ninty_tile=eta*sqrt(log(10)), pd_line = F){
    if(inherits(data, "DistmatPD")){
      pd<-data$get_pd()
      data<-data$distmat
      distmat<-T
    }
    if(missing(breaks)){stop("breaks is missing.")}
    if( !("dimension" %in% colnames(pd)) ){stop("pd isn't diagram.")}
    if(missing(m_title)){m_title<-substitute(data)}
    # extract the dimension-'dim' persistent homology pairs
    pd_Hd<-pd[pd[,1]==dim, ]
    # a single matching pair comes back as a vector; restore the matrix shape
    if( !(is.matrix(pd_Hd)) ){pd_Hd<-as.matrix(pd_Hd) %>% t()}
    # cells that contain at least one birth-time distance
    birth_cell<-map_lgl(seq_along(breaks[-1]), function(i){some(pd_Hd[, 2], ~{(.x > breaks[i]) & (.x <= breaks[i+1])})}) %>% which()
    # cells that contain at least one death-time distance
    death_cell<-map_lgl(seq_along(breaks[-1]), function(i){some(pd_Hd[, 3], ~{(.x > breaks[i]) & (.x <= breaks[i+1])})}) %>% which()
    # birth_cell_col = colours of the birth cells; NA leaves a cell unfilled
    # "#e4007f" = magenta, "4d" = 30% alpha
    birth_cell_col<-rep(NA, length = (length(breaks)-1))
    birth_cell_col[birth_cell]<-"#e4007f4d"
    # death_cell_col = colours of the death cells; NA leaves a cell unfilled
    # "#00a0e9" = cyan, "4d" = 30% alpha
    death_cell_col<-rep(NA, length = (length(breaks)-1))
    death_cell_col[death_cell]<-"#00a0e94d"
    # draw the histograms
    if(distmat){
      # fill the cells containing birth times (magenta)
      hist_birth<-data %>% as.dist() %>% hist(breaks = breaks, col = birth_cell_col, main = m_title)
      # overlay the cells containing death times (cyan)
      hist_death<-data %>% as.dist() %>% hist(breaks = breaks, col = death_cell_col, main = "", add = T)
    }
    else{
      # fill the cells containing birth times (magenta)
      hist_birth<-data %>% dist() %>% hist(breaks = breaks, col = birth_cell_col, main = m_title)
      # overlay the cells containing death times (cyan)
      hist_death<-data %>% dist() %>% hist(breaks = breaks, col = death_cell_col, main = "", add = T)
    }
    # vertical line where distance == eta
    if(eta_line && !missing(eta)){
      abline(v = eta, col = "green3", lwd = 2)
      text(x = eta*1.1, y = max(hist_birth$counts)*0.9, labels = expression(plain(distance) == eta), pos = 3)
    }
    # vertical line at the inflection point
    if(inflec_line){
      abline(v = inflec, col = "deeppink", lwd = 2)
      text(x = inflec*1.1, y = max(hist_birth$counts)*0.8, labels = "inflection point", pos = 3)
    }
    # vertical line at the 90% point
    if(tile_line){
      abline(v = ninty_tile, col = "darkviolet", lwd = 2)
      text(x = ninty_tile*1.1, y = max(hist_birth$counts)*0.7, labels = "90% point", pos = 3)
    }
    # vertical line for each birth/death pair, coloured per pair
    if(pd_line){
      for (i in seq_len(nrow(pd_Hd))) {
        draw_line(x = c(pd_Hd[i, 2], 0), y = c(pd_Hd[i, 2], max(hist_birth$counts)*0.6), col = rainbow(nrow(pd_Hd))[i] )
        draw_line(x = c(pd_Hd[i, 3], 0), y = c(pd_Hd[i, 3], max(hist_birth$counts)*0.6), col = rainbow(nrow(pd_Hd))[i] )
      }
    }
    if(barcode){
      par(new = T)
      plot_per_barc(pd = pd, dim = dim, xlim = range(breaks), col = "red")
    }
    return(lst(hist_birth, hist_death))
  }
#---------------------------------------
#パーシステントバーコードを描く関数------
#graphicを使い、後からいろいろ操作できるようにする
# Draw a persistence barcode with base graphics so it can be annotated later.
# pd: persistence diagram (matrix with a "dimension" column)
# dim: dimension(s) to draw; defaults to every dimension present in pd
# xlim / ylim: plot limits (defaults are derived from pd via fill_ifmissing)
# col: one colour per dimension, or a single colour recycled for all
# lwd: bar line width; ... is forwarded to draw_line()/lines()
plot_per_barc<-function(pd, dim, xlim, ylim, col, lwd = 2, ...){
  if( !("dimension" %in% colnames(pd)) ){stop("pd mayn't be persistence diagram.")}
  if(missing(dim)){dim<-unique(pd[, 1])}
  if(!all(dim %in% pd[, 1])){stop("dim isn't correct dimension in persistence diagram.")}
  pd_Hd<-pd[(pd[, 1] %in% dim), ]
  # a single matching pair comes back as a vector; restore the matrix shape
  if( !(is.matrix(pd_Hd)) ){pd_Hd<-as.matrix(pd_Hd) %>% t()}
  # if(missing(xlim)){xlim <- c(min(pd_Hd[, 2]), max(pd_Hd[, 3]))}
  # if(missing(ylim)){ylim <- c(0, nrow(pd_Hd)+1)}
  # fill_ifmissing presumably assigns defaults for the missing arguments in the
  # caller's frame -- TODO confirm against its definition
  fill_ifmissing(xlim = c(min(pd_Hd[, 2]), max(pd_Hd[, 3])), ylim = c(0, nrow(pd_Hd)+1),
                 col = c(1, 2, 4, 3, 5:(5+max(0, max(dim)-3)) )[1:(max(dim)+1)] )
  # empty canvas; the axis and bars are added manually below
  plot(x = pd_Hd[, 2:3], xlim = xlim, ylim = ylim, type = "n", xlab = "", ylab = "",
       xaxt = "n", yaxt = "n")
  graphics::axis(1)
  graphics::title(xlab = "time")
  if(length(col) == 1){col<-rep(col, max(dim)+1)}
  # one horizontal bar per pair at height j, coloured by the pair's dimension
  for (j in seq_len(nrow(pd_Hd))) {
    draw_line(x = c(pd_Hd[j, 2], j), y = c(pd_Hd[j, 3], j), col = col[pd_Hd[j, 1]+1], lwd = lwd, ...)
  }
}
#-----------------------------------------------
#距離減衰度etaを「発生時刻と消滅時刻の中点」の中央値として距離行列操作----
#中央値・平均値、さらに発生時刻の平均値、消滅時刻の平均値を選択できるようにする
#dim=指定次元。1つのみ指定
# Alter a distance matrix using an attenuation scale eta derived from summary
# statistics of the dimension-'dim' persistence pairs.
# pd: diagram-like matrix with columns (dimension, birth, death)
# dim: single numeric homology dimension
# distmat: distance matrix to alter
# type: which statistic becomes eta -- median or mean of the (birth+death)/2
#       midpoints, or the mean birth / mean death time
# Distances d <= eta are mapped to d * (1 - exp(-(d/eta)^2)); larger distances
# are left untouched.
# Returns a list with the altered matrix and all candidate statistics.
mid_median_attenu<-function(pd, dim, distmat, type = c("median", "mean", "birth", "death")){
  # stopifnot replaces assertthat::assert_that (same check, stdlib only)
  stopifnot(length(dim) == 1, is.numeric(dim))
  # bug fix: drop = FALSE keeps pd_Hd a matrix when only one pair matches,
  # so the apply() below cannot fail on a dropped vector
  pd_Hd<-pd[pd[,1]==dim, , drop = FALSE]
  pd_Hd_mid<-apply(pd_Hd, 1, function(x){(x[2]+x[3])/2})
  pd_Hd_mid_med<-median(pd_Hd_mid)
  pd_Hd_mid_mean<-mean(pd_Hd_mid)
  pd_Hd_birth_mean<-mean(pd_Hd[, 2])
  pd_Hd_death_mean<-mean(pd_Hd[, 3])
  type<-match.arg(type)
  eta<-switch(type,
              median = pd_Hd_mid_med,
              mean = pd_Hd_mid_mean,
              birth = pd_Hd_birth_mean,
              death = pd_Hd_death_mean
  )
  distmat[distmat <= eta] <- distmat[distmat <= eta]*( 1-exp(-(distmat[distmat <= eta]/eta)^2) )
  # list() replaces tibble::lst(): every component is explicitly named, so the
  # result is identical and the tidyverse dependency is dropped
  return(list(altdist=distmat, median=pd_Hd_mid_med, mean=pd_Hd_mid_mean, birth = pd_Hd_birth_mean, death = pd_Hd_death_mean, type=type))
}
#---------------------------------------
#フィルトレーション距離速度変化のための関数------
#d*(1-exp(-(d/eta)^2))
# Filtration distance attenuation map: d -> d * (1 - exp(-(d/eta)^2)).
# d: distance(s), vectorised; eta: decay hyper-parameter
mph_exp<-function(d, eta){
  attenuation <- 1 - exp(-(d/eta)^2)
  d * attenuation
}
#-----------------------------------------------
#距離減衰度etaを「発生時刻と消滅時刻の中点」の平均値として距離行列操作----
#dim=指定次元。1つのみ指定
# Alter a distance matrix: distances up to eta are attenuated with mph_exp(),
# and distances between eta and the mean death time follow the straight line
# through (eta, mph_exp(eta, eta)) and (death_mean, death_mean), keeping the
# map continuous at eta and equal to the identity at the mean death time.
# pd: diagram-like matrix with columns (dimension, birth, death)
# dim: single numeric homology dimension
# distmat: distance matrix to alter
# type: eta = mean of the (birth+death)/2 midpoints, or the mean birth time
# Returns a list with the altered matrix and the statistics used.
mid_mean_attenu_slope<-function(pd, dim, distmat, type = c("mean", "birth")){
  # stopifnot replaces assertthat::assert_that (same check, stdlib only)
  stopifnot(length(dim) == 1, is.numeric(dim))
  # bug fix: drop = FALSE keeps pd_Hd a matrix when only one pair matches
  pd_Hd<-pd[pd[,1]==dim, , drop = FALSE]
  pd_Hd_mid<-apply(pd_Hd, 1, function(x){(x[2]+x[3])/2})
  pd_Hd_death_mean<-mean(pd_Hd[, 3])
  pd_Hd_birth_mean<-mean(pd_Hd[, 2])
  type<-match.arg(type)
  eta<-switch(type,
              mean = mean(pd_Hd_mid),
              birth = pd_Hd_birth_mean
  )
  # (slope, intercept) of the linear segment between eta and the mean death time
  slp_seg<-solve(matrix(c(eta, pd_Hd_death_mean, 1, 1), 2, 2), matrix(c(mph_exp(eta, eta), pd_Hd_death_mean)))
  # base-R equivalents of the original magrittr pipes, applied in the same
  # order: the second mask is computed AFTER the first in-place update
  below<-distmat <= eta
  distmat[below]<-mph_exp(distmat[below], eta)
  mid_band<-(distmat > eta) & (distmat <= pd_Hd_death_mean)
  distmat[mid_band]<-distmat[mid_band]*slp_seg[1] + slp_seg[2]
  # list() replaces tibble::lst(): every component is explicitly named
  return(list(altdist=distmat, mid_mean=mean(pd_Hd_mid), birth_mean=pd_Hd_birth_mean, death_mean=pd_Hd_death_mean, type=type))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpower.R
\name{mpower}
\alias{mpower}
\title{Matrix Power}
\usage{
mpower(A, p, tol = sqrt(.Machine$double.eps))
}
\arguments{
\item{A}{a square symmetric matrix}
\item{p}{matrix power, not necessarily a positive integer}
\item{tol}{tolerance for determining if the matrix is symmetric}
}
\value{
\code{A} raised to the power \code{p}: \code{A^p}
}
\description{
A simple function to demonstrate the power of a square symmetric matrix in terms of its eigenvalues and eigenvectors.
}
\details{
The matrix power \code{p} can be a fraction or other non-integer. For example, \code{p=1/2} and
\code{p=1/3} give a square-root and cube-root of the matrix.
Negative powers are also allowed. For example, \code{p=-1} gives the inverse and \code{p=-1/2}
gives the inverse square-root.
}
\seealso{
The \code{\link[expm]{\%^\%}} operator in the \code{expm} package is far more efficient
}
|
/man/mpower.Rd
|
no_license
|
gmonette/matlib
|
R
| false
| true
| 976
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpower.R
\name{mpower}
\alias{mpower}
\title{Matrix Power}
\usage{
mpower(A, p, tol = sqrt(.Machine$double.eps))
}
\arguments{
\item{A}{a square symmetric matrix}
\item{p}{matrix power, not necessarily a positive integer}
\item{tol}{tolerance for determining if the matrix is symmetric}
}
\value{
\code{A} raised to the power \code{p}: \code{A^p}
}
\description{
A simple function to demonstrate the power of a square symmetric matrix in terms of its eigenvalues and eigenvectors.
}
\details{
The matrix power \code{p} can be a fraction or other non-integer. For example, \code{p=1/2} and
\code{p=1/3} give a square-root and cube-root of the matrix.
Negative powers are also allowed. For example, \code{p=-1} gives the inverse and \code{p=-1/2}
gives the inverse square-root.
}
\seealso{
The \code{\link[expm]{\%^\%}} operator in the \code{expm} package is far more efficient
}
|
#' Diag data
#'
#' A dataset containing the age and gender of every individual recorded in 1850
#' census.
#'
#'
#' @format A data frame with 7772 rows and 2 variables:
#'
#' @source {Aalborg census 1850. Data entered by ___}
"export_diag"
|
/R/export_diag.R
|
no_license
|
HF-Research/HTData
|
R
| false
| false
| 240
|
r
|
#' Diag data
#'
#' A dataset containing the age and gender of every individual recorded in 1850
#' census.
#'
#'
#' @format A data frame with 7772 rows and 2 variables:
#'
#' @source {Aalborg census 1850. Data entered by ___}
"export_diag"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Comparisons.R
\name{Get_Apps}
\alias{Get_Apps}
\title{Calculate the Apps for an NBA matchup for a particular nba Season}
\usage{
Get_Apps(HomeTeam, VisitorTeam, Seasondata, nbins = 25)
}
\arguments{
\item{HomeTeam}{Home Team}
\item{VisitorTeam}{Visitor Team}
\item{Seasondata}{The information of shots, it can be downloaded with function
read_season}
\item{nbins}{The number of bins the hexplot for the shot charts are made
(default is 25)}
}
\value{
a dataframe with the offensive apps, defensive apps and home spread
}
\description{
This function takes an NBA season object and calculates the Apps for a
particular matchup.
}
\examples{
data("season2017")
Get_Apps(HomeTeam = "Bos", VisitorTeam = "Was", Seasondata = season2017)
Get_Apps(HomeTeam = "GSW", VisitorTeam = "Cle", Seasondata = season2017)
Get_Apps(HomeTeam = "Cle", VisitorTeam = "GSW", Seasondata = season2017)
}
\seealso{
\code{\link[SpatialBall]{DefShotSeasonGraphTeam}}
\code{\link[SpatialBall]{OffShotSeasonGraphTeam}}
}
\author{
Derek Corcoran <derek.corcoran.barrios@gmail.com>
}
|
/man/Get_Apps.Rd
|
no_license
|
derek-corcoran-barrios/SpatialBall2
|
R
| false
| true
| 1,133
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Comparisons.R
\name{Get_Apps}
\alias{Get_Apps}
\title{Calculate the Apps for an NBA matchup for a particular nba Season}
\usage{
Get_Apps(HomeTeam, VisitorTeam, Seasondata, nbins = 25)
}
\arguments{
\item{HomeTeam}{Home Team}
\item{VisitorTeam}{Visitor Team}
\item{Seasondata}{The information of shots, it can be downloaded with function
read_season}
\item{nbins}{The number of bins the hexplot for the shot charts are made
(default is 25)}
}
\value{
a dataframe with the offensive apps, defensive apps and home spread
}
\description{
This function takes an NBA season object and calculates the Apps for a
particular matchup.
}
\examples{
data("season2017")
Get_Apps(HomeTeam = "Bos", VisitorTeam = "Was", Seasondata = season2017)
Get_Apps(HomeTeam = "GSW", VisitorTeam = "Cle", Seasondata = season2017)
Get_Apps(HomeTeam = "Cle", VisitorTeam = "GSW", Seasondata = season2017)
}
\seealso{
\code{\link[SpatialBall]{DefShotSeasonGraphTeam}}
\code{\link[SpatialBall]{OffShotSeasonGraphTeam}}
}
\author{
Derek Corcoran <derek.corcoran.barrios@gmail.com>
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_instance_attribute}
\alias{ec2_describe_instance_attribute}
\title{Describes the specified attribute of the specified instance}
\usage{
ec2_describe_instance_attribute(Attribute, DryRun, InstanceId)
}
\arguments{
\item{Attribute}{[required] The instance attribute.
Note: The \code{enaSupport} attribute is not supported at this time.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InstanceId}{[required] The ID of the instance.}
}
\description{
Describes the specified attribute of the specified instance. You can
specify only one attribute at a time. Valid attribute values are:
\code{instanceType} \| \code{kernel} \| \code{ramdisk} \| \code{userData} \|
\code{disableApiTermination} \| \code{instanceInitiatedShutdownBehavior} \|
\code{rootDeviceName} \| \code{blockDeviceMapping} \| \code{productCodes} \|
\code{sourceDestCheck} \| \code{groupSet} \| \code{ebsOptimized} \| \code{sriovNetSupport}
}
\section{Request syntax}{
\preformatted{svc$describe_instance_attribute(
Attribute = "instanceType"|"kernel"|"ramdisk"|"userData"|"disableApiTermination"|"instanceInitiatedShutdownBehavior"|"rootDeviceName"|"blockDeviceMapping"|"productCodes"|"sourceDestCheck"|"groupSet"|"ebsOptimized"|"sriovNetSupport"|"enaSupport",
DryRun = TRUE|FALSE,
InstanceId = "string"
)
}
}
\examples{
# This example describes the instance type of the specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "instanceType",
InstanceId = "i-1234567890abcdef0"
)}
# This example describes the ``disableApiTermination`` attribute of the
# specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "disableApiTermination",
InstanceId = "i-1234567890abcdef0"
)}
# This example describes the ``blockDeviceMapping`` attribute of the
# specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "blockDeviceMapping",
InstanceId = "i-1234567890abcdef0"
)}
}
\keyword{internal}
|
/paws/man/ec2_describe_instance_attribute.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 2,288
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_instance_attribute}
\alias{ec2_describe_instance_attribute}
\title{Describes the specified attribute of the specified instance}
\usage{
ec2_describe_instance_attribute(Attribute, DryRun, InstanceId)
}
\arguments{
\item{Attribute}{[required] The instance attribute.
Note: The \code{enaSupport} attribute is not supported at this time.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InstanceId}{[required] The ID of the instance.}
}
\description{
Describes the specified attribute of the specified instance. You can
specify only one attribute at a time. Valid attribute values are:
\code{instanceType} \| \code{kernel} \| \code{ramdisk} \| \code{userData} \|
\code{disableApiTermination} \| \code{instanceInitiatedShutdownBehavior} \|
\code{rootDeviceName} \| \code{blockDeviceMapping} \| \code{productCodes} \|
\code{sourceDestCheck} \| \code{groupSet} \| \code{ebsOptimized} \| \code{sriovNetSupport}
}
\section{Request syntax}{
\preformatted{svc$describe_instance_attribute(
Attribute = "instanceType"|"kernel"|"ramdisk"|"userData"|"disableApiTermination"|"instanceInitiatedShutdownBehavior"|"rootDeviceName"|"blockDeviceMapping"|"productCodes"|"sourceDestCheck"|"groupSet"|"ebsOptimized"|"sriovNetSupport"|"enaSupport",
DryRun = TRUE|FALSE,
InstanceId = "string"
)
}
}
\examples{
# This example describes the instance type of the specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "instanceType",
InstanceId = "i-1234567890abcdef0"
)}
# This example describes the ``disableApiTermination`` attribute of the
# specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "disableApiTermination",
InstanceId = "i-1234567890abcdef0"
)}
# This example describes the ``blockDeviceMapping`` attribute of the
# specified instance.
#
\donttest{svc$describe_instance_attribute(
Attribute = "blockDeviceMapping",
InstanceId = "i-1234567890abcdef0"
)}
}
\keyword{internal}
|
\name{FPDC}
\alias{FPDC}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Factor probabilistic distance clustering
}
\description{ An implementation of FPDC, a probabilistic factor clustering algorithm that involves a linear transformation of variables and a cluster optimizing the PD-clustering criterion
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
FPDC(data = NULL, k = 2, nf = 2, nu = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{ A matrix or data frame such that rows correspond to observations and columns correspond to variables.
%% ~~Describe \code{data} here~~
}
\item{k}{A numerical parameter giving the number of clusters
%% ~~Describe \code{k} here~~
}
\item{nf}{A numerical parameter giving the number of factors for variables
%% ~~Describe \code{nf} here~~
}
\item{nu}{A numerical parameter giving the number of factors for units
%% ~~Describe \code{nu} here~~
}
}
\value{ A class FPDclustering list with components
%% ~Describe the value returned
%% If it is a LIST, use label=l, centers=c, probability=p, JDF=JDF, JDFIter=JDFv, iter=iter, explained
\item{label }{A vector of integers indicating the cluster membership for each unit}
\item{centers }{A matrix of cluster centers}
\item{probability }{A matrix of probability of each point belonging to each cluster}
\item{JDF }{The value of the Joint distance function}
\item{iter}{The number of iterations}
\item{explained }{The explained variability}
\item{data }{the data set}
%% ...
}
\references{
Tortora, C., M. Gettler Summa, M. Marino, and F. Palumbo. \emph{Factor probabilistic distance clustering
(fpdc): a new clustering method for high dimensional data sets}. Advances in Data Analysis and Classification, 10(4), 441-464, 2016. doi:10.1007/s11634-015-0219-5.
Tortora C., Gettler Summa M., and Palumbo F..
Factor pd-clustering. In Lausen et al., editor, \emph{Algorithms from and for Nature and Life, Studies in Classification}, Data Analysis, and Knowledge Organization DOI 10.1007/978-3-319-00035-011, 115-123, 2013.
Tortora C., \emph{Non-hierarchical clustering methods on factorial subspaces}, 2012.
%% ~put references to the literature/web site here ~
}
\author{Cristina Tortora and Paul D. McNicholas
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{PDC}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
# Asymmetric data set clustering example (with shape 3).
data('asymmetric3')
x<-asymmetric3[,-1]
#Clustering
fpdas3=FPDC(x,4,3,3)
#Results
table(asymmetric3[,1],fpdas3$label)
Silh(fpdas3$probability)
summary(fpdas3)
plot(fpdas3)
}
\dontrun{
# Asymmetric data set clustering example (with shape 20).
data('asymmetric20')
x<-asymmetric20[,-1]
#Clustering
fpdas20=FPDC(x,4,3,3)
#Results
table(asymmetric20[,1],fpdas20$label)
Silh(fpdas20$probability)
summary(fpdas20)
plot(fpdas20)
}
\dontrun{
# Clustering example with outliers.
data('outliers')
x<-outliers[,-1]
#Clustering
fpdout=FPDC(x,4,5,4)
#Results
table(outliers[,1],fpdout$label)
Silh(fpdout$probability)
summary(fpdout)
plot(fpdout)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
/man/FPDC.Rd
|
no_license
|
cran/FPDclustering
|
R
| false
| false
| 3,314
|
rd
|
\name{FPDC}
\alias{FPDC}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Factor probabilistic distance clustering
}
\description{ An implementation of FPDC, a probabilistic factor clustering algorithm that involves a linear transformation of variables and a clustering step optimizing the PD-clustering criterion
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
FPDC(data = NULL, k = 2, nf = 2, nu = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{ A matrix or data frame such that rows correspond to observations and columns correspond to variables.
%% ~~Describe \code{data} here~~
}
\item{k}{A numerical parameter giving the number of clusters
%% ~~Describe \code{k} here~~
}
\item{nf}{A numerical parameter giving the number of factors for variables
%% ~~Describe \code{nf} here~~
}
\item{nu}{A numerical parameter giving the number of factors for units
%% ~~Describe \code{nu} here~~
}
}
\value{ A class FPDclustering list with components
%% ~Describe the value returned
%% If it is a LIST, use label=l, centers=c, probability=p, JDF=JDF, JDFIter=JDFv, iter=iter, explained
\item{label }{A vector of integers indicating the cluster membership for each unit}
\item{centers }{A matrix of cluster centers}
\item{probability }{A matrix of probability of each point belonging to each cluster}
\item{JDF }{The value of the Joint distance function}
\item{iter}{The number of iterations}
\item{explained }{The explained variability}
\item{data }{the data set}
%% ...
}
\references{
Tortora, C., M. Gettler Summa, M. Marino, and F. Palumbo. \emph{Factor probabilistic distance clustering
(fpdc): a new clustering method for high dimensional data sets}. Advances in Data Analysis and Classification, 10(4), 441-464, 2016. doi:10.1007/s11634-015-0219-5.
Tortora C., Gettler Summa M., and Palumbo F..
Factor pd-clustering. In Lausen et al., editor, \emph{Algorithms from and for Nature and Life, Studies in Classification}, Data Analysis, and Knowledge Organization DOI 10.1007/978-3-319-00035-011, 115-123, 2013.
Tortora C., \emph{Non-hierarchical clustering methods on factorial subspaces}, 2012.
%% ~put references to the literature/web site here ~
}
\author{Cristina Tortora and Paul D. McNicholas
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{PDC}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
# Asymmetric data set clustering example (with shape 3).
data('asymmetric3')
x<-asymmetric3[,-1]
#Clustering
fpdas3=FPDC(x,4,3,3)
#Results
table(asymmetric3[,1],fpdas3$label)
Silh(fpdas3$probability)
summary(fpdas3)
plot(fpdas3)
}
\dontrun{
# Asymmetric data set clustering example (with shape 20).
data('asymmetric20')
x<-asymmetric20[,-1]
#Clustering
fpdas20=FPDC(x,4,3,3)
#Results
table(asymmetric20[,1],fpdas20$label)
Silh(fpdas20$probability)
summary(fpdas20)
plot(fpdas20)
}
\dontrun{
# Clustering example with outliers.
data('outliers')
x<-outliers[,-1]
#Clustering
fpdout=FPDC(x,4,5,4)
#Results
table(outliers[,1],fpdout$label)
Silh(fpdout$probability)
summary(fpdout)
plot(fpdout)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
# plot functions for vignettes - draw cartoons / schematic diagrams

#' draw an empty diagram
#'
#' @param height numeric, vertical extent of the (unit-width) plot area
plot_schematic_blank <- function(height=1) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "blank"))
  x_range <- c(0, 1)
  y_range <- c(1 - height, 1)
  parplot(x_range, y_range, type="n", xlim=x_range, ylim=y_range)
}
#' draw a horizontal line with a title
#'
#' @param main character, title text placed at the midpoint of the line
#' @param xlim numeric of length 2, horizontal extent of the line
#' @param y numeric, vertical position of the line and title
add_schematic_line_header <- function(main, xlim, y) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass("schematic")
  y_pair <- rep(y, 2)
  lines(xlim, y_pair, Rcssclass="line_header")
  x_mid <- mean(xlim)
  text(x_mid, y, main, Rcssclass="line_header")
}
#' draw a cartoon ontology graph
#'
#' Draws a root node, three mid-level nodes, and six leaves connected by
#' edges (a small three-branch tree).
#'
#' @param root numeric of length 2, coordinates for the root node of graph
#' @param width numeric, horizontal spread of the six leaf nodes
#' @param height numeric, vertical distance from root to leaves
add_schematic_ontology <- function(root, width=0.3, height=0.1) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass("schematic")
  leaves <- seq(root[1]-width/2, root[1]+width/2, length=6)
  mids <- c(mean(leaves[1:2]), mean(leaves[3:4]), mean(leaves[5:6]))
  # each subtree is one polyline: root -> mid -> leaf -> mid -> leaf
  edge_y <- root[2] - c(0, 0.5, 1, 0.5, 1)*height
  for (k in 1:3) {
    leaf_pair <- leaves[c(2*k - 1, 2*k)]
    lines(c(root[1], mids[k], leaf_pair[1], mids[k], leaf_pair[2]), edge_y,
          Rcssclass="ontology")
  }
  # nodes are drawn after the edges so they sit on top
  points(root[1], root[2], Rcssclass="ontology")
  points(mids, rep(root[2]-height/2, 3), Rcssclass="ontology")
  points(leaves, rep(root[2]-height, 6), Rcssclass="ontology")
}
#' draw content for one term
#'
#' Each element of x is rendered as a header line (the term id) followed by
#' an indented block of shortened content lines (name, definition, parents).
#'
#' @param pos numeric of length 2, coordinate of top-left corner
#' @param x list, item for crossmap; each element carries metadata$id and
#'   data$name / data$def / data$parents
#' @param max.width numeric, width used to cut long text (via shorten())
#' @param max.lines integer, maximal number of lines to print per term
#' @param line.height numeric, height of each line
#' @param indent numeric, horizontal offset prepended to content lines to
#'   create an appearance of an indented block
#'
add_schematic_term_description <- function(pos, x, max.width=0.3,
                                           max.lines=5,
                                           line.height=0.05,
                                           indent=0.04) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "term"))
  # per term: a short character vector, "id:" first, then content lines,
  # shortened to max.width and capped at max.lines
  result <- lapply(x, function(term) {
    term_result <- c(paste0(term$metadata$id, ":"),
                     term$data$name, term$data$def, unlist(term$data$parents))
    head(shorten(term_result, max.width), max.lines)
  })
  y <- pos[2]
  for (i in seq_along(result)) {
    i.data <- result[[i]]
    # header (term id) at the left margin
    text(pos[1], y, i.data[1])
    i.data <- i.data[-1]
    # NOTE(review): the first content line is drawn at the same y as the
    # header (y is only decremented after drawing) — confirm that id and
    # first content sharing a line is the intended layout
    for (j in seq_along(i.data)) {
      text(pos[1]+indent, y, i.data[j])
      y <- y - line.height
    }
  }
}
#' draw a table, row-by-row
#'
#' Draws a header row, a horizontal rule beneath it, the data rows, and
#' vertical separators between the columns.
#'
#' @param pos numeric, top-left coordinate of table
#' @param x matrix or data frame
#' @param max.width numeric, width of table
#' @param line.height numeric, height of one table row
add_schematic_table <- function(pos, x, max.width, line.height=0.05) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "table"))
  # column midpoints: even positions of a 2*ncol+1 grid across the width
  grid_x <- seq(pos[1]-max.width/2, pos[1]+max.width/2, length=2*ncol(x)+1)
  col.mids <- grid_x[seq(2, length(grid_x), by=2)]
  n_cols <- length(col.mids)
  # header row and the rule below it
  text(col.mids, rep(pos[2], n_cols), colnames(x), Rcssclass="header")
  lines(pos[1]+c(-max.width/2, max.width/2), rep(pos[2]-line.height/2, 2))
  # data rows, top to bottom
  y <- pos[2]-line.height
  for (row in seq_len(nrow(x))) {
    text(col.mids, rep(y, n_cols), as.character(x[row,]))
    y <- y - line.height
  }
  # vertical separators between adjacent column midpoints
  for (j in seq_along(col.mids)[-1]) {
    lines(rep((col.mids[j]+col.mids[j-1])/2, 2), c(y, pos[2])+line.height/2)
  }
}
#' draw a small heatmap
#'
#' Cell shading encodes the values in x (expected in [0, 1]) as a two-digit
#' hex alpha channel appended to a base color ("#RRGGBBAA").
#'
#' @param pos numeric of length 2, top-left corner of the heatmap
#' @param x numeric matrix with values in [0, 1]; rownames are row labels
#' @param width numeric, total width of the heatmap
#' @param height numeric, total height of the heatmap
#' @param color character, base color (6-digit hex string) for the cells
add_schematic_heatmap <- function(pos, x, width=0.2, height=0.10,
                                  color="#222222") {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "heatmap"))
  boxes_x <- seq(pos[1]-width/2, pos[1]+width/2, length=ncol(x)+1)
  boxes_left <- rev(rev(boxes_x)[-1])
  boxes_right <- boxes_x[-1]
  row.height <- height/nrow(x)
  # map values in [0, 1] to two-digit hex alpha codes; "%02x" zero-pads,
  # so e.g. 10 -> "0a". (The previous "%x" format produced single-digit
  # codes for values 1..15, yielding malformed 7-character color strings;
  # the old x_trans[x_trans=="0"] <- "00" patch only covered zero.)
  x_trans <- matrix(sprintf("%02x", as.integer(x*255)), ncol=ncol(x))
  for (i in seq_len(nrow(x))) {
    rect(boxes_left, rep(pos[2]-(i-1)*row.height, length(boxes_left)),
         boxes_right, rep(pos[2]-i*row.height, length(boxes_left)),
         col=paste0(color, x_trans[i,]),
         Rcssclass="cell")
    # right-aligned row label to the left of the heatmap
    text(pos[1]-width/2, pos[2]-(i-0.5)*row.height, rownames(x)[i],
         Rcssclass="axis", adj=c(1, 0.5))
  }
  # outer border and the axis title above the heatmap
  rect(min(boxes_left), pos[2], max(boxes_right), pos[2]-height,
       Rcssclass="border")
  text(pos[1], pos[2]+0.5*row.height, "phenotypes",
       Rcssclass="axis", adj=c(0.5, 0))
}
#' draw closed polygon centered around (x, y), radius r, with n_segments
#'
#' @param center numeric of length 2, coordinates for marker center
#' @param r numeric, size of marker
#' @param label character, text for center of marker
#' @param n_segments integer, number of segments for marker
#'   (rectangle, pentagon, hexagon)
#' @param Rcssclass character, style class
draw_knn_marker <- function(center, r, label=NULL, n_segments=5,
                            Rcssclass=NULL) {
  # n_segments equally spaced vertex angles around the circle
  theta <- head(seq(0, 2*pi, length=n_segments+1), n_segments)
  vertex_x <- center[1] + r*sin(theta)
  vertex_y <- center[2] + r*cos(theta)
  polygon(vertex_x, vertex_y, Rcssclass=Rcssclass)
  text(center[1], center[2], label, Rcssclass=Rcssclass)
}
#' plot a schematic of one node and its neighbors
#'
#' This extracts values from knn {} selector in css
#'
#' @param label character, label for central gene
#' @param neighbors character vector, labels for neighbor nodes
#' @param neighbor_style character vector, css styles for neighbors
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
plot_schematic_knn <- function(label, neighbors, neighbor_style=neighbors,
                               xlim=c(-1, 1), ylim=c(-1, 1),
                               Rcssclass=NULL) {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  r_knn <- RcssValue("knn", "r_knn", default=0.8, Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  parplot(xlim, ylim, type="n")
  n <- length(neighbors)
  # place neighbors at equal angles around a circle of radius r_knn
  angles <- head(seq(0, 2*pi, length=n+1), n)
  # each column is one spoke (center, neighbor position, NA separator),
  # so all spokes can be drawn with a single lines() call
  radial_x <- rbind(0, r_knn*sin(angles), NA)
  radial_y <- rbind(0, r_knn*cos(angles), NA)
  lines(as.numeric(radial_x), as.numeric(radial_y), Rcssclass="radial")
  for (i in seq_along(neighbors)) {
    draw_knn_marker(c(radial_x[2,i], radial_y[2,i]), r_neighbor,
                    n_segments=n_segments,
                    label=neighbors[i], Rcssclass=neighbor_style[i])
  }
  # central node drawn last so it sits on top of the spokes
  draw_knn_marker(c(0, 0), r_primary, n_segments=n_segments,
                  label=label, Rcssclass="primary")
}
#' plot a new chart with a legend for the knn schematic
#'
#' This extracts values from knn {} selector in css
#'
#' @param primary_label character, label for central gene
#' @param neighbor_label character, label for neighbor node
#' @param property named character vector, for drawing color boxes; names
#'   select the Rcss style class, values give the displayed text
#' @param markers_x numeric, x-position for markers
#' @param labels_x numeric, x-position for legend labels
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
#'
plot_schematic_knn_legend <- function(primary_label="",
                                      neighbor_label="",
                                      property=c(abc="abc", xyz="xyz"),
                                      markers_x=-0.75,
                                      labels_x=-0.5,
                                      xlim=c(-1, 1), ylim=c(-1, 1),
                                      Rcssclass="legend") {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  line_height <- RcssValue("knn", "line_height", default=0.3,
                           Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  # legend rows are laid out top-down, one line_height apart
  y <- ylim[2] - line_height
  parplot(xlim, ylim, type="n")
  # draw two types of markers with labels
  draw_knn_marker(c(markers_x, y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(labels_x, y, primary_label, Rcssclass="legend")
  y <- y - line_height
  draw_knn_marker(c(markers_x, y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(labels_x, y, neighbor_label, Rcssclass="legend")
  # draw rectangles with property colors, one legend row per property entry
  marker_width <- (labels_x - markers_x)/2
  for (i in seq_along(property)) {
    y <- y - line_height
    rect(markers_x-marker_width, y-line_height/3,
         markers_x+marker_width, y+line_height/3,
         Rcssclass=names(property[i]))
    text(labels_x, y, property[i], Rcssclass="legend")
  }
}
#' plot a new chart with an equation explaining neighbor averaging
#'
#' This extracts values from knn {} selector in css
#'
#' Draws a two-row marker legend followed by a pictorial equation
#' "error = | primary - avg( neighbor ) |" built from markers, text,
#' and vertical bars standing in for the norm.
#'
#' @param primary_label character, label for central gene
#' @param neighbor_label character, label for neighbor node
#' @param markers_x numeric, x-position for markers
#' @param labels_x numeric, x-position for legend labels
#' @param eq_x numeric of length 5, x-positions for components in the equation
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
#'
plot_schematic_knn_errors <- function(primary_label="",
                                      neighbor_label="",
                                      markers_x=-0.75,
                                      labels_x=-0.5,
                                      eq_x=c(-0.5, 0.0, 0.4, 0.6, 0.9),
                                      xlim=c(-1, 1), ylim=c(-1, 1),
                                      Rcssclass="legend") {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  line_height <- RcssValue("knn", "line_height", default=0.3,
                           Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  y <- ylim[2] - line_height
  parplot(xlim, ylim, type="n")
  # draw two types of markers with labels
  draw_knn_marker(c(markers_x, y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(labels_x, y, primary_label, Rcssclass="legend")
  y <- y - line_height
  draw_knn_marker(c(markers_x, y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(labels_x, y, neighbor_label, Rcssclass="legend")
  # draw formula for average; the two lines() calls are the left and right
  # vertical bars of the norm, placed just outside the markers
  y <- y - line_height - line_height
  text(eq_x[1], y, "error = ", Rcssclass="legend")
  lines(rep(eq_x[2]-r_primary*1.25, 2), y+line_height*c(-0.6, 0.6), Rcssclass="norm")
  draw_knn_marker(c(eq_x[2], y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(eq_x[3], y, " - avg (", Rcssclass="legend")
  draw_knn_marker(c(eq_x[4], y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(eq_x[5], y, ")", Rcssclass="legend")
  lines(rep(eq_x[5]+(r_neighbor*0.75), 2), y+line_height*c(-0.6, 0.6), Rcssclass="norm")
}
|
/R/plot_schematics.R
|
permissive
|
tkonopka/mouse-embeddings
|
R
| false
| false
| 11,587
|
r
|
# plot functions for vignettes - draw cartoons / schematic diagrams

#' draw an empty diagram
#'
#' @param height numeric, vertical extent of the (unit-width) plot area
plot_schematic_blank <- function(height=1) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "blank"))
  x_range <- c(0, 1)
  y_range <- c(1 - height, 1)
  parplot(x_range, y_range, type="n", xlim=x_range, ylim=y_range)
}
#' draw a horizontal line with a title
#'
#' @param main character, title text placed at the midpoint of the line
#' @param xlim numeric of length 2, horizontal extent of the line
#' @param y numeric, vertical position of the line and title
add_schematic_line_header <- function(main, xlim, y) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass("schematic")
  y_pair <- rep(y, 2)
  lines(xlim, y_pair, Rcssclass="line_header")
  x_mid <- mean(xlim)
  text(x_mid, y, main, Rcssclass="line_header")
}
#' draw a cartoon ontology graph
#'
#' Draws a root node, three mid-level nodes, and six leaves connected by
#' edges (a small three-branch tree).
#'
#' @param root numeric of length 2, coordinates for the root node of graph
#' @param width numeric, horizontal spread of the six leaf nodes
#' @param height numeric, vertical distance from root to leaves
add_schematic_ontology <- function(root, width=0.3, height=0.1) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass("schematic")
  leaves <- seq(root[1]-width/2, root[1]+width/2, length=6)
  mids <- c(mean(leaves[1:2]), mean(leaves[3:4]), mean(leaves[5:6]))
  # each subtree is one polyline: root -> mid -> leaf -> mid -> leaf
  edge_y <- root[2] - c(0, 0.5, 1, 0.5, 1)*height
  for (k in 1:3) {
    leaf_pair <- leaves[c(2*k - 1, 2*k)]
    lines(c(root[1], mids[k], leaf_pair[1], mids[k], leaf_pair[2]), edge_y,
          Rcssclass="ontology")
  }
  # nodes are drawn after the edges so they sit on top
  points(root[1], root[2], Rcssclass="ontology")
  points(mids, rep(root[2]-height/2, 3), Rcssclass="ontology")
  points(leaves, rep(root[2]-height, 6), Rcssclass="ontology")
}
#' draw content for one term
#'
#' Each element of x is rendered as a header line (the term id) followed by
#' an indented block of shortened content lines (name, definition, parents).
#'
#' @param pos numeric of length 2, coordinate of top-left corner
#' @param x list, item for crossmap; each element carries metadata$id and
#'   data$name / data$def / data$parents
#' @param max.width numeric, width used to cut long text (via shorten())
#' @param max.lines integer, maximal number of lines to print per term
#' @param line.height numeric, height of each line
#' @param indent numeric, horizontal offset prepended to content lines to
#'   create an appearance of an indented block
#'
add_schematic_term_description <- function(pos, x, max.width=0.3,
                                           max.lines=5,
                                           line.height=0.05,
                                           indent=0.04) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "term"))
  # per term: a short character vector, "id:" first, then content lines,
  # shortened to max.width and capped at max.lines
  result <- lapply(x, function(term) {
    term_result <- c(paste0(term$metadata$id, ":"),
                     term$data$name, term$data$def, unlist(term$data$parents))
    head(shorten(term_result, max.width), max.lines)
  })
  y <- pos[2]
  for (i in seq_along(result)) {
    i.data <- result[[i]]
    # header (term id) at the left margin
    text(pos[1], y, i.data[1])
    i.data <- i.data[-1]
    # NOTE(review): the first content line is drawn at the same y as the
    # header (y is only decremented after drawing) — confirm that id and
    # first content sharing a line is the intended layout
    for (j in seq_along(i.data)) {
      text(pos[1]+indent, y, i.data[j])
      y <- y - line.height
    }
  }
}
#' draw a table, row-by-row
#'
#' Draws a header row, a horizontal rule beneath it, the data rows, and
#' vertical separators between the columns.
#'
#' @param pos numeric, top-left coordinate of table
#' @param x matrix or data frame
#' @param max.width numeric, width of table
#' @param line.height numeric, height of one table row
add_schematic_table <- function(pos, x, max.width, line.height=0.05) {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "table"))
  # column midpoints: even positions of a 2*ncol+1 grid across the width
  grid_x <- seq(pos[1]-max.width/2, pos[1]+max.width/2, length=2*ncol(x)+1)
  col.mids <- grid_x[seq(2, length(grid_x), by=2)]
  n_cols <- length(col.mids)
  # header row and the rule below it
  text(col.mids, rep(pos[2], n_cols), colnames(x), Rcssclass="header")
  lines(pos[1]+c(-max.width/2, max.width/2), rep(pos[2]-line.height/2, 2))
  # data rows, top to bottom
  y <- pos[2]-line.height
  for (row in seq_len(nrow(x))) {
    text(col.mids, rep(y, n_cols), as.character(x[row,]))
    y <- y - line.height
  }
  # vertical separators between adjacent column midpoints
  for (j in seq_along(col.mids)[-1]) {
    lines(rep((col.mids[j]+col.mids[j-1])/2, 2), c(y, pos[2])+line.height/2)
  }
}
#' draw a small heatmap
#'
#' Cell shading encodes the values in x (expected in [0, 1]) as a two-digit
#' hex alpha channel appended to a base color ("#RRGGBBAA").
#'
#' @param pos numeric of length 2, top-left corner of the heatmap
#' @param x numeric matrix with values in [0, 1]; rownames are row labels
#' @param width numeric, total width of the heatmap
#' @param height numeric, total height of the heatmap
#' @param color character, base color (6-digit hex string) for the cells
add_schematic_heatmap <- function(pos, x, width=0.2, height=0.10,
                                  color="#222222") {
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "heatmap"))
  boxes_x <- seq(pos[1]-width/2, pos[1]+width/2, length=ncol(x)+1)
  boxes_left <- rev(rev(boxes_x)[-1])
  boxes_right <- boxes_x[-1]
  row.height <- height/nrow(x)
  # map values in [0, 1] to two-digit hex alpha codes; "%02x" zero-pads,
  # so e.g. 10 -> "0a". (The previous "%x" format produced single-digit
  # codes for values 1..15, yielding malformed 7-character color strings;
  # the old x_trans[x_trans=="0"] <- "00" patch only covered zero.)
  x_trans <- matrix(sprintf("%02x", as.integer(x*255)), ncol=ncol(x))
  for (i in seq_len(nrow(x))) {
    rect(boxes_left, rep(pos[2]-(i-1)*row.height, length(boxes_left)),
         boxes_right, rep(pos[2]-i*row.height, length(boxes_left)),
         col=paste0(color, x_trans[i,]),
         Rcssclass="cell")
    # right-aligned row label to the left of the heatmap
    text(pos[1]-width/2, pos[2]-(i-0.5)*row.height, rownames(x)[i],
         Rcssclass="axis", adj=c(1, 0.5))
  }
  # outer border and the axis title above the heatmap
  rect(min(boxes_left), pos[2], max(boxes_right), pos[2]-height,
       Rcssclass="border")
  text(pos[1], pos[2]+0.5*row.height, "phenotypes",
       Rcssclass="axis", adj=c(0.5, 0))
}
#' draw closed polygon centered around (x, y), radius r, with n_segments
#'
#' @param center numeric of length 2, coordinates for marker center
#' @param r numeric, size of marker
#' @param label character, text for center of marker
#' @param n_segments integer, number of segments for marker
#'   (rectangle, pentagon, hexagon)
#' @param Rcssclass character, style class
draw_knn_marker <- function(center, r, label=NULL, n_segments=5,
                            Rcssclass=NULL) {
  # n_segments equally spaced vertex angles around the circle
  theta <- head(seq(0, 2*pi, length=n_segments+1), n_segments)
  vertex_x <- center[1] + r*sin(theta)
  vertex_y <- center[2] + r*cos(theta)
  polygon(vertex_x, vertex_y, Rcssclass=Rcssclass)
  text(center[1], center[2], label, Rcssclass=Rcssclass)
}
#' plot a schematic of one node and its neighbors
#'
#' This extracts values from knn {} selector in css
#'
#' @param label character, label for central gene
#' @param neighbors character vector, labels for neighbor nodes
#' @param neighbor_style character vector, css styles for neighbors
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
plot_schematic_knn <- function(label, neighbors, neighbor_style=neighbors,
                               xlim=c(-1, 1), ylim=c(-1, 1),
                               Rcssclass=NULL) {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  r_knn <- RcssValue("knn", "r_knn", default=0.8, Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  parplot(xlim, ylim, type="n")
  n <- length(neighbors)
  # place neighbors at equal angles around a circle of radius r_knn
  angles <- head(seq(0, 2*pi, length=n+1), n)
  # each column is one spoke (center, neighbor position, NA separator),
  # so all spokes can be drawn with a single lines() call
  radial_x <- rbind(0, r_knn*sin(angles), NA)
  radial_y <- rbind(0, r_knn*cos(angles), NA)
  lines(as.numeric(radial_x), as.numeric(radial_y), Rcssclass="radial")
  for (i in seq_along(neighbors)) {
    draw_knn_marker(c(radial_x[2,i], radial_y[2,i]), r_neighbor,
                    n_segments=n_segments,
                    label=neighbors[i], Rcssclass=neighbor_style[i])
  }
  # central node drawn last so it sits on top of the spokes
  draw_knn_marker(c(0, 0), r_primary, n_segments=n_segments,
                  label=label, Rcssclass="primary")
}
#' plot a new chart with a legend for the knn schematic
#'
#' This extracts values from knn {} selector in css
#'
#' @param primary_label character, label for central gene
#' @param neighbor_label character, label for neighbor node
#' @param property named character vector, for drawing color boxes; names
#'   select the Rcss style class, values give the displayed text
#' @param markers_x numeric, x-position for markers
#' @param labels_x numeric, x-position for legend labels
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
#'
plot_schematic_knn_legend <- function(primary_label="",
                                      neighbor_label="",
                                      property=c(abc="abc", xyz="xyz"),
                                      markers_x=-0.75,
                                      labels_x=-0.5,
                                      xlim=c(-1, 1), ylim=c(-1, 1),
                                      Rcssclass="legend") {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  line_height <- RcssValue("knn", "line_height", default=0.3,
                           Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  # legend rows are laid out top-down, one line_height apart
  y <- ylim[2] - line_height
  parplot(xlim, ylim, type="n")
  # draw two types of markers with labels
  draw_knn_marker(c(markers_x, y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(labels_x, y, primary_label, Rcssclass="legend")
  y <- y - line_height
  draw_knn_marker(c(markers_x, y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(labels_x, y, neighbor_label, Rcssclass="legend")
  # draw rectangles with property colors, one legend row per property entry
  marker_width <- (labels_x - markers_x)/2
  for (i in seq_along(property)) {
    y <- y - line_height
    rect(markers_x-marker_width, y-line_height/3,
         markers_x+marker_width, y+line_height/3,
         Rcssclass=names(property[i]))
    text(labels_x, y, property[i], Rcssclass="legend")
  }
}
#' plot a new chart with an equation explaining neighbor averaging
#'
#' This extracts values from knn {} selector in css
#'
#' Draws a two-row marker legend followed by a pictorial equation
#' "error = | primary - avg( neighbor ) |" built from markers, text,
#' and vertical bars standing in for the norm.
#'
#' @param primary_label character, label for central gene
#' @param neighbor_label character, label for neighbor node
#' @param markers_x numeric, x-position for markers
#' @param labels_x numeric, x-position for legend labels
#' @param eq_x numeric of length 5, x-positions for components in the equation
#' @param xlim numeric of length 2, horizontal plot range
#' @param ylim numeric of length 2, vertical plot range
#' @param Rcssclass character, style class
#'
plot_schematic_knn_errors <- function(primary_label="",
                                      neighbor_label="",
                                      markers_x=-0.75,
                                      labels_x=-0.5,
                                      eq_x=c(-0.5, 0.0, 0.4, 0.6, 0.9),
                                      xlim=c(-1, 1), ylim=c(-1, 1),
                                      Rcssclass="legend") {
  # extract geometry information from css
  n_segments <- RcssValue("knn", "n_segments", default=5, Rcssclass=Rcssclass)
  r_primary <- RcssValue("knn", "r_primary", default=0.2, Rcssclass=Rcssclass)
  r_neighbor <- RcssValue("knn", "r_neighbor", default=0.2, Rcssclass=Rcssclass)
  line_height <- RcssValue("knn", "line_height", default=0.3,
                           Rcssclass=Rcssclass)
  RcssCompulsoryClass <- RcssGetCompulsoryClass(c("schematic", "knn", Rcssclass))
  y <- ylim[2] - line_height
  parplot(xlim, ylim, type="n")
  # draw two types of markers with labels
  draw_knn_marker(c(markers_x, y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(labels_x, y, primary_label, Rcssclass="legend")
  y <- y - line_height
  draw_knn_marker(c(markers_x, y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(labels_x, y, neighbor_label, Rcssclass="legend")
  # draw formula for average; the two lines() calls are the left and right
  # vertical bars of the norm, placed just outside the markers
  y <- y - line_height - line_height
  text(eq_x[1], y, "error = ", Rcssclass="legend")
  lines(rep(eq_x[2]-r_primary*1.25, 2), y+line_height*c(-0.6, 0.6), Rcssclass="norm")
  draw_knn_marker(c(eq_x[2], y), r_primary, n_segments=n_segments,
                  label="", Rcssclass="primary")
  text(eq_x[3], y, " - avg (", Rcssclass="legend")
  draw_knn_marker(c(eq_x[4], y), r_neighbor, n_segments=n_segments,
                  label="", Rcssclass="neighbor")
  text(eq_x[5], y, ")", Rcssclass="legend")
  lines(rep(eq_x[5]+(r_neighbor*0.75), 2), y+line_height*c(-0.6, 0.6), Rcssclass="norm")
}
|
# ISLR chapter 4 exercises: classification on the Weekly stock-market data.
# Compares logistic regression, LDA, QDA, and KNN on the same six predictors.
library(ISLR)
# NOTE: attach() kept so Lag1..Lag5, Volume, Direction, Year resolve directly
attach(Weekly)

# scatterplot matrix, colored by market direction
pairs(Weekly, col = Direction)

# Logistic regression on the full data set
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Weekly, family = binomial)
summary(glm.fit)
glm.prob <- predict(glm.fit, Weekly, type = "response")
glm.pred <- ifelse(glm.prob > 0.5, "Up", "Down")
table(glm.pred, Direction)
mean(glm.pred == Direction)  # training accuracy

# Train-test split: fit on years before 2009, evaluate on the rest
train.logical <- Year < 2009
glm.fit <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Weekly, subset = train.logical, family = binomial)
summary(glm.fit)
test.data <- Weekly[!train.logical, ]
test.Direction <- Direction[!train.logical]
glm.prob <- predict(glm.fit, test.data, type = "response")
glm.pred <- ifelse(glm.prob > 0.5, "Up", "Down")
table(glm.pred, test.Direction)
mean(glm.pred == test.Direction)  # held-out accuracy

# LDA
library(MASS)
lda.fit <- lda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Weekly, subset = train.logical)
lda.fit
plot(lda.fit)
lda.pred <- predict(lda.fit, test.data)
table(lda.pred$class, test.Direction)
mean(lda.pred$class == test.Direction)

# QDA
qda.fit <- qda(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
               data = Weekly, subset = train.logical)
qda.fit
qda.pred <- predict(qda.fit, test.data)
table(qda.pred$class, test.Direction)
mean(qda.pred$class == test.Direction)

# KNN (k = 1); build the predictor matrix once instead of twice
library(class)
predictors <- cbind(Lag1, Lag2, Lag3, Lag4, Lag5, Volume)
train.X <- predictors[train.logical, ]
test.X <- predictors[!train.logical, ]
train.Direction <- Direction[train.logical]
knn.fit <- knn(train.X, test.X, train.Direction, 1)
table(knn.fit, test.Direction)
mean(knn.fit == test.Direction)
|
/ch4/hw4.R
|
no_license
|
yc3526/stat-learning-exercises
|
R
| false
| false
| 1,564
|
r
|
# ISLR chapter 4 exercises: classification on the Weekly stock-market data,
# comparing logistic regression, LDA, QDA, and KNN on the same predictors.
library(ISLR)
# open the data set help page (interactive-session leftover)
?Weekly
# attach so Lag1..Lag5, Volume, Direction, Year resolve without Weekly$
attach(Weekly)
pairs(Weekly, col=Direction)
# Logistic regression
glm.fit = glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly, family=binomial)
summary(glm.fit)
# classify at the 0.5 posterior-probability threshold
glm.prob = predict(glm.fit, Weekly, type="response")
glm.pred = ifelse(glm.prob > 0.5, "Up", "Down")
table(glm.pred, Direction)
# training accuracy (fit and evaluated on the same data)
mean(glm.pred == Direction)
# train-test split
# training years: before 2009; the remainder is the held-out test set
train.logical = Year < 2009
# interactive-session leftover
?cbind
glm.fit = glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly, subset=train.logical, family=binomial)
summary(glm.fit)
test.data = Weekly[!train.logical, ]
test.Direction = Direction[!train.logical]
glm.prob = predict(glm.fit, test.data, type="response")
glm.pred = ifelse(glm.prob > 0.5, "Up", "Down")
table(glm.pred, test.Direction)
# held-out accuracy
mean(glm.pred == test.Direction)
# LDA (linear discriminant analysis), fit on the training years only
library(MASS)
lda.fit = lda(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly, subset=train.logical)
lda.fit
plot(lda.fit)
lda.pred = predict(lda.fit, test.data)
table(lda.pred$class, test.Direction)
mean(lda.pred$class == test.Direction)
# QDA (quadratic discriminant analysis)
qda.fit = qda(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly, subset=train.logical)
qda.fit
qda.pred = predict(qda.fit, test.data)
table(qda.pred$class, test.Direction)
mean(qda.pred$class == test.Direction)
# KNN with k = 1 on the same six predictors
library(class)
train.X = cbind(Lag1,Lag2,Lag3,Lag4,Lag5,Volume)[train.logical, ]
test.X = cbind(Lag1,Lag2,Lag3,Lag4,Lag5,Volume)[!train.logical, ]
train.Direction = Direction[train.logical]
knn.fit = knn(train.X, test.X, train.Direction, 1)
table(knn.fit, test.Direction)
mean(knn.fit == test.Direction)
|
library(tidyverse)
library(survey)

# Loading
# imputed variables, joined onto the main data set below by (SEQN, cycle)
cvd_imp =
  readRDS(file = '../1 - Data Assembly/Datasets/cvd_IMP.rds')

# Loading dataset and creating censoring time at the end of follow up #
# Derived columns:
# - exam_date: Feb 1 (RIDEXMON == 1) or Aug 1 of the cycle's second year
# - fup_time: months from exam_date to 2015-12-31, using 365.25/12 days/month
# - time_exm2: for deaths without the CVD outcome (mortstat == 1,
#   cvd_outcome == 0) whose follow-up exceeds time_exm, extend the time to
#   fup_time; used later as the competing-risks time scale
# - pce_risk_IMP rescaled to a percentage; PAG_MINW divided by 60
#   (presumably minutes -> hours per week — confirm units)
cvd_data =
  readRDS(file = '../1 - Data Assembly/Datasets/cvd_final.rds') %>%
  left_join(cvd_imp, by = c('SEQN', 'cycle')) %>%
  separate(cycle, c("cycle1", "cycle2"), remove=FALSE) %>%
  mutate(exam_date = as.Date(ifelse(RIDEXMON == 1,
                                    paste("02/01", cycle2, sep="/"),
                                    paste("08/01", cycle2, sep="/")), "%m/%d/%Y"),
         fup_time = as.numeric(round((as.Date("2015-12-31") - exam_date)/(365.25/12))),
         time_exm2 = ifelse(mortstat == 1 & cvd_outcome == 0 & fup_time > time_exm,
                            fup_time,
                            time_exm),
         pce_risk_IMP = pce_risk_IMP*100,
         PAG_MINW = PAG_MINW/60) %>%
  # indicator variables converted to factors for modeling
  mutate_at(vars(flag_infnt_sga,
                 flag_any_brstfd,
                 flag_any_brstfd_1m,
                 brstfd,
                 flag_marit_1,
                 flag_educ_hs,
                 flag_parity_gt1), list(~as.factor(.))) %>%
  # restrict to the analysis cohort with a completed dietary recall
  filter(cohort == 1, diet_recall == 1)
##########################
# DEFINING SURVEY DESIGN #
##########################
# Using sampling weight from diet data
# Complex survey design: PSUs nested within strata, weighted by WTDR_C1
# (presumably the dietary-recall sampling weight — confirm against the
# data-assembly step that created it)
nhanes <- svydesign(id=~SDMVPSU,
                    strata=~SDMVSTRA,
                    nest=TRUE,
                    weights=~WTDR_C1,
                    data=cvd_data)

################
# KAPLAN-MEIER #
################
# draws the Kaplan-Meier plots; relies on the objects defined above
source('4.3 - KM plots.R')

################
# COX PH MODEL #
################
# defines cvd_surv(), the model-fitting helper used in the sections below
source('cvd_surv.R')
# Covariates #
# adjustment set shared by all covariate-adjusted models below
cov = c('flag_marit_1',
        'flag_educ_hs',
        'flag_parity_gt1',
        'age_fst_live_brth',
        'HEI2015_TOTAL_SCORE',
        'PAG_MINW',
        'bmi',
        'pce_risk')

# Preg #
# unadjusted models: one per exposure (SGA infant, any breastfeeding >= 1
# month) crossed with the two outcome definitions (cvd_outcome,
# cvd_outcome2)
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')

# Preg + cov #
# the same four models, adjusted for the shared covariate set
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')

# stack all eight result tables and export
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
  write.csv('Output/surv.csv', row.names = FALSE)
# Shoenfeld Residuals #
# SHOENFELD RESIDUAL
# Plot scaled Schoenfeld residuals for each covariate of a fitted Cox model
# to inspect the proportional-hazards assumption.
# - fit: a fitted Cox model accepted by survival::cox.zph()
# - title: character vector, one panel title per plotted covariate
plot_resid <- function(fit, title){
  scho <- cox.zph(fit)
  # number of per-covariate panels; with more than one row, the last row of
  # the cox.zph table is skipped (presumably the GLOBAL test — confirm
  # against the installed survival version's table layout)
  p <- ifelse(dim(scho$table)[1] == 1, 1, dim(scho$table)[1]-1)
  for(i in 1:p){
    plot(scho[i], col.lab = "transparent")
    title(main=title[i], ylab="Schoenfeld Residuals")
    #legend('topleft',
    #legend = paste("p-value", ifelse(round(scho$table[i,3],3)>=0.001, format(round(scho$table[i,3], 3), nsmall = 3), '< 0.001'), sep = ": "), box.lty = 0)
    # zero reference line: no trend expected under proportional hazards
    abline(0, 0, col = 'red', lwd = 2)
  }
}
# Render Schoenfeld-residual diagnostic panels to a single PDF.
# The fitted objects (fit1_sga, fit2_sga, fit1_bf, fit2_bf and their
# *_pce variants) are created elsewhere -- presumably inside the sourced
# scripts above; TODO confirm where they are defined.
pdf(file = 'Output/schoenfeld_cause_spec.pdf', width = 8, height = 8)
# 2x2 grid: one panel per unadjusted model (SGA / BF, two outcomes each)
par(mfrow = c(2,2), mar = c(2, 4, 3, 1) + 0.1)
plot_resid(fit1_sga, title="CVD death - SGA")
plot_resid(fit2_sga, title="CVD death + HTN/DM - SGA")
plot_resid(fit1_bf, title="CVD death - BF")
plot_resid(fit2_bf, title=" CVD death + HTN/DM - BF")
# 4x2 grid: models that also include PCE risk -> two panels per fit
par(mfrow = c(4,2), mar = c(2, 4, 3, 1) + 0.1)
plot_resid(fit1_sga_pce, title=c("CVD death - SGA", "CVD death - PCE"))
plot_resid(fit2_sga_pce, title=c("CVD death + HTN/DM - SGA", "CVD death + HTN/DM - PCE"))
plot_resid(fit1_bf_pce, title=c("CVD death - BF", "CVD death - PCE"))
plot_resid(fit2_bf_pce, title=c("CVD death + HTN/DM - BF", "CVD death + HTN/DM - PCE"))
# restore single-panel layout before closing the device
par(mfrow = c(1,1), mar = c(2, 4, 3, 1) + 0.1)
dev.off()
#####################################
# FINE-GRAY MODEL - COMPETING RISKS #
#####################################
# Same eight model specifications as the Cox section above, but fitted on
# time_exm2 (follow-up time extended for non-CVD deaths; see its
# definition in the data step) -- the competing-risks analogue.
# Note: sga/bf/etc. are reused and overwritten from the section above.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
write.csv('Output/surv2.csv', row.names = FALSE)
#################################
# COX PH MODEL - IMPUTED VALUES #
#################################
# Covariates #
# Same adjustment set as above, but using the imputed versions ("_IMP").
# (Fixed: assignment now uses `<-` instead of `=`, per tidyverse style.)
cov <- c('flag_marit_1_IMP',
         'flag_educ_hs_IMP',
         'flag_parity_gt1_IMP',
         'age_fst_live_brth_IMP',
         'HEI2015_TOTAL_SCORE',
         'PAG_MINW',
         'bmi_IMP',
         'pce_risk_IMP')
# Preg #
# NOTE(review): these unadjusted fits use exactly the same arguments as
# the earlier (non-imputed) section, so they recompute identical models;
# kept so surv_IMP.csv is self-contained.  Confirm this is intended.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
# Covariate-adjusted models using the imputed covariate set.
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
  write.csv('Output/surv_IMP.csv', row.names = FALSE)
######################################################
# FINE-GRAY MODEL - COMPETING RISKS - IMPUTED VALUES #
######################################################
# Competing-risks versions of the imputed-covariate models: same calls as
# the Fine-Gray section above (time_exm2), with `cov` now holding the
# "_IMP" covariates redefined in the preceding section.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
write.csv('Output/surv2_IMP.csv', row.names = FALSE)
|
/2 - Data Analysis/4 - Survival Analysis.R
|
no_license
|
tamytsujimoto/cvd_pregancy
|
R
| false
| false
| 7,825
|
r
|
library(tidyverse)
library(survey)
# Loading
# Imputed covariate values, keyed by participant (SEQN) and survey cycle.
# NOTE(review): `=` used for top-level assignment here; tidyverse style
# prefers `<-` (left unchanged in this comment-only pass).
cvd_imp =
readRDS(file = '../1 - Data Assembly/Datasets/cvd_IMP.rds')
# Loading dataset and creating censoring time at the end of follow up #
cvd_data =
readRDS(file = '../1 - Data Assembly/Datasets/cvd_final.rds') %>%
left_join(cvd_imp, by = c('SEQN', 'cycle')) %>%  # attach imputed covariates
separate(cycle, c("cycle1", "cycle2"), remove=FALSE) %>%  # split cycle label into two parts -- assumes "YYYY_YYYY"-style labels; TODO confirm
mutate(exam_date = as.Date(ifelse(RIDEXMON == 1,
paste("02/01", cycle2, sep="/"),
paste("08/01", cycle2, sep="/")), "%m/%d/%Y"),  # approximate exam date: Feb 1 vs Aug 1 of cycle2 by exam season flag RIDEXMON
fup_time = as.numeric(round((as.Date("2015-12-31") - exam_date)/(365.25/12))),  # months from exam to end of mortality follow-up
time_exm2 = ifelse(mortstat == 1 & cvd_outcome == 0 & fup_time > time_exm,
fup_time,
time_exm),  # non-CVD deaths: extend time to end of follow-up (used by the competing-risks models) -- NOTE(review): confirm intent
pce_risk_IMP = pce_risk_IMP*100,  # proportion -> percent
PAG_MINW = PAG_MINW/60) %>%  # minutes/week -> hours/week
mutate_at(vars(flag_infnt_sga,
flag_any_brstfd,
flag_any_brstfd_1m,
brstfd,
flag_marit_1,
flag_educ_hs,
flag_parity_gt1), list(~as.factor(.))) %>%  # code flags as factors
filter(cohort == 1, diet_recall == 1)  # cohort 1 with a valid diet recall only
##########################
# DEFINING SURVEY DESIGN #
##########################
# Survey design object for the NHANES complex sample, using the day-1
# dietary sampling weight (WTDR_C1) so estimates reflect the diet-recall
# subsample of cohort 1.
nhanes <- svydesign(
  id      = ~SDMVPSU,   # primary sampling units
  strata  = ~SDMVSTRA,  # sampling strata
  weights = ~WTDR_C1,   # diet day-1 sampling weight
  nest    = TRUE,       # PSU ids are nested within strata
  data    = cvd_data
)
################
# KAPLAN-MEIER #
################
source('4.3 - KM plots.R')
################
# COX PH MODEL #
################
# Loads cvd_surv(), the model-fitting wrapper used for every fit below.
source('cvd_surv.R')
# Covariates #
# Adjustment covariates for the "+ cov" models.
# NOTE(review): `cov` masks stats::cov() for the rest of the session;
# name kept because downstream calls pass it as `cov = cov`.
# (Fixed: assignment now uses `<-` instead of `=`, per tidyverse style.)
cov <- c('flag_marit_1',
         'flag_educ_hs',
         'flag_parity_gt1',
         'age_fst_live_brth',
         'HEI2015_TOTAL_SCORE',
         'PAG_MINW',
         'bmi',
         'pce_risk')
# Preg #
# Unadjusted models: exposure (SGA infant / any breastfeeding >= 1 month)
# vs CVD death (cvd_outcome) and CVD death + HTN/DM (cvd_outcome2).
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
# Covariate-adjusted versions of the same four models.
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
  write.csv('Output/surv.csv', row.names = FALSE)
# Shoenfeld Residuals #
# plot_resid: draw Schoenfeld-residual diagnostic plots for a fitted Cox
# model, one panel per model term.
#
# Args:
#   fit   - a fitted Cox model accepted by survival::cox.zph().
#   title - character vector of panel titles, indexed per term (`title[i]`;
#           missing entries yield NA titles if shorter than the term count).
#
# Side effects: draws one plot per term on the current graphics device,
# with a red horizontal reference line at zero residual.
plot_resid <- function(fit, title){
  scho <- cox.zph(fit)
  # cox.zph()'s table has one row per term plus (usually) a GLOBAL row;
  # plot only the per-term rows.  A scalar `if` replaces the original
  # ifelse() call, which is meant for vectors, not scalar conditions.
  n_rows <- nrow(scho$table)
  p <- if (n_rows == 1) 1 else n_rows - 1
  for (i in seq_len(p)) {
    plot(scho[i], col.lab = "transparent")
    title(main = title[i], ylab = "Schoenfeld Residuals")
    # p-value legend kept for reference, currently disabled:
    #legend('topleft',
    #legend = paste("p-value", ifelse(round(scho$table[i,3],3)>=0.001, format(round(scho$table[i,3], 3), nsmall = 3), '< 0.001'), sep = ": "), box.lty = 0)
    abline(0, 0, col = 'red', lwd = 2)
  }
}
# Render Schoenfeld-residual diagnostic panels to a single PDF.
# The fitted objects (fit1_sga, fit2_sga, fit1_bf, fit2_bf and their
# *_pce variants) are created elsewhere -- presumably inside the sourced
# scripts above; TODO confirm where they are defined.
pdf(file = 'Output/schoenfeld_cause_spec.pdf', width = 8, height = 8)
# 2x2 grid: one panel per unadjusted model (SGA / BF, two outcomes each)
par(mfrow = c(2,2), mar = c(2, 4, 3, 1) + 0.1)
plot_resid(fit1_sga, title="CVD death - SGA")
plot_resid(fit2_sga, title="CVD death + HTN/DM - SGA")
plot_resid(fit1_bf, title="CVD death - BF")
plot_resid(fit2_bf, title=" CVD death + HTN/DM - BF")
# 4x2 grid: models that also include PCE risk -> two panels per fit
par(mfrow = c(4,2), mar = c(2, 4, 3, 1) + 0.1)
plot_resid(fit1_sga_pce, title=c("CVD death - SGA", "CVD death - PCE"))
plot_resid(fit2_sga_pce, title=c("CVD death + HTN/DM - SGA", "CVD death + HTN/DM - PCE"))
plot_resid(fit1_bf_pce, title=c("CVD death - BF", "CVD death - PCE"))
plot_resid(fit2_bf_pce, title=c("CVD death + HTN/DM - BF", "CVD death + HTN/DM - PCE"))
# restore single-panel layout before closing the device
par(mfrow = c(1,1), mar = c(2, 4, 3, 1) + 0.1)
dev.off()
#####################################
# FINE-GRAY MODEL - COMPETING RISKS #
#####################################
# Same eight model specifications as the Cox section above, but fitted on
# time_exm2 (follow-up time extended for non-CVD deaths; see its
# definition in the data step) -- the competing-risks analogue.
# Note: sga/bf/etc. are reused and overwritten from the section above.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
write.csv('Output/surv2.csv', row.names = FALSE)
#################################
# COX PH MODEL - IMPUTED VALUES #
#################################
# Covariates #
# Same adjustment set as above, but using the imputed versions ("_IMP").
# (Fixed: assignment now uses `<-` instead of `=`, per tidyverse style.)
cov <- c('flag_marit_1_IMP',
         'flag_educ_hs_IMP',
         'flag_parity_gt1_IMP',
         'age_fst_live_brth_IMP',
         'HEI2015_TOTAL_SCORE',
         'PAG_MINW',
         'bmi_IMP',
         'pce_risk_IMP')
# Preg #
# NOTE(review): these unadjusted fits use exactly the same arguments as
# the earlier (non-imputed) section, so they recompute identical models;
# kept so surv_IMP.csv is self-contained.  Confirm this is intended.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
# Covariate-adjusted models using the imputed covariate set.
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
  write.csv('Output/surv_IMP.csv', row.names = FALSE)
######################################################
# FINE-GRAY MODEL - COMPETING RISKS - IMPUTED VALUES #
######################################################
# Competing-risks versions of the imputed-covariate models: same calls as
# the Fine-Gray section above (time_exm2), with `cov` now holding the
# "_IMP" covariates redefined in the preceding section.
sga <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga2 <- cvd_surv(var='flag_infnt_sga', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf2 <- cvd_surv(var='flag_any_brstfd_1m', time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Preg + cov #
sga_cov <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
sga_cov2 <- cvd_surv(var='flag_infnt_sga', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
bf_cov <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome', subpop='flag_subpop')
bf_cov2 <- cvd_surv(var='flag_any_brstfd_1m', cov = cov, time = 'time_exm2', out='cvd_outcome2', subpop='flag_subpop')
# Stack the eight model summaries into one results table
bind_rows(sga, sga2, bf, bf2, sga_cov, sga_cov2, bf_cov, bf_cov2) %>%
write.csv('Output/surv2_IMP.csv', row.names = FALSE)
|
# load datasets
# Full cleaned VIP dataset (154009 rows).
VIP_data_all <- read.csv(
  "../VIP_data/VIP_170206_cleaned.csv",
  header = TRUE, sep = ",", row.names = NULL, fill = TRUE
)
# Complete-case subsets for visit 1 and visit 2.
VIP_data_subset_visit1_complete_cases <- read.csv(
  "../VIP_data/VIP_data_subset_visit1_complete_cases.csv",
  header = TRUE, sep = ",", row.names = NULL, fill = TRUE
)
VIP_data_subset_visit2_complete_cases <- read.csv(
  "../VIP_data/VIP_data_subset_visit2_complete_cases.csv",
  header = TRUE, sep = ",", row.names = NULL, fill = TRUE
)
# Candidate analysis groupings:
#   resistance_case_control_susceptible
#   resistance_continuous
#   resistance_case_control_compliant
#   susceptible_case_control_compliant
#   case_02_control_compliant
#   case_12_control_compliant
#   case_01_control_compliant
#   case_10_control_compliant
# biclassstrict 01
# Subjects in each strict binary class at each visit; "persistent" means
# the subject is in the class at BOTH visits (visit-2 IDs kept).  Bare
# length() calls print group sizes when the script is run.
visit1_subjects_biclass_strict_0 <-
  VIP_data_subset_visit1_complete_cases$Subject_id[
    VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict == 0]
length(visit1_subjects_biclass_strict_0)
visit2_subjects_biclass_strict_0 <-
  VIP_data_subset_visit2_complete_cases$Subject_id[
    VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict == 0]
length(visit2_subjects_biclass_strict_0)
persistent_subjects_biclass_strict_0 <-
  visit2_subjects_biclass_strict_0[
    visit2_subjects_biclass_strict_0 %in% visit1_subjects_biclass_strict_0]
length(persistent_subjects_biclass_strict_0)
visit1_subjects_biclass_strict_1 <-
  VIP_data_subset_visit1_complete_cases$Subject_id[
    VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict == 1]
length(visit1_subjects_biclass_strict_1)
visit2_subjects_biclass_strict_1 <-
  VIP_data_subset_visit2_complete_cases$Subject_id[
    VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict == 1]
length(visit2_subjects_biclass_strict_1)
persistent_subjects_biclass_strict_1 <-
  visit2_subjects_biclass_strict_1[
    visit2_subjects_biclass_strict_1 %in% visit1_subjects_biclass_strict_1]
length(persistent_subjects_biclass_strict_1)
# exclude current smokers (sm_status == 1) from class 1
persistent_subjects_biclass_strict_1_non_smokers <-
  persistent_subjects_biclass_strict_1[
    !(persistent_subjects_biclass_strict_1 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status == 1 &
                                  !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_strict_1_non_smokers)
#biclassstrictopposite 01
# "Opposite" strict binary classification; persistent = subject is in the
# class at both visits (visit-2 IDs kept).
visit1_subjects_biclass_strict_0_opposite <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict_opposite == 0]
length(visit1_subjects_biclass_strict_0_opposite)
visit2_subjects_biclass_strict_0_opposite <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict_opposite == 0]
length(visit2_subjects_biclass_strict_0_opposite)
persistent_subjects_biclass_strict_0_opposite <- visit2_subjects_biclass_strict_0_opposite[visit2_subjects_biclass_strict_0_opposite %in% visit1_subjects_biclass_strict_0_opposite]
length(persistent_subjects_biclass_strict_0_opposite)
visit1_subjects_biclass_strict_1_opposite <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict_opposite == 1]
length(visit1_subjects_biclass_strict_1_opposite)
visit2_subjects_biclass_strict_1_opposite <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict_opposite == 1]
length(visit2_subjects_biclass_strict_1_opposite)
# BUG FIX: the original subset visit2_subjects_biclass_strict_1 (the
# non-"opposite" vector) using a logical mask built from the "_opposite"
# vectors, selecting the wrong subjects and risking silent recycling of a
# mismatched-length mask.  The "_opposite" vector is now used throughout.
persistent_subjects_biclass_strict_1_opposite <- visit2_subjects_biclass_strict_1_opposite[visit2_subjects_biclass_strict_1_opposite %in% visit1_subjects_biclass_strict_1_opposite]
length(persistent_subjects_biclass_strict_1_opposite)
# exclude former smokers (sm_status == 2) from class 1
persistent_subjects_biclass_strict_1_opposite_non_former_smokers <- persistent_subjects_biclass_strict_1_opposite[!(persistent_subjects_biclass_strict_1_opposite %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status == 2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_strict_1_opposite_non_former_smokers)
#biclass 01
# Non-strict binary classification; persistent = subject is in the class
# at both visits (visit-2 IDs kept).
visit1_subjects_biclass_0 <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor == 0]
length(visit1_subjects_biclass_0)
visit2_subjects_biclass_0 <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor == 0]
length(visit2_subjects_biclass_0)
# BUG FIX: the original subset visit2_subjects_biclass_strict_0 (the
# "strict" vector from the earlier section) using a mask built from the
# non-strict vectors -- a copy-paste error that selects the wrong
# subjects and risks silent recycling of a mismatched-length mask.
persistent_subjects_biclass_0 <- visit2_subjects_biclass_0[visit2_subjects_biclass_0 %in% visit1_subjects_biclass_0]
length(persistent_subjects_biclass_0)
visit1_subjects_biclass_1 <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor == 1]
length(visit1_subjects_biclass_1)
visit2_subjects_biclass_1 <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor == 1]
length(visit2_subjects_biclass_1)
persistent_subjects_biclass_1 <- visit2_subjects_biclass_1[visit2_subjects_biclass_1 %in% visit1_subjects_biclass_1]
length(persistent_subjects_biclass_1)
# exclude current smokers (sm_status == 1) from class 1
persistent_subjects_biclass_1_non_smokers <- persistent_subjects_biclass_1[!(persistent_subjects_biclass_1 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status == 1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_1_non_smokers)
#biclassopposite 01
# "Opposite" non-strict binary classification; persistent = subject is in
# the class at both visits (visit-2 IDs kept).
visit1_subjects_biclass_0_opposite <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_opposite == 0]
length(visit1_subjects_biclass_0_opposite)
visit2_subjects_biclass_0_opposite <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_opposite == 0]
length(visit2_subjects_biclass_0_opposite)
persistent_subjects_biclass_0_opposite <- visit2_subjects_biclass_0_opposite[visit2_subjects_biclass_0_opposite %in% visit1_subjects_biclass_0_opposite]
length(persistent_subjects_biclass_0_opposite)
visit1_subjects_biclass_1_opposite <- VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_opposite == 1]
length(visit1_subjects_biclass_1_opposite)
visit2_subjects_biclass_1_opposite <- VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_opposite == 1]
length(visit2_subjects_biclass_1_opposite)
# BUG FIX: the original subset visit2_subjects_biclass_1 (the
# non-"opposite" vector) using a mask built from the "_opposite" vectors,
# selecting the wrong subjects and risking silent recycling of a
# mismatched-length mask.  The "_opposite" vector is now used throughout.
persistent_subjects_biclass_1_opposite <- visit2_subjects_biclass_1_opposite[visit2_subjects_biclass_1_opposite %in% visit1_subjects_biclass_1_opposite]
length(persistent_subjects_biclass_1_opposite)
# exclude former smokers (sm_status == 2) from class 1
persistent_subjects_biclass_1_opposite_non_former_smokers <- persistent_subjects_biclass_1_opposite[!(persistent_subjects_biclass_1_opposite %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status == 2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_1_opposite_non_former_smokers)
# Multiclass compliance classes: -2, -1, 0, 1, 2.
# Pattern used throughout: persistent_* = subjects in the same class at
# both visits (visit-2 IDs filtered by membership in the visit-1 set);
# bare length() calls print group sizes when the script is run.
#multiclass -2-1 0 1 2
visit1_subjects_multiclass_0<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==0]
length(visit1_subjects_multiclass_0)
visit2_subjects_multiclass_0<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==0]
length(visit2_subjects_multiclass_0)
persistent_subjects_multiclass_0<-visit2_subjects_multiclass_0[visit2_subjects_multiclass_0 %in% visit1_subjects_multiclass_0]
length(persistent_subjects_multiclass_0)
visit1_subjects_multiclass_1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==1]
length(visit1_subjects_multiclass_1)
visit2_subjects_multiclass_1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==1]
length(visit2_subjects_multiclass_1)
persistent_subjects_multiclass_1<-visit2_subjects_multiclass_1[visit2_subjects_multiclass_1 %in% visit1_subjects_multiclass_1]
length(persistent_subjects_multiclass_1)
# current smokers: sm_status == 1
#exclude smokers from 1
persistent_subjects_multiclass_1_non_smokers<-persistent_subjects_multiclass_1[!(persistent_subjects_multiclass_1 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_1_non_smokers)
visit1_subjects_multiclass_2<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==2]
length(visit1_subjects_multiclass_2)
visit2_subjects_multiclass_2<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==2]
length(visit2_subjects_multiclass_2)
persistent_subjects_multiclass_2<-visit2_subjects_multiclass_2[visit2_subjects_multiclass_2 %in% visit1_subjects_multiclass_2]
length(persistent_subjects_multiclass_2)
#exclude smokers from 2
persistent_subjects_multiclass_2_non_smokers<-persistent_subjects_multiclass_2[!(persistent_subjects_multiclass_2 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_2_non_smokers)
visit1_subjects_multiclass_minus1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==-1]
length(visit1_subjects_multiclass_minus1)
visit2_subjects_multiclass_minus1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==-1]
length(visit2_subjects_multiclass_minus1)
persistent_subjects_multiclass_minus1<-visit2_subjects_multiclass_minus1[visit2_subjects_multiclass_minus1 %in% visit1_subjects_multiclass_minus1]
length(persistent_subjects_multiclass_minus1)
# NOTE(review): the "_non_smokers" suffix below actually excludes FORMER
# smokers (sm_status == 2), unlike classes 1/2 above which exclude
# current smokers (sm_status == 1) -- confirm this asymmetry is intended.
#exclude former smokers from -1
persistent_subjects_multiclass_minus1_non_smokers<-persistent_subjects_multiclass_minus1[!(persistent_subjects_multiclass_minus1 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_minus1_non_smokers)
visit1_subjects_multiclass_minus2<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==-2]
length(visit1_subjects_multiclass_minus2)
visit2_subjects_multiclass_minus2<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==-2]
length(visit2_subjects_multiclass_minus2)
persistent_subjects_multiclass_minus2<-visit2_subjects_multiclass_minus2[visit2_subjects_multiclass_minus2 %in% visit1_subjects_multiclass_minus2]
length(persistent_subjects_multiclass_minus2)
#exclude former smokers from -2
persistent_subjects_multiclass_minus2_non_smokers<-persistent_subjects_multiclass_minus2[!(persistent_subjects_multiclass_minus2 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_minus2_non_smokers)
#there are no persistent subjects in -2 !!!
# "All multiclass" two-character transition codes ('00', '01', '12', ...).
# Presumably each code encodes the subject's class at two time points --
# TODO confirm against the definition of compliance_multi_factor_all.
# Same persistence pattern as above: visit-2 IDs filtered by membership
# in the visit-1 set; bare length() calls print group sizes.
#allmulticlass 00 11 22 01 10 20 02 12 21
visit1_subjects_allmulticlass_00<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='00']
length(visit1_subjects_allmulticlass_00)
visit2_subjects_allmulticlass_00<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='00']
length(visit2_subjects_allmulticlass_00)
persistent_subjects_allmulticlass_00<-visit2_subjects_allmulticlass_00[visit2_subjects_allmulticlass_00 %in% visit1_subjects_allmulticlass_00]
length(persistent_subjects_allmulticlass_00)
visit1_subjects_allmulticlass_11<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='11']
length(visit1_subjects_allmulticlass_11)
visit2_subjects_allmulticlass_11<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='11']
length(visit2_subjects_allmulticlass_11)
persistent_subjects_allmulticlass_11<-visit2_subjects_allmulticlass_11[visit2_subjects_allmulticlass_11 %in% visit1_subjects_allmulticlass_11]
length(persistent_subjects_allmulticlass_11)
visit1_subjects_allmulticlass_22<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='22']
length(visit1_subjects_allmulticlass_22)
visit2_subjects_allmulticlass_22<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='22']
length(visit2_subjects_allmulticlass_22)
persistent_subjects_allmulticlass_22<-visit2_subjects_allmulticlass_22[visit2_subjects_allmulticlass_22 %in% visit1_subjects_allmulticlass_22]
length(persistent_subjects_allmulticlass_22)#empty
visit1_subjects_allmulticlass_01<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='01']
length(visit1_subjects_allmulticlass_01)
visit2_subjects_allmulticlass_01<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='01']
length(visit2_subjects_allmulticlass_01)
persistent_subjects_allmulticlass_01<-visit2_subjects_allmulticlass_01[visit2_subjects_allmulticlass_01 %in% visit1_subjects_allmulticlass_01]
length(persistent_subjects_allmulticlass_01)
# current smokers: sm_status == 1
#exclude smokers from 01
persistent_subjects_allmulticlass_01_non_smokers<-persistent_subjects_allmulticlass_01[!(persistent_subjects_allmulticlass_01 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_01_non_smokers)
visit1_subjects_allmulticlass_02<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='02']
length(visit1_subjects_allmulticlass_02)
visit2_subjects_allmulticlass_02<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='02']
length(visit2_subjects_allmulticlass_02)
persistent_subjects_allmulticlass_02<-visit2_subjects_allmulticlass_02[visit2_subjects_allmulticlass_02 %in% visit1_subjects_allmulticlass_02]
length(persistent_subjects_allmulticlass_02)
#exclude smokers from 02
persistent_subjects_allmulticlass_02_non_smokers<-persistent_subjects_allmulticlass_02[!(persistent_subjects_allmulticlass_02 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_02_non_smokers)
visit1_subjects_allmulticlass_12<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='12']
length(visit1_subjects_allmulticlass_12)
visit2_subjects_allmulticlass_12<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='12']
length(visit2_subjects_allmulticlass_12)
persistent_subjects_allmulticlass_12<-visit2_subjects_allmulticlass_12[visit2_subjects_allmulticlass_12 %in% visit1_subjects_allmulticlass_12]
length(persistent_subjects_allmulticlass_12)
#exclude smokers from 12
persistent_subjects_allmulticlass_12_non_smokers<-persistent_subjects_allmulticlass_12[!(persistent_subjects_allmulticlass_12 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_12_non_smokers)
visit1_subjects_allmulticlass_10<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='10']
length(visit1_subjects_allmulticlass_10)
visit2_subjects_allmulticlass_10<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='10']
length(visit2_subjects_allmulticlass_10)
persistent_subjects_allmulticlass_10<-visit2_subjects_allmulticlass_10[visit2_subjects_allmulticlass_10 %in% visit1_subjects_allmulticlass_10]
length(persistent_subjects_allmulticlass_10)
# former smokers: sm_status == 2 (note: "_non_smokers" suffix kept even
# though former smokers are excluded here -- see naming note above)
#exclude former smokers from 10
persistent_subjects_allmulticlass_10_non_smokers<-persistent_subjects_allmulticlass_10[!(persistent_subjects_allmulticlass_10 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_10_non_smokers)
visit1_subjects_allmulticlass_20<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='20']
length(visit1_subjects_allmulticlass_20)
visit2_subjects_allmulticlass_20<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='20']
length(visit2_subjects_allmulticlass_20)
persistent_subjects_allmulticlass_20<-visit2_subjects_allmulticlass_20[visit2_subjects_allmulticlass_20 %in% visit1_subjects_allmulticlass_20]
length(persistent_subjects_allmulticlass_20)
#exclude former smokers from 20
persistent_subjects_allmulticlass_20_non_smokers<-persistent_subjects_allmulticlass_20[!(persistent_subjects_allmulticlass_20 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_20_non_smokers)#empty
visit1_subjects_allmulticlass_21<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='21']
length(visit1_subjects_allmulticlass_21)
visit2_subjects_allmulticlass_21<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='21']
length(visit2_subjects_allmulticlass_21)
persistent_subjects_allmulticlass_21<-visit2_subjects_allmulticlass_21[visit2_subjects_allmulticlass_21 %in% visit1_subjects_allmulticlass_21]
length(persistent_subjects_allmulticlass_21)
#exclude former smokers from 21
persistent_subjects_allmulticlass_21_non_smokers<-persistent_subjects_allmulticlass_21[!(persistent_subjects_allmulticlass_21 %in%
VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_21_non_smokers)#empty
#save enummers
# For each persistent group, look up its subjects in the visit-1 table and dump
# the 'enummer' column to a flat file (no header, no row names) under
# ../Results/persistantly_lean_subjects/.
# NOTE(review): persistent_subjects_allmulticlass_10_non_smokers is written to a
# file named *_non_former_smokers — the variable/file naming is inconsistent.
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_0_opposite,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_0_opposite",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_1_opposite_non_former_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_1_opposite_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_0_opposite,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_0_opposite",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_1_opposite_non_former_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_1_opposite_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_2_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_2_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_minus1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_minus1_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_00,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_00",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_11,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_11",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_22,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_22",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_01_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_01_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_02_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_02_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_12_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_12_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_10_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_10_non_former_smokers",row.names=F,col.names=F)
|
/code_leanPhty/select_PL_subjects/select_PL_2908.R
|
no_license
|
jernejaMislej/all_code
|
R
| false
| false
| 23,051
|
r
|
#load datasets
#load entire cleaned data (154009 lines)
# Full cleaned VIP dataset: one row per examination; used below only to look up
# smoking status (sm_status) per Subject_id.
VIP_data_all <- read.csv("../VIP_data/VIP_170206_cleaned.csv", header = TRUE, sep = ",", row.names = NULL, fill=TRUE)
#visit 1 and visit2 data
# Complete-case subsets for the two study visits; these carry the compliance
# classification columns used throughout this script.
VIP_data_subset_visit1_complete_cases<- read.csv("../VIP_data/VIP_data_subset_visit1_complete_cases.csv", header = TRUE, sep = ",", row.names = NULL, fill=TRUE)
VIP_data_subset_visit2_complete_cases<- read.csv("../VIP_data/VIP_data_subset_visit2_complete_cases.csv", header = TRUE, sep = ",", row.names = NULL, fill=TRUE)
#resistance_case_control_susceptible
#resistance_continuous
#resistance_case_control_compliant
#susceptible_case_control_compliant
#case_02_control_compliant
#case_12_control_compliant
#case_01_control_compliant
#case_10_control_compliant
#biclassstrict 01
# NOTE(review): sm_status coding appears to be 1 = current smoker and
# 2 = former smoker (inferred from the 'exclude smokers' / 'exclude former
# smokers' comments below) — confirm against the VIP codebook.
# Persistent class-0 subjects: labelled 0 at visit 1 AND visit 2.
visit1_subjects_biclass_strict_0<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict==0]
length(visit1_subjects_biclass_strict_0)
visit2_subjects_biclass_strict_0<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict==0]
length(visit2_subjects_biclass_strict_0)
persistent_subjects_biclass_strict_0<-visit2_subjects_biclass_strict_0[visit2_subjects_biclass_strict_0 %in% visit1_subjects_biclass_strict_0]
length(persistent_subjects_biclass_strict_0)
# Persistent class-1 subjects.
visit1_subjects_biclass_strict_1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict==1]
length(visit1_subjects_biclass_strict_1)
visit2_subjects_biclass_strict_1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict==1]
length(visit2_subjects_biclass_strict_1)
persistent_subjects_biclass_strict_1<-visit2_subjects_biclass_strict_1[visit2_subjects_biclass_strict_1 %in% visit1_subjects_biclass_strict_1]
length(persistent_subjects_biclass_strict_1)
#exclude smokers from 1
persistent_subjects_biclass_strict_1_non_smokers<-persistent_subjects_biclass_strict_1[!(persistent_subjects_biclass_strict_1 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_strict_1_non_smokers)
#biclassstrictopposite 01
# Persistent class-0 subjects under the strict 'opposite' bi-class labelling:
# labelled 0 at visit 1 AND visit 2.
visit1_subjects_biclass_strict_0_opposite<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict_opposite==0]
length(visit1_subjects_biclass_strict_0_opposite)
visit2_subjects_biclass_strict_0_opposite<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict_opposite==0]
length(visit2_subjects_biclass_strict_0_opposite)
persistent_subjects_biclass_strict_0_opposite<-visit2_subjects_biclass_strict_0_opposite[visit2_subjects_biclass_strict_0_opposite %in% visit1_subjects_biclass_strict_0_opposite]
length(persistent_subjects_biclass_strict_0_opposite)
# Persistent class-1 subjects under the strict 'opposite' labelling.
visit1_subjects_biclass_strict_1_opposite<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_strict_opposite==1]
length(visit1_subjects_biclass_strict_1_opposite)
visit2_subjects_biclass_strict_1_opposite<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_strict_opposite==1]
length(visit2_subjects_biclass_strict_1_opposite)
# BUG FIX: the original indexed visit2_subjects_biclass_strict_1 (the
# non-opposite vector) with a logical mask computed on the *_opposite vector,
# producing a misaligned/wrong selection. Index the *_opposite vector itself.
persistent_subjects_biclass_strict_1_opposite<-visit2_subjects_biclass_strict_1_opposite[visit2_subjects_biclass_strict_1_opposite %in% visit1_subjects_biclass_strict_1_opposite]
length(persistent_subjects_biclass_strict_1_opposite)
#exclude former smokers from 1
persistent_subjects_biclass_strict_1_opposite_non_former_smokers<-persistent_subjects_biclass_strict_1_opposite[!(persistent_subjects_biclass_strict_1_opposite %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_strict_1_opposite_non_former_smokers)
#biclass 01
# Persistent class-0 subjects under the (non-strict) bi-class labelling.
visit1_subjects_biclass_0<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor==0]
length(visit1_subjects_biclass_0)
visit2_subjects_biclass_0<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor==0]
length(visit2_subjects_biclass_0)
# BUG FIX: the original indexed visit2_subjects_biclass_strict_0 (the 'strict'
# vector from the previous section) with a mask computed on
# visit2_subjects_biclass_0 — a copy-paste error. Index the non-strict vector.
persistent_subjects_biclass_0<-visit2_subjects_biclass_0[visit2_subjects_biclass_0 %in% visit1_subjects_biclass_0]
length(persistent_subjects_biclass_0)
# Persistent class-1 subjects.
visit1_subjects_biclass_1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor==1]
length(visit1_subjects_biclass_1)
visit2_subjects_biclass_1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor==1]
length(visit2_subjects_biclass_1)
persistent_subjects_biclass_1<-visit2_subjects_biclass_1[visit2_subjects_biclass_1 %in% visit1_subjects_biclass_1]
length(persistent_subjects_biclass_1)
#exclude smokers from 1
persistent_subjects_biclass_1_non_smokers<-persistent_subjects_biclass_1[!(persistent_subjects_biclass_1 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_1_non_smokers)
#biclassopposite 01
# Persistent class-0 subjects under the 'opposite' bi-class labelling.
visit1_subjects_biclass_0_opposite<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_opposite==0]
length(visit1_subjects_biclass_0_opposite)
visit2_subjects_biclass_0_opposite<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_opposite==0]
length(visit2_subjects_biclass_0_opposite)
persistent_subjects_biclass_0_opposite<-visit2_subjects_biclass_0_opposite[visit2_subjects_biclass_0_opposite %in% visit1_subjects_biclass_0_opposite]
length(persistent_subjects_biclass_0_opposite)
# Persistent class-1 subjects under the 'opposite' labelling.
visit1_subjects_biclass_1_opposite<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_bi_factor_opposite==1]
length(visit1_subjects_biclass_1_opposite)
visit2_subjects_biclass_1_opposite<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_bi_factor_opposite==1]
length(visit2_subjects_biclass_1_opposite)
# BUG FIX: the original indexed visit2_subjects_biclass_1 (the non-opposite
# vector) with a mask computed on the *_opposite vector — a copy-paste error.
# Index the *_opposite vector itself.
persistent_subjects_biclass_1_opposite<-visit2_subjects_biclass_1_opposite[visit2_subjects_biclass_1_opposite %in% visit1_subjects_biclass_1_opposite]
length(persistent_subjects_biclass_1_opposite)
#exclude former smokers from 1
persistent_subjects_biclass_1_opposite_non_former_smokers<-persistent_subjects_biclass_1_opposite[!(persistent_subjects_biclass_1_opposite %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_biclass_1_opposite_non_former_smokers)
#multiclass -2-1 0 1 2
# Persistent membership per multi-class compliance level: a subject must carry
# the same label at visit 1 AND visit 2.
visit1_subjects_multiclass_0<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==0]
length(visit1_subjects_multiclass_0)
visit2_subjects_multiclass_0<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==0]
length(visit2_subjects_multiclass_0)
persistent_subjects_multiclass_0<-visit2_subjects_multiclass_0[visit2_subjects_multiclass_0 %in% visit1_subjects_multiclass_0]
length(persistent_subjects_multiclass_0)
visit1_subjects_multiclass_1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==1]
length(visit1_subjects_multiclass_1)
visit2_subjects_multiclass_1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==1]
length(visit2_subjects_multiclass_1)
persistent_subjects_multiclass_1<-visit2_subjects_multiclass_1[visit2_subjects_multiclass_1 %in% visit1_subjects_multiclass_1]
length(persistent_subjects_multiclass_1)
#exclude smokers from 1
persistent_subjects_multiclass_1_non_smokers<-persistent_subjects_multiclass_1[!(persistent_subjects_multiclass_1 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_1_non_smokers)
visit1_subjects_multiclass_2<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==2]
length(visit1_subjects_multiclass_2)
visit2_subjects_multiclass_2<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==2]
length(visit2_subjects_multiclass_2)
persistent_subjects_multiclass_2<-visit2_subjects_multiclass_2[visit2_subjects_multiclass_2 %in% visit1_subjects_multiclass_2]
length(persistent_subjects_multiclass_2)
#exclude smokers from 2
persistent_subjects_multiclass_2_non_smokers<-persistent_subjects_multiclass_2[!(persistent_subjects_multiclass_2 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_2_non_smokers)
visit1_subjects_multiclass_minus1<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==-1]
length(visit1_subjects_multiclass_minus1)
visit2_subjects_multiclass_minus1<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==-1]
length(visit2_subjects_multiclass_minus1)
persistent_subjects_multiclass_minus1<-visit2_subjects_multiclass_minus1[visit2_subjects_multiclass_minus1 %in% visit1_subjects_multiclass_minus1]
length(persistent_subjects_multiclass_minus1)
#exclude former smokers from -1
# NOTE(review): variable named *_non_smokers but the filter removes former
# smokers (sm_status==2), matching the comment rather than the name.
persistent_subjects_multiclass_minus1_non_smokers<-persistent_subjects_multiclass_minus1[!(persistent_subjects_multiclass_minus1 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_minus1_non_smokers)
visit1_subjects_multiclass_minus2<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor==-2]
length(visit1_subjects_multiclass_minus2)
visit2_subjects_multiclass_minus2<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor==-2]
length(visit2_subjects_multiclass_minus2)
persistent_subjects_multiclass_minus2<-visit2_subjects_multiclass_minus2[visit2_subjects_multiclass_minus2 %in% visit1_subjects_multiclass_minus2]
length(persistent_subjects_multiclass_minus2)
#exclude former smokers from -2
persistent_subjects_multiclass_minus2_non_smokers<-persistent_subjects_multiclass_minus2[!(persistent_subjects_multiclass_minus2 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_multiclass_minus2_non_smokers)
#there are no persistent subjects in -2 !!!
#allmulticlass 00 11 22 01 10 20 02 12 21
# Two-character labels ('00', '01', ...) describe compliance transitions; a
# subject is 'persistent' in a label when it holds at visit 1 AND visit 2.
visit1_subjects_allmulticlass_00<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='00']
length(visit1_subjects_allmulticlass_00)
visit2_subjects_allmulticlass_00<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='00']
length(visit2_subjects_allmulticlass_00)
persistent_subjects_allmulticlass_00<-visit2_subjects_allmulticlass_00[visit2_subjects_allmulticlass_00 %in% visit1_subjects_allmulticlass_00]
length(persistent_subjects_allmulticlass_00)
visit1_subjects_allmulticlass_11<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='11']
length(visit1_subjects_allmulticlass_11)
visit2_subjects_allmulticlass_11<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='11']
length(visit2_subjects_allmulticlass_11)
persistent_subjects_allmulticlass_11<-visit2_subjects_allmulticlass_11[visit2_subjects_allmulticlass_11 %in% visit1_subjects_allmulticlass_11]
length(persistent_subjects_allmulticlass_11)
visit1_subjects_allmulticlass_22<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='22']
length(visit1_subjects_allmulticlass_22)
visit2_subjects_allmulticlass_22<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='22']
length(visit2_subjects_allmulticlass_22)
persistent_subjects_allmulticlass_22<-visit2_subjects_allmulticlass_22[visit2_subjects_allmulticlass_22 %in% visit1_subjects_allmulticlass_22]
length(persistent_subjects_allmulticlass_22)#empty
visit1_subjects_allmulticlass_01<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='01']
length(visit1_subjects_allmulticlass_01)
visit2_subjects_allmulticlass_01<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='01']
length(visit2_subjects_allmulticlass_01)
persistent_subjects_allmulticlass_01<-visit2_subjects_allmulticlass_01[visit2_subjects_allmulticlass_01 %in% visit1_subjects_allmulticlass_01]
length(persistent_subjects_allmulticlass_01)
#exclude smokers from 01
persistent_subjects_allmulticlass_01_non_smokers<-persistent_subjects_allmulticlass_01[!(persistent_subjects_allmulticlass_01 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_01_non_smokers)
visit1_subjects_allmulticlass_02<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='02']
length(visit1_subjects_allmulticlass_02)
visit2_subjects_allmulticlass_02<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='02']
length(visit2_subjects_allmulticlass_02)
persistent_subjects_allmulticlass_02<-visit2_subjects_allmulticlass_02[visit2_subjects_allmulticlass_02 %in% visit1_subjects_allmulticlass_02]
length(persistent_subjects_allmulticlass_02)
#exclude smokers from 02
persistent_subjects_allmulticlass_02_non_smokers<-persistent_subjects_allmulticlass_02[!(persistent_subjects_allmulticlass_02 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_02_non_smokers)
visit1_subjects_allmulticlass_12<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='12']
length(visit1_subjects_allmulticlass_12)
visit2_subjects_allmulticlass_12<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='12']
length(visit2_subjects_allmulticlass_12)
persistent_subjects_allmulticlass_12<-visit2_subjects_allmulticlass_12[visit2_subjects_allmulticlass_12 %in% visit1_subjects_allmulticlass_12]
length(persistent_subjects_allmulticlass_12)
#exclude smokers from 12
persistent_subjects_allmulticlass_12_non_smokers<-persistent_subjects_allmulticlass_12[!(persistent_subjects_allmulticlass_12 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==1 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_12_non_smokers)
visit1_subjects_allmulticlass_10<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='10']
length(visit1_subjects_allmulticlass_10)
visit2_subjects_allmulticlass_10<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='10']
length(visit2_subjects_allmulticlass_10)
persistent_subjects_allmulticlass_10<-visit2_subjects_allmulticlass_10[visit2_subjects_allmulticlass_10 %in% visit1_subjects_allmulticlass_10]
length(persistent_subjects_allmulticlass_10)
#exclude former smokers from 10
# NOTE(review): variable named *_non_smokers but the filter removes former
# smokers (sm_status==2); the output file later uses 'non_former_smokers'.
persistent_subjects_allmulticlass_10_non_smokers<-persistent_subjects_allmulticlass_10[!(persistent_subjects_allmulticlass_10 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_10_non_smokers)
visit1_subjects_allmulticlass_20<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='20']
length(visit1_subjects_allmulticlass_20)
visit2_subjects_allmulticlass_20<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='20']
length(visit2_subjects_allmulticlass_20)
persistent_subjects_allmulticlass_20<-visit2_subjects_allmulticlass_20[visit2_subjects_allmulticlass_20 %in% visit1_subjects_allmulticlass_20]
length(persistent_subjects_allmulticlass_20)
#exclude former smokers from 20
persistent_subjects_allmulticlass_20_non_smokers<-persistent_subjects_allmulticlass_20[!(persistent_subjects_allmulticlass_20 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_20_non_smokers)#empty
visit1_subjects_allmulticlass_21<-VIP_data_subset_visit1_complete_cases$Subject_id[VIP_data_subset_visit1_complete_cases$compliance_multi_factor_all=='21']
length(visit1_subjects_allmulticlass_21)
visit2_subjects_allmulticlass_21<-VIP_data_subset_visit2_complete_cases$Subject_id[VIP_data_subset_visit2_complete_cases$compliance_multi_factor_all=='21']
length(visit2_subjects_allmulticlass_21)
persistent_subjects_allmulticlass_21<-visit2_subjects_allmulticlass_21[visit2_subjects_allmulticlass_21 %in% visit1_subjects_allmulticlass_21]
length(persistent_subjects_allmulticlass_21)
#exclude former smokers from 21
persistent_subjects_allmulticlass_21_non_smokers<-persistent_subjects_allmulticlass_21[!(persistent_subjects_allmulticlass_21 %in%
        VIP_data_all$Subject_id[VIP_data_all$sm_status==2 & !is.na(VIP_data_all$sm_status)])]
length(persistent_subjects_allmulticlass_21_non_smokers)#empty
#save enummers
# Dump the 'enummer' column of each persistent group (looked up in the visit-1
# table) to a flat file under ../Results/persistantly_lean_subjects/, with no
# header or row names.
# NOTE(review): persistent_subjects_allmulticlass_10_non_smokers is written to
# a file named *_non_former_smokers — the variable/file naming is inconsistent.
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_0_opposite,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_0_opposite",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_strict_1_opposite_non_former_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_strict_1_opposite_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_0_opposite,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_0_opposite",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_biclass_1_opposite_non_former_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_biclass_1_opposite_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_0,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_0",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_1_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_2_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_2_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_multiclass_minus1_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_multiclass_minus1_non_former_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_00,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_00",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_11,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_11",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_22,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_22",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_01_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_01_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_02_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_02_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_12_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_12_non_smokers",row.names=F,col.names=F)
write.table(VIP_data_subset_visit1_complete_cases[VIP_data_subset_visit1_complete_cases$Subject_id %in% persistent_subjects_allmulticlass_10_non_smokers,"enummer"], "../Results/persistantly_lean_subjects/enummers_allmulticlass_10_non_former_smokers",row.names=F,col.names=F)
|
#special cbind function
#my.cbind(x,y,first)
# FALSE means add NA to top of shorter vector
# TRUE means add NA to bottom of shorter vector
# Extend a data frame to `rowsneeded` rows by stacking all-NA filler rows.
# first = TRUE appends the NA rows below the data; first = FALSE puts them
# on top. Column names are preserved.
padNA <- function(mydata, rowsneeded, first = TRUE) {
  keep_names <- colnames(mydata)
  n_filler <- rowsneeded - nrow(mydata)
  filler <- setNames(
    data.frame(matrix(rep(NA, length(keep_names) * n_filler),
                      ncol = length(keep_names))),
    keep_names)
  if (isTRUE(first)) {
    rbind(mydata, filler)
  } else {
    rbind(filler, mydata)
  }
}
# Capture the unevaluated `...` arguments and return their source text,
# e.g. dotnames(x, y + 1) -> c("x", "y + 1").
dotnames <- function(...) {
  captured <- as.list(substitute(list(...)))[-1L]
  unlist(lapply(captured, deparse), recursive = FALSE, use.names = FALSE)
}
# Column-bind vectors/matrices/data frames of unequal row counts, padding the
# shorter inputs with NA rows via padNA(). Output columns are prefixed with
# the name of the argument they came from (via dotnames()).
Cbind <- function(..., first = TRUE) {
  arg_labels <- dotnames(...)
  pieces <- setNames(list(...), arg_labels)
  # The tallest input determines the number of output rows.
  target_rows <- max(sapply(pieces, function(p)
    if (is.null(dim(p))) length(p) else nrow(p)))
  padded <- lapply(seq_along(pieces), function(i) {
    piece <- pieces[[i]]
    if (is.null(dim(piece))) {
      # Plain vector: promote to a one-column data frame named after the argument.
      piece <- setNames(data.frame(piece), arg_labels[i])
    } else if (is.null(colnames(piece))) {
      colnames(piece) <- paste(arg_labels[i], sequence(ncol(piece)), sep = "_")
    } else {
      colnames(piece) <- paste(arg_labels[i], colnames(piece), sep = "_")
    }
    padNA(piece, rowsneeded = target_rows, first = first)
  })
  do.call(cbind, padded)
}
|
/Cbind.R
|
no_license
|
poweihuang/airbnbforecast
|
R
| false
| false
| 1,254
|
r
|
#special cbind function
#my.cbind(x,y,first)
# FALSE means add NA to top of shorter vector
# TRUE means add NA to bottom of shorter vector
# Pad `mydata` with NA-only rows until it has `rowsneeded` rows in total.
# first = TRUE  -> filler rows go below the data
# first = FALSE -> filler rows go above the data
padNA <- function(mydata, rowsneeded, first = TRUE) {
  original_names <- colnames(mydata)
  missing_rows <- rowsneeded - nrow(mydata)
  na_block <- matrix(rep(NA, length(original_names) * missing_rows),
                     ncol = length(original_names))
  na_rows <- setNames(data.frame(na_block), original_names)
  if (isTRUE(first)) rbind(mydata, na_rows) else rbind(na_rows, mydata)
}
# Return the deparsed source text of each argument passed via `...`
# (e.g. dotnames(a, b * 2) -> c("a", "b * 2")).
dotnames <- function(...) {
  arg_exprs <- as.list(substitute(list(...)))[-1L]
  unlist(lapply(arg_exprs, deparse), recursive = FALSE, use.names = FALSE)
}
# cbind() for inputs of different heights: shorter vectors/matrices/data
# frames are NA-padded (padNA) to the tallest input, and every output column
# is prefixed with the originating argument's name (dotnames).
Cbind <- function(..., first = TRUE) {
  labels <- dotnames(...)
  inputs <- setNames(list(...), labels)
  # Row count of the tallest input.
  max_rows <- max(sapply(inputs, function(obj)
    if (is.null(dim(obj))) length(obj) else nrow(obj)))
  aligned <- lapply(seq_along(inputs), function(idx) {
    obj <- inputs[[idx]]
    if (is.null(dim(obj))) {
      # Vector input: wrap as a single named column.
      obj <- setNames(data.frame(obj), labels[idx])
    } else if (is.null(colnames(obj))) {
      colnames(obj) <- paste(labels[idx], sequence(ncol(obj)), sep = "_")
    } else {
      colnames(obj) <- paste(labels[idx], colnames(obj), sep = "_")
    }
    padNA(obj, rowsneeded = max_rows, first = first)
  })
  do.call(cbind, aligned)
}
|
# --- Package setup ---------------------------------------------------------
# FIX: the original had several statements fused onto single lines (invalid R
# syntax, likely a paste/extraction artifact); each call is now on its own line.
# NOTE(review): install.packages() calls should normally run once, not on
# every source() of the script.
install.packages("tm")
install.packages("magrittr")
install.packages("factoextra")
install.packages("skmeans")
install.packages("wordcloud")
library(tm)
library(cluster)
library(factoextra)
library(magrittr)
library(skmeans)
library(wordcloud)
library(slam)
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("C:/Users/Praneeth Bomma/Desktop/KDD/Rdata")
# Build and normalise the 'diabetes' corpus.
text_corpus<-Corpus(DirSource("diabetes"))
text_corpus <- tm_map(text_corpus, stripWhitespace)
text_corpus <- tm_map(text_corpus, content_transformer(tolower))
text_corpus <- tm_map(text_corpus, removeWords, stopwords("english"))
# Build and normalise the 'test' corpus.
text_corpus1<-Corpus(DirSource("test"))
text_corpus1 <- tm_map(text_corpus1, stripWhitespace)
text_corpus1 <- tm_map(text_corpus1, content_transformer(tolower))
# BUG FIX: the original assigned this result to text_corpus, clobbering the
# diabetes corpus with the filtered 'test' corpus before the DTM was built.
text_corpus1 <- tm_map(text_corpus1, removeWords, stopwords("english"))
#text_corpus <- tm_map(text_corpus, removePunctuation)
# TF-IDF-weighted document-term matrix of the diabetes corpus.
dtm <- DocumentTermMatrix(text_corpus)
summary(text_corpus)
inspect(dtm)
dtm <- weightTfIdf(dtm, normalize = TRUE)
inspect(dtm)
#
# Most frequent words per skmeans cluster.
#
# clus   -- an object of class "skmeans" (assignments in clus$cluster).
# dtm    -- a DocumentTermMatrix (coerced to a slam simple triplet matrix).
# first  -- number of top words to report per cluster.
# unique -- if TRUE, drop words occurring in more than one cluster.
# Returns a named list CLUSTER_1, CLUSTER_2, ... of named frequency vectors.
mfrq_words_per_cluster <- function(clus, dtm, first = 10, unique = TRUE){
  if (!inherits(clus, "skmeans")) return("clus must be an skmeans object")
  dtm <- as.simple_triplet_matrix(dtm)
  # Boolean document x cluster membership matrix.
  indM <- table(names(clus$cluster), clus$cluster) == 1
  # Help function: sum word counts over one cluster's documents.
  hfun <- function(ind, dtm){
    if (is.null(dtm[ind, ])) dtm[ind, ] else col_sums(dtm[ind, ])
  }
  frqM <- apply(indM, 2, hfun, dtm = dtm)
  if (unique) {
    # Keep only words that appear in exactly one cluster.
    frqM <- frqM[rowSums(frqM > 0) == 1, ]
  }
  # BUG FIX: in the original, `res <- lapply(...)` had been glued onto the end
  # of a comment line, so `res` was never created and names(res) failed.
  # Export to list, order and take the first x elements per cluster.
  res <- lapply(seq_len(ncol(frqM)), function(i, mat, first)
    head(sort(mat[, i], decreasing = TRUE), first),
    mat = frqM, first = first)
  names(res) <- paste0("CLUSTER_", seq_len(ncol(frqM)))
  res
}
# ---- Spherical k-means clustering ----
# (Author's preprocessing note: an empty file must be deleted from the
# corpus folder before this runs.)
# NOTE(review): `clus <- skmeans(dtm, 5)` had been fused onto the comment
# line above in the original, so `clus` was never created; restored here.
clus <- skmeans(dtm, 5)
mfrq_words_per_cluster(clus, dtm)
mfrq_words_per_cluster(clus, dtm, unique = FALSE)
# ---- Dense copies of the DTM ----
m3 <- as.matrix(dtm)
df3 <- as.data.frame(m3)
m <- as.matrix(dtm)
dataframe <- as.data.frame(m)
# m <- m[1:2, 1:3]
# ---- Flat (partitioning) clustering with PAM ----
distMatrix <- dist(dataframe, method = "euclidean")
# NOTE(review): `plot(flatclust, ...)` had been fused onto this line in the
# original; split below. Also, `metric = "manhattan"` has no effect when a
# dist object is supplied to pam() -- TODO confirm intended metric.
flatclust <- pam(distMatrix, k = 2, metric = "manhattan", medoids = NULL)
plot(flatclust, cex = 0.9, hang = -1)
# flatclust1 <- as.matrix(flatclust)
class(flatclust)
flatclust
# ---- Hierarchical clustering and dendrogram ----
install.packages("dendextend")
library(dendextend)
dendoclust <- hclust(distMatrix, method = "ward.D")
dd <- as.dendrogram(dendoclust)
labels(dd)
# NOTE(review): label.dendrogram() is not a function exported by dendextend;
# presumably labels(dd) above was intended -- verify before relying on this.
label.dendrogram(dd)
plot(dendoclust, cex = 0.9, hang = -1)
rect.hclust(dendoclust, k = 25)
install.packages("ggplot2")
library(ggplot2)
m <- as.matrix(dtm)
gc()
# ---- Word cloud of the "test1" corpus ----
# NOTE(review): the Corpus() call had been fused onto the original comment
# line; restored here.
text_corpus <- Corpus(DirSource("test1"))
text_corpus <- tm_map(text_corpus, stripWhitespace)
text_corpus <- tm_map(text_corpus, content_transformer(tolower))
text_corpus <- tm_map(text_corpus, removeWords, stopwords("english"))
library(wordcloud)
wordcloud(text_corpus, min.freq = 1.5)
m <- as.matrix(dtm)
memory.limit()
dtm
# Term-frequency columns for two specific words.
m5 <- m[, "camel"]
m5 <- as.matrix(m5)
m6 <- m[, "okra"]
m6 <- as.matrix(m6)
# ---- Cosine similarity between documents ----
dtm <- DocumentTermMatrix(text_corpus)
m <- t(m)
# NOTE(review): cosine() comes from the 'lsa' package, which is never loaded
# in this script -- add library(lsa) before use.
ma <- cosine(m)
ma <- as.matrix(ma)
# ---- Title Extraction Using Python (the snippet below is Python, not R) ----
# -*- coding: utf-8 -*-
"""Copy every file from ./test into ./output (title-extraction scaffold).

NOTE(review): in the original, four statements were fused onto one line and
the loop-body indentation was lost; restored here. The trailing ``break``
processes only the first file -- presumably intentional while testing;
remove it to process all files.
"""
import os

path = os.getcwd() + "\\test"

# Collect full paths of the regular files directly under ./test.
arr = []
for file in next(os.walk(path))[2]:
    arr.append(path + "\\" + file)

for file in arr:
    print(file)
    file2_name = file.replace("test", "output")
    # Context managers guarantee both handles are closed; the original
    # never closed the input file.
    with open(file, "r", encoding="utf8") as file1, \
            open(file2_name, "w", encoding="utf8") as file2:
        for line in file1:
            file2.write(line)
    break
|
/Surprising_documents.R
|
no_license
|
pbomma/Finding-Surprising-Documents-on-Online-Health-Information
|
R
| false
| false
| 3,477
|
r
|
# ---- Package setup ----
# NOTE(review): in the original, several install.packages()/library() calls
# were fused onto single lines, which does not parse as R; split here.
# Unconditional install.packages() on every run is wasteful -- consider
# guarding with requireNamespace().
install.packages("tm")
install.packages("magrittr")
install.packages("factoextra")
install.packages("skmeans")
install.packages("wordcloud")
library(tm)
library(cluster)
library(factoextra)
library(magrittr)
library(skmeans)
library(wordcloud)
require("slam")  # col_sums() and simple triplet matrices
# Corpus folders live under this (machine-specific) directory.
setwd("C:/Users/Praneeth Bomma/Desktop/KDD/Rdata")
# ---- Training corpus ("diabetes" folder): whitespace, case, stop words ----
text_corpus <- Corpus(DirSource("diabetes"))
text_corpus <- tm_map(text_corpus, stripWhitespace)
text_corpus <- tm_map(text_corpus, content_transformer(tolower))
text_corpus <- tm_map(text_corpus, removeWords, stopwords("english"))
# ---- Test corpus ("test" folder): same cleaning steps ----
text_corpus1 <- Corpus(DirSource("test"))
text_corpus1 <- tm_map(text_corpus1, stripWhitespace)
text_corpus1 <- tm_map(text_corpus1, content_transformer(tolower))
# BUG FIX: the original assigned this result to `text_corpus`, clobbering the
# cleaned training corpus with the test corpus (apparent copy-paste typo).
text_corpus1 <- tm_map(text_corpus1, removeWords, stopwords("english"))
# text_corpus <- tm_map(text_corpus, removePunctuation)
# ---- Document-term matrix, TF-IDF weighted ----
dtm <- DocumentTermMatrix(text_corpus)
summary(text_corpus)
inspect(dtm)
dtm <- weightTfIdf(dtm, normalize = TRUE)
inspect(dtm)
#
# Most-frequent words per skmeans cluster.
#
# clus:   an "skmeans" clustering result; its $cluster vector maps document
#         names to cluster ids.
# dtm:    a DocumentTermMatrix (coerced below to a simple triplet matrix).
# first:  number of top words to report per cluster.
# unique: if TRUE, drop words that occur in more than one cluster first.
#
# Returns a named list CLUSTER_1..CLUSTER_k of the `first` highest-frequency
# words per cluster (named numeric vectors, sorted decreasing).
mfrq_words_per_cluster <- function(clus, dtm, first = 10, unique = TRUE) {
  # Fail fast on a wrong object type; inherits() is the idiomatic check and
  # stop() beats the original's silent return of a message string.
  if (!inherits(clus, "skmeans")) stop("clus must be an skmeans object", call. = FALSE)
  dtm <- as.simple_triplet_matrix(dtm)
  # Boolean document x cluster membership matrix.
  indM <- table(names(clus$cluster), clus$cluster) == 1
  # Helper: column sums of the rows belonging to one cluster.
  hfun <- function(ind, dtm) {
    if (is.null(dtm[ind, ])) dtm[ind, ] else col_sums(dtm[ind, ])
  }
  frqM <- apply(indM, 2, hfun, dtm = dtm)
  if (unique) {
    # Eliminate words which occur in several clusters.
    frqM <- frqM[rowSums(frqM > 0) == 1, ]
  }
  # BUG FIX: in the original the `res <- lapply(...)` assignment had been
  # fused onto a comment line, leaving `res` undefined at the names() call.
  # Order each cluster's words and keep the top `first`.
  res <- lapply(seq_len(ncol(frqM)), function(i, mat, first)
    head(sort(mat[, i], decreasing = TRUE), first), mat = frqM, first = first)
  names(res) <- paste0("CLUSTER_", seq_len(ncol(frqM)))
  res
}
# ---- Spherical k-means clustering ----
# (Author's preprocessing note: an empty file must be deleted from the
# corpus folder before this runs.)
# NOTE(review): `clus <- skmeans(dtm, 5)` had been fused onto the comment
# line above in the original, so `clus` was never created; restored here.
clus <- skmeans(dtm, 5)
mfrq_words_per_cluster(clus, dtm)
mfrq_words_per_cluster(clus, dtm, unique = FALSE)
# ---- Dense copies of the DTM ----
m3 <- as.matrix(dtm)
df3 <- as.data.frame(m3)
m <- as.matrix(dtm)
dataframe <- as.data.frame(m)
# m <- m[1:2, 1:3]
# ---- Flat (partitioning) clustering with PAM ----
distMatrix <- dist(dataframe, method = "euclidean")
# NOTE(review): `plot(flatclust, ...)` had been fused onto this line in the
# original; split below. Also, `metric = "manhattan"` has no effect when a
# dist object is supplied to pam() -- TODO confirm intended metric.
flatclust <- pam(distMatrix, k = 2, metric = "manhattan", medoids = NULL)
plot(flatclust, cex = 0.9, hang = -1)
# flatclust1 <- as.matrix(flatclust)
class(flatclust)
flatclust
# ---- Hierarchical clustering and dendrogram ----
install.packages("dendextend")
library(dendextend)
dendoclust <- hclust(distMatrix, method = "ward.D")
dd <- as.dendrogram(dendoclust)
labels(dd)
# NOTE(review): label.dendrogram() is not a function exported by dendextend;
# presumably labels(dd) above was intended -- verify before relying on this.
label.dendrogram(dd)
plot(dendoclust, cex = 0.9, hang = -1)
rect.hclust(dendoclust, k = 25)
install.packages("ggplot2")
library(ggplot2)
m <- as.matrix(dtm)
gc()
# ---- Word cloud of the "test1" corpus ----
# NOTE(review): the Corpus() call had been fused onto the original comment
# line; restored here.
text_corpus <- Corpus(DirSource("test1"))
text_corpus <- tm_map(text_corpus, stripWhitespace)
text_corpus <- tm_map(text_corpus, content_transformer(tolower))
text_corpus <- tm_map(text_corpus, removeWords, stopwords("english"))
library(wordcloud)
wordcloud(text_corpus, min.freq = 1.5)
m <- as.matrix(dtm)
memory.limit()
dtm
# Term-frequency columns for two specific words.
m5 <- m[, "camel"]
m5 <- as.matrix(m5)
m6 <- m[, "okra"]
m6 <- as.matrix(m6)
# ---- Cosine similarity between documents ----
dtm <- DocumentTermMatrix(text_corpus)
m <- t(m)
# NOTE(review): cosine() comes from the 'lsa' package, which is never loaded
# in this script -- add library(lsa) before use.
ma <- cosine(m)
ma <- as.matrix(ma)
# ---- Title Extraction Using Python (the snippet below is Python, not R) ----
# -*- coding: utf-8 -*-
"""Copy every file from ./test into ./output (title-extraction scaffold).

NOTE(review): in the original, four statements were fused onto one line and
the loop-body indentation was lost; restored here. The trailing ``break``
processes only the first file -- presumably intentional while testing;
remove it to process all files.
"""
import os

path = os.getcwd() + "\\test"

# Collect full paths of the regular files directly under ./test.
arr = []
for file in next(os.walk(path))[2]:
    arr.append(path + "\\" + file)

for file in arr:
    print(file)
    file2_name = file.replace("test", "output")
    # Context managers guarantee both handles are closed; the original
    # never closed the input file.
    with open(file, "r", encoding="utf8") as file1, \
            open(file2_name, "w", encoding="utf8") as file2:
        for line in file1:
            file2.write(line)
    break
|
# Prepare data set
# NOTE(review): hard-coded absolute paths are machine-specific. SCC is read
# here but never used by this particular plot.
SCC <- readRDS("C:/Users/victo/Desktop/COURSERA/2020 Data Science Specialization/04. Exploratory Data Analysis/Course_project_1/exdata_data_NEI_data/Source_Classification_Code.rds")
NEI <- readRDS("C:/Users/victo/Desktop/COURSERA/2020 Data Science Specialization/04. Exploratory Data Analysis/Course_project_1/exdata_data_NEI_data/summarySCC_PM25.rds")
# 1Q: Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Sum Emissions within each year, then plot the yearly totals (divided by
# 1000) as a connected point-and-line series.
totalNEI <- aggregate(Emissions ~ year, NEI, sum)
plot(totalNEI$year, totalNEI$Emissions/1000, type = "o", col = "tomato",
     xlab = "Year", ylab = expression("Total" ~ PM[2.5] ~ "Emissions (tons)"),
     main = expression("Total US" ~ PM[2.5] ~ "Emissions by Year (from 1999 to 2008)"))
|
/plot1.R
|
no_license
|
Trochillianne/04.-Exploratory-Data-Analysis_Project2
|
R
| false
| false
| 771
|
r
|
# Prepare data set
SCC <- readRDS("C:/Users/victo/Desktop/COURSERA/2020 Data Science Specialization/04. Exploratory Data Analysis/Course_project_1/exdata_data_NEI_data/Source_Classification_Code.rds")
NEI <- readRDS("C:/Users/victo/Desktop/COURSERA/2020 Data Science Specialization/04. Exploratory Data Analysis/Course_project_1/exdata_data_NEI_data/summarySCC_PM25.rds")
# 1Q: Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
totalNEI <- aggregate(Emissions ~ year, NEI, sum)
plot(totalNEI$year, totalNEI$Emissions/1000, type = "o", col = "tomato",
xlab = "Year", ylab = expression("Total" ~ PM[2.5] ~ "Emissions (tons)"),
main = expression("Total US" ~ PM[2.5] ~ "Emissions by Year (from 1999 to 2008)"))
|
library(data.table)
# Draw the four-panel power-consumption figure (course "plot4").
#
# output_to_screen: if TRUE, render on the active screen device; otherwise
#                   write plot4.png (480x480) to the working directory.
#
# Reads household_power_consumption.txt from the working directory and keeps
# only the rows for 2007-02-01 and 2007-02-02.
plot4 <- function(output_to_screen = FALSE) {
  # Read and subset the raw data; returns list(hpc, date_time). The original
  # pushed results out via `<<-`, leaking state across scopes -- returning
  # values keeps everything local.
  read_data <- function() {
    file <- 'household_power_consumption.txt'
    allowed_dates <- as.Date(c('2007-02-01', '2007-02-02'), '%Y-%m-%d')
    # Read everything as character so "?" can act as the NA marker.
    hpc <- fread(file, sep = ';', colClasses = "character", na.strings = "?")
    # Subset to the two target dates BEFORE building the datetime strings:
    # paste() over the full file is the expensive part.
    hpc <- hpc[as.Date(Date, '%d/%m/%Y') %between% allowed_dates, ]
    # Coerce every plotted column back to numeric. NOTE(review): the
    # original only coerced Global_active_power, leaving the other five
    # plotted columns as character.
    num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
    hpc[, (num_cols) := lapply(.SD, as.numeric), .SDcols = num_cols]
    date_time <- strptime(paste(hpc$Date, hpc$Time), '%d/%m/%Y %H:%M:%S')
    list(hpc = hpc, date_time = date_time)
  }
  # All plotting, taking the data explicitly.
  do_plot <- function(hpc, DateTime) {
    par(mfrow = c(2, 2))  # two rows, two columns
    # First plot [top left]: global active power over time.
    plot(
      DateTime, hpc$Global_active_power,
      type = "l", xlab = "", ylab = "Global Active Power",
      main = ""
    )
    # Second plot [top right]: voltage over time.
    plot(
      DateTime, hpc$Voltage,
      type = "l", xlab = "datetime", ylab = "Voltage",
      main = ""
    )
    # Third plot [bottom left]: the three sub-metering series on one panel.
    plot(
      DateTime, hpc$Sub_metering_1,
      xlab = "", ylab = "Energy sub metering", type = "n"
    )
    lines(DateTime, hpc$Sub_metering_1, col = "grey")
    lines(DateTime, hpc$Sub_metering_2, col = "red")
    lines(DateTime, hpc$Sub_metering_3, col = "blue")
    legend("topright",
           col = c('grey', 'red', 'blue'), bty = "n", lty = 1,
           legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3')
    )
    # Fourth plot [bottom right]: global reactive power over time.
    plot(
      DateTime, hpc$Global_reactive_power, type = "l",
      xlab = "datetime", ylab = "Global_reactive_power"
    )
  }
  # First, read the data.
  dat <- read_data()
  # Render to screen and stop, if requested.
  if (output_to_screen) {
    do_plot(dat$hpc, dat$date_time)
    return(invisible(NULL))
  }
  # Otherwise render to PNG; on.exit() guarantees the device is closed even
  # if plotting fails partway through.
  png("plot4.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  do_plot(dat$hpc, dat$date_time)
  invisible(NULL)
}
|
/plot4.R
|
no_license
|
Howard3/ExData_Plotting1
|
R
| false
| false
| 3,176
|
r
|
library(data.table)
# Draw the four-panel power-consumption figure (course "plot4").
#
# output_to_screen: if TRUE, render on the active screen device; otherwise
#                   write plot4.png (480x480) to the working directory.
#
# Reads household_power_consumption.txt from the working directory and keeps
# only the rows for 2007-02-01 and 2007-02-02.
plot4 <- function(output_to_screen = FALSE) {
  # Read and subset the raw data; returns list(hpc, date_time). The original
  # pushed results out via `<<-`, leaking state across scopes -- returning
  # values keeps everything local.
  read_data <- function() {
    file <- 'household_power_consumption.txt'
    allowed_dates <- as.Date(c('2007-02-01', '2007-02-02'), '%Y-%m-%d')
    # Read everything as character so "?" can act as the NA marker.
    hpc <- fread(file, sep = ';', colClasses = "character", na.strings = "?")
    # Subset to the two target dates BEFORE building the datetime strings:
    # paste() over the full file is the expensive part.
    hpc <- hpc[as.Date(Date, '%d/%m/%Y') %between% allowed_dates, ]
    # Coerce every plotted column back to numeric. NOTE(review): the
    # original only coerced Global_active_power, leaving the other five
    # plotted columns as character.
    num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
    hpc[, (num_cols) := lapply(.SD, as.numeric), .SDcols = num_cols]
    date_time <- strptime(paste(hpc$Date, hpc$Time), '%d/%m/%Y %H:%M:%S')
    list(hpc = hpc, date_time = date_time)
  }
  # All plotting, taking the data explicitly.
  do_plot <- function(hpc, DateTime) {
    par(mfrow = c(2, 2))  # two rows, two columns
    # First plot [top left]: global active power over time.
    plot(
      DateTime, hpc$Global_active_power,
      type = "l", xlab = "", ylab = "Global Active Power",
      main = ""
    )
    # Second plot [top right]: voltage over time.
    plot(
      DateTime, hpc$Voltage,
      type = "l", xlab = "datetime", ylab = "Voltage",
      main = ""
    )
    # Third plot [bottom left]: the three sub-metering series on one panel.
    plot(
      DateTime, hpc$Sub_metering_1,
      xlab = "", ylab = "Energy sub metering", type = "n"
    )
    lines(DateTime, hpc$Sub_metering_1, col = "grey")
    lines(DateTime, hpc$Sub_metering_2, col = "red")
    lines(DateTime, hpc$Sub_metering_3, col = "blue")
    legend("topright",
           col = c('grey', 'red', 'blue'), bty = "n", lty = 1,
           legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3')
    )
    # Fourth plot [bottom right]: global reactive power over time.
    plot(
      DateTime, hpc$Global_reactive_power, type = "l",
      xlab = "datetime", ylab = "Global_reactive_power"
    )
  }
  # First, read the data.
  dat <- read_data()
  # Render to screen and stop, if requested.
  if (output_to_screen) {
    do_plot(dat$hpc, dat$date_time)
    return(invisible(NULL))
  }
  # Otherwise render to PNG; on.exit() guarantees the device is closed even
  # if plotting fails partway through.
  png("plot4.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  do_plot(dat$hpc, dat$date_time)
  invisible(NULL)
}
|
#### Read input file ####
#covid_original <- readxl::read_xlsx(
#"data-raw/HIST_PAINEL_COVIDBR_06set2020.xlsx")
library(dplyr)
# NOTE(review): `path` is the deprecated argument name in newer readr
# versions (the current name is `file`); kept as-is here.
covid_original <- readr::read_rds(path = "data-raw/covid.rds")
#### Organize ####
# Metric definitions from covid.saude.gov.br / "About":
# Incidence = estimated risk of COVID-19 cases in the population:
#   confirmed cases / population * 100,000
# Mortality = number of COVID-19 deaths per 100,000 inhabitants:
#   deaths / population * 100,000
# Lethality = number of confirmed deaths relative to total confirmed cases:
#   deaths / confirmed cases * 100
# covid_original <- covid_original %>%
# select(-codRegiaoSaude, -Recuperadosnovos,
# -emAcompanhamentoNovos, -obitosAcumulado_log2, -obitosNovos_log2) %>%
# mutate(
# incidencia = casosAcumulado / populacaoTCU2019 * 100000,
# mortalidade = obitosAcumulado / populacaoTCU2019 * 100000,
# letalidade = obitosAcumulado / casosAcumulado * 100,
# )
# Drop helper/log columns not needed downstream.
covid_original <- covid_original %>%
  select(-codRegiaoSaude, -Recuperadosnovos,
         -emAcompanhamentoNovos, -obitosAcumulado_log2, -obitosNovos_log2)
#### Split by administrative level ####
# National level: region "Brasil", no state or municipality.
covid_brasil <- covid_original %>%
  filter(regiao == "Brasil", is.na(estado), is.na(municipio))
# By state: state present, municipality absent.
covid_estado <- covid_original %>%
  filter(regiao != "Brasil", !is.na(estado), is.na(municipio))
# By municipality: both state and municipality present.
covid_municipio <- covid_original %>%
  filter(regiao != "Brasil", !is.na(estado), !is.na(municipio))
# Restrict to the state of Sao Paulo (SP).
covid_sp <- covid_municipio %>%
  filter(estado == "SP")
#### Fix municipality codes ####
# Join the check-digit municipality code (MUNCODDV) from abjData::cadmun.
covid_sp <- covid_sp %>%
  left_join(y = select(abjData::cadmun, 1:2),
            by = c("codmun" = "MUNCOD"))
covid_sp <- covid_sp %>%
  mutate(code_muni = MUNCODDV) %>%
  select(-MUNCODDV, -codmun)
#### Export ####
# SP data only.
readr::write_rds(covid_sp, "data/COVID-sp.rds")
|
/data-raw/COVID.R
|
no_license
|
rfdornelles/TrabalhoFinal
|
R
| false
| false
| 1,905
|
r
|
#### Read input file ####
#covid_original <- readxl::read_xlsx(
#"data-raw/HIST_PAINEL_COVIDBR_06set2020.xlsx")
library(dplyr)
# NOTE(review): `path` is the deprecated argument name in newer readr
# versions (the current name is `file`); kept as-is here.
covid_original <- readr::read_rds(path = "data-raw/covid.rds")
#### Organize ####
# Metric definitions from covid.saude.gov.br / "About":
# Incidence = estimated risk of COVID-19 cases in the population:
#   confirmed cases / population * 100,000
# Mortality = number of COVID-19 deaths per 100,000 inhabitants:
#   deaths / population * 100,000
# Lethality = number of confirmed deaths relative to total confirmed cases:
#   deaths / confirmed cases * 100
# covid_original <- covid_original %>%
# select(-codRegiaoSaude, -Recuperadosnovos,
# -emAcompanhamentoNovos, -obitosAcumulado_log2, -obitosNovos_log2) %>%
# mutate(
# incidencia = casosAcumulado / populacaoTCU2019 * 100000,
# mortalidade = obitosAcumulado / populacaoTCU2019 * 100000,
# letalidade = obitosAcumulado / casosAcumulado * 100,
# )
# Drop helper/log columns not needed downstream.
covid_original <- covid_original %>%
  select(-codRegiaoSaude, -Recuperadosnovos,
         -emAcompanhamentoNovos, -obitosAcumulado_log2, -obitosNovos_log2)
#### Split by administrative level ####
# National level: region "Brasil", no state or municipality.
covid_brasil <- covid_original %>%
  filter(regiao == "Brasil", is.na(estado), is.na(municipio))
# By state: state present, municipality absent.
covid_estado <- covid_original %>%
  filter(regiao != "Brasil", !is.na(estado), is.na(municipio))
# By municipality: both state and municipality present.
covid_municipio <- covid_original %>%
  filter(regiao != "Brasil", !is.na(estado), !is.na(municipio))
# Restrict to the state of Sao Paulo (SP).
covid_sp <- covid_municipio %>%
  filter(estado == "SP")
#### Fix municipality codes ####
# Join the check-digit municipality code (MUNCODDV) from abjData::cadmun.
covid_sp <- covid_sp %>%
  left_join(y = select(abjData::cadmun, 1:2),
            by = c("codmun" = "MUNCOD"))
covid_sp <- covid_sp %>%
  mutate(code_muni = MUNCODDV) %>%
  select(-MUNCODDV, -codmun)
#### Export ####
# SP data only.
readr::write_rds(covid_sp, "data/COVID-sp.rds")
|
# This script generates the equilibria of the competition model against a
# range of various levels of breast milk (f_z).
# Call competition model (defines competition() used by ode() below).
source("../model/competition model.R")
# Output paths for the saved equilibria.
output_fz_vaginal <- "data/f_z_vaginal_without_M.rData"
output_fz_csection <- "data/f_z_c-section_without_M.rData"
# parameters --------------------------------------------------------------
library(deSolve)
library(plyr)
# Parameters for the function Z(t).
w <- 0.014; h <- 500
# Growth rate.
r <- 1
# Competition coefficients.
alpha <- 2
alpha_c <- 1.7
# Carrying capacity.
k <- 1000
# Input of bacteria.
f_2 <- 30/k; f_3 <- 50/k
# Solve the model over f_z spanning 10^-2 .. 10^2 (101 grid points).
pow <- seq(-2, 2, 0.04)
# Preallocate result holders: growing a data.frame with rbind() inside the
# loop (as the original did) is O(n^2); collect rows in lists instead and
# bind once at the end.
res_csection <- vector("list", length(pow))
res_vaginal <- vector("list", length(pow))
# NOTE(review): the original used `repeat` with a hard-coded `cnt == 102`
# stop condition, which silently breaks if `pow` changes length; iterating
# over seq_along(pow) is equivalent for the current 101-point grid and safe.
for (cnt in seq_along(pow)) {
  f_z <- 10^pow[cnt]/k
  print(paste0("f_z = ", f_z))
  # Parameters used in the model function.
  p <- c(f_2 = f_2, f_3 = f_3, f_z = f_z,
         r = r, alpha = alpha, k = k,
         alpha_c = alpha_c)
  # Initial conditions for c-section births.
  y0_csection <- c(B_1 = 1/k, B_2 = 1/k, B_3 = 50/k)
  # Initial conditions for vaginal births.
  y0_vaginal <- c(B_1 = 50/k, B_2 = 50/k, B_3 = 1/k)
  # Integrate long enough (t = 0..1000) to reach equilibrium.
  times <- seq(0, 1000, 1)
  # Solve the ODE system and keep results as data frames.
  output_csection <- data.frame(ode(y = y0_csection, times, competition, p))
  output_vaginal <- data.frame(ode(y = y0_vaginal, times, competition, p))
  output_csection$f_z <- f_z
  output_vaginal$f_z <- f_z
  # Keep only the final state (columns 2:5 = B_1, B_2, B_3, f_z).
  res_csection[[cnt]] <- output_csection[nrow(output_csection), 2:5]
  res_vaginal[[cnt]] <- output_vaginal[nrow(output_vaginal), 2:5]
}
df_fz_csection <- do.call(rbind, res_csection)
df_fz_vaginal <- do.call(rbind, res_vaginal)
# Save the data.
save(df_fz_vaginal, file = output_fz_vaginal)
save(df_fz_csection, file = output_fz_csection)
|
/code/data generation/effect of milk fz_competition model.R
|
permissive
|
xiyanxiongnico/Modelling-the-effect-of-birth-and-feeding-modes-on-the-development-of-human-gut-microbiota
|
R
| false
| false
| 1,934
|
r
|
# This script generates the equilibria of the competition model against a
# range of various levels of breast milk (f_z).
# Call competition model (defines competition() used by ode() below).
source("../model/competition model.R")
# Output paths for the saved equilibria.
output_fz_vaginal <- "data/f_z_vaginal_without_M.rData"
output_fz_csection <- "data/f_z_c-section_without_M.rData"
# parameters --------------------------------------------------------------
library(deSolve)
library(plyr)
# Parameters for the function Z(t).
w <- 0.014; h <- 500
# Growth rate.
r <- 1
# Competition coefficients.
alpha <- 2
alpha_c <- 1.7
# Carrying capacity.
k <- 1000
# Input of bacteria.
f_2 <- 30/k; f_3 <- 50/k
# Solve the model over f_z spanning 10^-2 .. 10^2 (101 grid points).
pow <- seq(-2, 2, 0.04)
# Preallocate result holders: growing a data.frame with rbind() inside the
# loop (as the original did) is O(n^2); collect rows in lists instead and
# bind once at the end.
res_csection <- vector("list", length(pow))
res_vaginal <- vector("list", length(pow))
# NOTE(review): the original used `repeat` with a hard-coded `cnt == 102`
# stop condition, which silently breaks if `pow` changes length; iterating
# over seq_along(pow) is equivalent for the current 101-point grid and safe.
for (cnt in seq_along(pow)) {
  f_z <- 10^pow[cnt]/k
  print(paste0("f_z = ", f_z))
  # Parameters used in the model function.
  p <- c(f_2 = f_2, f_3 = f_3, f_z = f_z,
         r = r, alpha = alpha, k = k,
         alpha_c = alpha_c)
  # Initial conditions for c-section births.
  y0_csection <- c(B_1 = 1/k, B_2 = 1/k, B_3 = 50/k)
  # Initial conditions for vaginal births.
  y0_vaginal <- c(B_1 = 50/k, B_2 = 50/k, B_3 = 1/k)
  # Integrate long enough (t = 0..1000) to reach equilibrium.
  times <- seq(0, 1000, 1)
  # Solve the ODE system and keep results as data frames.
  output_csection <- data.frame(ode(y = y0_csection, times, competition, p))
  output_vaginal <- data.frame(ode(y = y0_vaginal, times, competition, p))
  output_csection$f_z <- f_z
  output_vaginal$f_z <- f_z
  # Keep only the final state (columns 2:5 = B_1, B_2, B_3, f_z).
  res_csection[[cnt]] <- output_csection[nrow(output_csection), 2:5]
  res_vaginal[[cnt]] <- output_vaginal[nrow(output_vaginal), 2:5]
}
df_fz_csection <- do.call(rbind, res_csection)
df_fz_vaginal <- do.call(rbind, res_vaginal)
# Save the data.
save(df_fz_vaginal, file = output_fz_vaginal)
save(df_fz_csection, file = output_fz_csection)
|
# Extract GADM to Points
# For every grid point, compute the projected (UTM) distance to Addis Ababa,
# to cities grouped into 1994-population terciles, and to all cities.
# Load Data --------------------------------------------------------------------
#### Grid points
points <- readRDS(file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", "points.Rds"))
# Grid datasets arrive as plain data.frames; promote them to spatial points.
if (grepl("grid", DATASET_TYPE)) {
  coordinates(points) <- ~long+lat
  crs(points) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
}
# Work in a projected CRS (UTM for Ethiopia) so distances come out in meters.
points <- points %>% spTransform(CRS(UTM_ETH))
#### Cities
city_data <- read.csv(file.path(finaldata_file_path, "city_population", "city_pop_geocoded.csv"))
coordinates(city_data) <- ~lon+lat
crs(city_data) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
city_data <- city_data %>% spTransform(CRS(UTM_ETH))
city_data$id <- 1
# Aggregating only accepts SpatialPolygons, so buffer by a small amount.
city_data <- city_data %>% gBuffer(width = .1, byid = TRUE)
# Distance to Cities -----------------------------------------------------------
#### Specific Cities
city_data_addisababa <- city_data[city_data$name %in% "Addis Ababa", ]
#### Population Groups
## Three groups split at the 1/3 and 2/3 quantiles of 1994 population.
pop_group_list <- city_data$pop_1994 %>% quantile(probs = c(0.3333, 0.6666)) %>% as.numeric()
city_data$popsize_3groups <- 1
# seq_along() instead of 1:length() (safe if the quantile vector were empty);
# each threshold bumps qualifying cities into the next group.
for (i in seq_along(pop_group_list)) {
  city_data$popsize_3groups[city_data$pop_1994 >= pop_group_list[i]] <- (i + 1)
}
city_data_popsize_3groups_g1 <- city_data[city_data$popsize_3groups %in% 1, ] %>% raster::aggregate(by = "id")
city_data_popsize_3groups_g2 <- city_data[city_data$popsize_3groups %in% 2, ] %>% raster::aggregate(by = "id")
city_data_popsize_3groups_g3 <- city_data[city_data$popsize_3groups %in% 3, ] %>% raster::aggregate(by = "id")
#### All Cities
city_data_all <- city_data %>% raster::aggregate(by = "id")
# Calculate Distance -----------------------------------------------------------
# gDistance_chunks() is a project helper (chunked point-to-polygon distance).
points$distance_city_addisababa <- gDistance_chunks(points, city_data_addisababa, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g1 <- gDistance_chunks(points, city_data_popsize_3groups_g1, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g2 <- gDistance_chunks(points, city_data_popsize_3groups_g2, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g3 <- gDistance_chunks(points, city_data_popsize_3groups_g3, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_all <- gDistance_chunks(points, city_data_all, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
# Export -----------------------------------------------------------------------
saveRDS(points@data, file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", "points_distance_cities.Rds"))
|
/02_create_main_analysis_datasets/02_extract_variables/02d_distance_cities.R
|
no_license
|
mohammed-seid/Ethiopia-Corridors-IE
|
R
| false
| false
| 2,652
|
r
|
# Extract GADM to Points
# For every grid point, compute the projected (UTM) distance to Addis Ababa,
# to cities grouped into 1994-population terciles, and to all cities.
# Load Data --------------------------------------------------------------------
#### Grid points
points <- readRDS(file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", "points.Rds"))
# Grid datasets arrive as plain data.frames; promote them to spatial points.
if (grepl("grid", DATASET_TYPE)) {
  coordinates(points) <- ~long+lat
  crs(points) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
}
# Work in a projected CRS (UTM for Ethiopia) so distances come out in meters.
points <- points %>% spTransform(CRS(UTM_ETH))
#### Cities
city_data <- read.csv(file.path(finaldata_file_path, "city_population", "city_pop_geocoded.csv"))
coordinates(city_data) <- ~lon+lat
crs(city_data) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
city_data <- city_data %>% spTransform(CRS(UTM_ETH))
city_data$id <- 1
# Aggregating only accepts SpatialPolygons, so buffer by a small amount.
city_data <- city_data %>% gBuffer(width = .1, byid = TRUE)
# Distance to Cities -----------------------------------------------------------
#### Specific Cities
city_data_addisababa <- city_data[city_data$name %in% "Addis Ababa", ]
#### Population Groups
## Three groups split at the 1/3 and 2/3 quantiles of 1994 population.
pop_group_list <- city_data$pop_1994 %>% quantile(probs = c(0.3333, 0.6666)) %>% as.numeric()
city_data$popsize_3groups <- 1
# seq_along() instead of 1:length() (safe if the quantile vector were empty);
# each threshold bumps qualifying cities into the next group.
for (i in seq_along(pop_group_list)) {
  city_data$popsize_3groups[city_data$pop_1994 >= pop_group_list[i]] <- (i + 1)
}
city_data_popsize_3groups_g1 <- city_data[city_data$popsize_3groups %in% 1, ] %>% raster::aggregate(by = "id")
city_data_popsize_3groups_g2 <- city_data[city_data$popsize_3groups %in% 2, ] %>% raster::aggregate(by = "id")
city_data_popsize_3groups_g3 <- city_data[city_data$popsize_3groups %in% 3, ] %>% raster::aggregate(by = "id")
#### All Cities
city_data_all <- city_data %>% raster::aggregate(by = "id")
# Calculate Distance -----------------------------------------------------------
# gDistance_chunks() is a project helper (chunked point-to-polygon distance).
points$distance_city_addisababa <- gDistance_chunks(points, city_data_addisababa, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g1 <- gDistance_chunks(points, city_data_popsize_3groups_g1, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g2 <- gDistance_chunks(points, city_data_popsize_3groups_g2, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_popsize_3groups_g3 <- gDistance_chunks(points, city_data_popsize_3groups_g3, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
points$distance_city_all <- gDistance_chunks(points, city_data_all, CHUNK_SIZE_DIST_ROADS, MCCORS_DIST_ROADS)
# Export -----------------------------------------------------------------------
saveRDS(points@data, file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", "points_distance_cities.Rds"))
|
# ************************************************************ #
# Prepare treatment and control tables for matching
# this script is run as a job on the HPC
# this multiplies our 49 datasets by 6(comparisons), and in turn, by 2 (other counterfactual)
# ************************************************************ #
# libraries
library(readr)
library(dplyr)
library(fastDummies)
library(tidyr)
# parameters
# Tenure categories used as controls (counterfactuals) and as treatments.
controlVars = c("private","public")
treatmentVars = c("private","public", "protected", "sustainable_use", "indigenous", "communal", "quilombola")
# --------------------------------------------------------------------------#
# 1. READ IN DATA ----
wdmain <- "/gpfs1/data/idiv_meyer/01_projects/Andrea/P1"
wd_data_formatching <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysis/")
wd_out <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysisCEM")
# 2. Load match-analysis-ready datasets ----
# (these are parcel-level datasets for the extent of all Brazil that include joined data from Ruben's extractions of variables to be matched on)
setwd(wd_data_formatching)
input <- list.files()
# Which input file to process is selected by the SGE array-job task id.
i=as.integer(Sys.getenv('SGE_TASK_ID'))
dataset <- readRDS(input[i])
# Dataset name = file name with its suffix stripped; reused in output names.
n <- gsub("_allAnalysisData.rds", "", input[i])
# 3. Set up tables for matching (creating dummies and separate dataframes for each match we're making) ----
# (e.g. indigenous tenure) and control (e.g. private tenure)
# One tenure_* dummy (0/1) column per tenure category.
datalist <- dummy_cols(dataset, select_columns = "tenure")
# we need to create a table listing for each spatial-temporal scale combination of all individual matches that have to be built, e.g. indigenous against private, etc.
# create function to compare tenures: creates column where treatment is coded as 1, and control is coded as 0. everything else is coded as NA
# the function should also return a dataframe that keeps only the treatment and control observations (dropping all NA's)
# datalist original looked like: datalist[[i]][[j]] (i=extents)(j=data)
# Code treatment vs control for one tenure pair.
# Adds a "<control>_vs_<treatment>" column: 1 where tenure_<treatment> == 1,
# 0 where tenure_<control> == 1, NA otherwise; then drops NA rows so only
# the two compared groups remain.
# NOTE(review): drop_na() removes rows with NA in ANY column, not only the
# new comparison column -- confirm the other covariate columns are complete,
# otherwise observations are silently lost here.
compareTenures <- function(datalist, control, treatment){
comparison_table <- datalist[,-grep("tenure_", colnames(datalist))]
comparison_table[,paste0(control, "_vs_", treatment)] <- ifelse(datalist[,paste0("tenure_", treatment)] == 1,1,
ifelse(datalist[,paste0("tenure_", control)] == 1,0,NA ) ) # give me a column that re-codes treatment and control variables
comparison_table <- drop_na(comparison_table) # give me a table that keeps only those observations which I'm specifically compariing (not NA's)
return(comparison_table)
}
# create function to apply compareTenures to all tenure forms
# returns a table with only one column that specifies the control compared to the treatment. e.g. public_vs_private
# Build, for one control tenure, the comparison tables against every other
# tenure type present in `match_list`.
#
# match_list: parcel-level data.frame with tenure_* dummy columns.
# control:    tenure name to use as the control (coded 0).
#
# Returns a named list of data.frames, one per treatment tenure actually
# present in the data, named "<dataset>_<control>_<treatment>" (relies on
# the globals `n` and `treatmentVars` defined earlier in this script).
createTable_control_vs_treatment <- function(match_list, control) {
  table_c_vs_t <- list()
  # seq_along() instead of 1:length() guards against an empty treatment set.
  for (j in seq_along(treatmentVars)) {  # each tenure type except the control
    if (treatmentVars[j] != control) {
      # Only build the comparison if this tenure's dummy column exists.
      if (match(treatmentVars[j], gsub("tenure_", "", colnames(match_list)), nomatch = 0) != 0) {
        table_c_vs_t[[length(table_c_vs_t) + 1]] <- compareTenures(match_list, control, treatmentVars[j])
        names(table_c_vs_t)[length(table_c_vs_t)] <- paste0(n, "_", control, "_", treatmentVars[j])
      }
    }
  }
  table_c_vs_t  # all dataframes needed for matching under this control
}
# create function to apply "createTable_control_vs_treatment" for all controls by looping through our pre-established controlVars
# Apply createTable_control_vs_treatment() for every control tenure in
# `controlVars`; returns a list of comparison-table lists named by control.
loopThruControls <- function(match_extents_list, controlVars) {
  # Preallocate instead of growing, and use seq_along() rather than
  # 1:length() (the original's form misbehaves on an empty vector).
  tableForMatching <- vector("list", length(controlVars))
  for (i in seq_along(controlVars)) {
    tableForMatching[[i]] <- createTable_control_vs_treatment(match_extents_list, controlVars[i])
  }
  names(tableForMatching) <- controlVars
  tableForMatching
}
mydataset <- loopThruControls(datalist, controlVars)
# Write each comparison table to CSV, named after its list entry
# ("<dataset>_<control>_<treatment>.csv"). seq_along() replaces the
# original 1:length() loops, which break on empty lists.
setwd(wd_out)
for (i in seq_along(mydataset)) {
  for (j in seq_along(mydataset[[i]])) {
    write_csv(mydataset[[i]][[j]], paste0(names(mydataset[[i]][j]), ".csv"))
  }
}
|
/02_PrepareTablesForMatching.R
|
permissive
|
pacheco-andrea/tenure-defor-br
|
R
| false
| false
| 4,244
|
r
|
# ************************************************************ #
# Prepare treatment and control tables for matching
# this script is run as a job on the HPC
# this multiplies our 49 datasets by 6(comparisons), and in turn, by 2 (other counterfactual)
# ************************************************************ #
# libraries
library(readr)
library(dplyr)
library(fastDummies)
library(tidyr)
# parameters
controlVars = c("private","public")
treatmentVars = c("private","public", "protected", "sustainable_use", "indigenous", "communal", "quilombola")
# --------------------------------------------------------------------------#
# 1. READ IN DATA ----
wdmain <- "/gpfs1/data/idiv_meyer/01_projects/Andrea/P1"
wd_data_formatching <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysis/")
wd_out <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysisCEM")
# 2. Load match-analysis-ready datasets ----
# (these are parcel-level datasets for the extent of all Brazil that include joined data from Ruben's extractions of variables to be matched on)
setwd(wd_data_formatching)
input <- list.files()
i=as.integer(Sys.getenv('SGE_TASK_ID'))
dataset <- readRDS(input[i])
n <- gsub("_allAnalysisData.rds", "", input[i])
# 3. Set up tables for matching (creating dummies and separate dataframes for each match we're making) ----
# (e.g. indigenous tenure) and control (e.g. private tenure)
datalist <- dummy_cols(dataset, select_columns = "tenure")
# we need to create a table listing for each spatial-temporal scale combination of all individual matches that have to be built, e.g. indigenous against private, etc.
# create function to compare tenures: creates column where treatment is coded as 1, and control is coded as 0. everything else is coded as NA
# the function should also return a dataframe that keeps only the treatment and control observations (dropping all NA's)
# datalist original looked like: datalist[[i]][[j]] (i=extents)(j=data)
compareTenures <- function(datalist, control, treatment){
comparison_table <- datalist[,-grep("tenure_", colnames(datalist))]
comparison_table[,paste0(control, "_vs_", treatment)] <- ifelse(datalist[,paste0("tenure_", treatment)] == 1,1,
ifelse(datalist[,paste0("tenure_", control)] == 1,0,NA ) ) # give me a column that re-codes treatment and control variables
comparison_table <- drop_na(comparison_table) # give me a table that keeps only those observations which I'm specifically compariing (not NA's)
return(comparison_table)
}
# create function to apply compareTenures to all tenure forms
# returns a table with only one column that specifies the control compared to the treatment. e.g. public_vs_private
createTable_control_vs_treatment <- function(match_list, control) {
table_c_vs_t <- list()
# for(i in 1:length(match_list)) # for each extent (whether that's spatial or temporal)
# {
for(j in 1:length(treatmentVars)) # for each tenure type (except the one you're comparing to)
{
if(treatmentVars[j] != control) {
if(match(treatmentVars[j], gsub("tenure_", "", colnames(match_list)), nomatch = 0) != 0 ){
table_c_vs_t[[length(table_c_vs_t)+1]] <- compareTenures(match_list, control, treatmentVars[j])
names(table_c_vs_t)[length(table_c_vs_t)] <- paste0(n, "_", control, "_", treatmentVars[j])
}
}
}
# }
return(table_c_vs_t) # this should return all dataframes needed for matching, within this control established
}
# create function to apply "createTable_control_vs_treatment" for all controls by looping through our pre-established controlVars
loopThruControls <- function(match_extents_list,controlVars) {
tableForMatching <- list()
for(i in 1:length(controlVars))
{
tableForMatching[[i]] <- createTable_control_vs_treatment(match_extents_list, controlVars[i])
}
names(tableForMatching) <- controlVars
return(tableForMatching)
}
mydataset <- loopThruControls(datalist, controlVars)
# write data to be matched on
setwd(wd_out)
for(i in 1:length(mydataset))
{
for(j in 1:length(mydataset[[i]]))
{
write_csv(mydataset[[i]][[j]], paste0(names(mydataset[[i]][j]), ".csv"))
}
}
|
#Setting WD
WD <- getwd()
#igraph
library(igraph)
# Non-Parade Adjacency Table
NoParade<- as.matrix(read.csv("MKN_Time_NonParade.csv",header = TRUE))
# Generating Graph
Attraction <- NoParade[,1]
NoParade <- NoParade[, -1]
colnames(NoParade) <- rownames(NoParade) <- Attraction
NoParade[is.na(NoParade)] <- 0
NoParade<- graph.adjacency(NoParade, weighted = TRUE)
#\#\#\#\#\
# Adding Additial Thematic Attributes
#434E9F Blue
#BA0A30 Pink
# Creating BTA Colour Ramp
palf <- colorRampPalette(c("#434E9F", "#BA0A30"))
plot(x=10:1, y=1:10, pch=19, cex=3, col=palf(7))
NodeAttribute<- read.csv("MKN_Node_Attributes.csv",header = TRUE)
names(NodeAttribute)
colnames(NodeAttribute)[1] = "Attraction"
Land <- as.character(NodeAttribute$Land)
LandColour <- as.character(NodeAttribute$LandColour)
ClassColour <- as.character(NodeAttribute$Class.Colour)
Class <- as.character(NodeAttribute$Class)
RideType <- as.character(NodeAttribute$Ride.Type)
RideColour <- as.character(NodeAttribute$Ride.Type.Colour)
BigThrills <- as.character(NodeAttribute$BigThrills)
BigThrillsColour <- as.character(NodeAttribute$BigThrillColour)
ParadeNode <- as.character(NodeAttribute$ParadeNode)
ParadeLab <- as.character(NodeAttribute$ParadeLab)
NodeSLabel <- as.character(NodeAttribute$NodeSLabel)
PPColour_NP <- as.character(NodeAttribute$PPColour_NP)
PPColour_P <- as.character(NodeAttribute$PPColour_P)
BTParade <- as.character(NodeAttribute$BT_Parade)
BTNoParade <- as.character(NodeAttribute$BT_NoParade)
## Adding Attribute Data
V(NoParade)$Land <- Land
V(NoParade)$LandColour <- LandColour
V(NoParade)$Class <- Class
V(NoParade)$ClassColour<- ClassColour
V(NoParade)$RideType<- RideType
V(NoParade)$RideColour<- RideColour
V(NoParade)$BigThrills <- BigThrills
V(NoParade)$BigThrillsColour <- BigThrillsColour
V(NoParade)$PPColour_NP <- PPColour_NP
V(NoParade)$BTNoParade <- BTNoParade
vertex_attr(NoParade)
edge_attr(NoParade)
E(NoParade)
gsize(NoParade)
write.table((edge_attr(NoParade)),file="NoPEdge.txt",row.names = FALSE)
#Connections go down each row.
#\#\#\#\#\
# Plotting GraphS
## Setting up Additional Variables
l <- layout_with_fr(NoParade)
plot(NoParade,
edge.arrow.size=0,
edge.color="black",
layout=l,
vertex.label=NA,
vertex.color=V(NoParade)$ClassColour,
vertex.size=8)
#Themed Land Output
LandNames <- c("Main Street USA","Fantasyland","Adventureland","Frontierland","Liberty Square","Tomorrowland")
LndColour <- c("#D2242D","#D7147D","#F78A2F","#856858","#1AB1E6","#254390")
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$LandColour,
vertex.size=8,
vertex.label=NA,
vertex.frame.color="black",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
legend(x=-1.935702, y=0.7817387, LandNames, pch=21,
col="black", pt.bg=LndColour, pt.cex=2, cex=0.8, bty="n", ncol=1)
text(x=-2.008949,y=0.9881603,pos=4,labels="Magic Kingdom Themed Lands",cex=NULL)
# OUTPUT Two
# Plotting Based on Node Type
Class #"Entrance" #"Pathway" #"Attraction"
ClassColour #"#FFFE00" #"#00CDFF" #"#FF3200"
plot(NoParade,
edge.arrow.size=.1, #You can make this 0 to get rid of arrows.
vertex.color=V(NoParade)$ClassColour,
vertex.size=8,
vertex.label=NA,
vertex.frame.color="black",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
text(x=-2.008949,y=0.9881603,pos=4,labels="Magic Kingdom Network",cex=NULL)
legend(x=-1.935702, y=0.7817387, c("Entrance","Pathway","Attraction"), pch=21,
col="black", pt.bg=c("#FFFE00","#00CDFF","#FF3200"), pt.cex=2, cex=0.8, bty="n", ncol=1)
# OUTPUT THREE: Big Thrill Attractions
## Making Legend
Legend <- data.frame(NodeAttribute$BigThrills,NodeAttribute$BigThrillColour)
Legend <- na.omit(Legend)
names(Legend)
colnames(Legend)[1] = "BigThrill"
colnames(Legend)[2]="Colour"
colnames(Legend)[2]="Colour"
Legend$Colour <- as.character(Legend$Colour)
Legend
## Plotting
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$BigThrillsColour,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
text(x=-2.539197,y=0.9976618,pos=4,labels="'Big Thrills' Attraction Name",cex=NULL)
legend(x=-2.580599, y=0.8906897, Legend$BigThrill, pch=21,
col="black", pt.bg=Legend$Colour,
pt.cex=2, cex=.8, bty="n", ncol=1)
## Parade Graph
Parade<- as.matrix(read.csv("MKN_Time_Parade.csv",header = TRUE))
Attraction <- Parade[,1]
Parade <- Parade[, -1]
colnames(Parade) <- rownames(Parade) <- Attraction
Parade[is.na(Parade)] <- 0
Parade<- graph.adjacency(Parade, weighted = TRUE)
plot(Parade,
edge.arrow.size=0,
edge.color=E(Parade)$EColour)
#Parade Route Edge Attributes
edge_attr(Parade)
write.table((edge_attr(Parade)),file="PEdge.txt",row.names = FALSE)
#Changing Parade Route Edge Attributes
EdgeAttribute<- read.csv("MKN_Edge_Attributes.csv",header = TRUE)
names(EdgeAttribute)
EdgeColour <- as.character(EdgeAttribute$E.Colour)
E(Parade)$EColour <- EdgeColour
#Checking the Edge IDs
AO<- get.edge.ids(Parade,c("Astro Orbiter","PeopleMover"))
AO #That's correct.
#Changing Parade Route Node Attributes
V(Parade)$ParadeNode <- ParadeNode
V(Parade)$ParadeLab <- ParadeLab
V(Parade)$NodeSLabel <- NodeSLabel
V(Parade)$PPColour_P <- PPColour_P
V(Parade)$BTParade <- BTParade
#Plotting the Parade Park Route
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$ParadeNode,
vertex.size=8,
vertex.label= NA,
vertex.label.color="Black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color=E(Parade)$EColour,
layout=l,
frame=FALSE)
legend(x=-2.151107, y=0.0221884, c("Passes Directly","Does Not Pass"), pch=21,
col="black", pt.bg=c("#443C3C","#CCCCBE"), pt.cex=2, cex=.8, bty="n", ncol=1)
legend(x=0.184514, y=0.991471, c("Impacted by Parade Route"), lty=1,
col="#BA0A30", pt.cex=2, cex=.8, bty="n", ncol=1)
text(x=-2.046004,y=0.2557505,pos=4,labels="Parade Route",cex=NULL)
gsize(Parade)
## Calculating Shortest Paths from ALL NODES
Parade_ShortestPathTime <- (s.paths <- shortest.paths(Parade, algorithm = "dijkstra")) #Shows all Shortest Paths Between Nodes
write.csv(Parade_ShortestPathTime,file="Parade_ShortPathTime_V2.csv")
NoParade_ShortestPathTime <- (s.paths <- shortest.paths(NoParade, algorithm = "dijkstra"))
write.csv(NoParade_ShortestPathTime,file="NoParade_ShortPathTime_V2.csv")
#Shortest Path Times to Big Thrill Rides are greater during the parade.
# Calculating Shortest Paths from CC to Big Thrill Rides
BTRides <- c("Pirates of the Caribbean",
"Splash Mountain",
"Big Thunder Mountain Railroad",
"Peter Pan's Flight",
"The Barnstormer",
"Seven Dwarfs Mine Train",
"Space Mountain")
shortest_paths(Parade,"Cinderella's Castle",BTRides)
shortest_paths(NoParade,"Cinderella's Castle",BTRides)
Short <- shortest_paths(Parade,
from = V(Parade)[name=="Cinderella's Castle"],
to = V(Parade)[name=="Splash Mountain"],
output = "both") #Nodes and Edges Listed
Short
#Checking the Shortest Path Times
A <- E(Parade)$weight[get.edge.ids(Parade,c("Cinderella's Castle","ALB"))]
B <- E(Parade)$weight[get.edge.ids(Parade,c("ALB","Swiss Family Treehouse"))]
C <- E(Parade)$weight[get.edge.ids(Parade,c("Swiss Family Treehouse","Jungle Cruise"))]
D <- E(Parade)$weight[get.edge.ids(Parade,c("Jungle Cruise","Pirates of the Caribbean"))]
E <- E(Parade)$weight[get.edge.ids(Parade,c("Pirates of the Caribbean","FLB1"))]
G <- E(Parade)$weight[get.edge.ids(Parade,c("FLB1","Splash Mountain"))]
Test1 <- c(A,B,C,D,E,G)
sum(Test1) #19
# Plotting Peter Pan's Flight
l <- layout_with_fr(NoParade)
#No Parade
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$PPColour_NP,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Parade Running
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$PPColour_P,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Plotting Big Thuder Mountain
#No Parade
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$BTNoParade,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Parade
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$BTParade,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Chi Squared
AllChi<- read.csv("All_Chi.csv",header = TRUE)
All_Chi <- chisq.test(AllChi)
All_Chi
BTAChi <- read.csv("Chi_Rides.csv",header = TRUE)
BTA_Chi <- chisq.test(BTAChi)
BTA_Chi
|
/HJVC0_R Script.R
|
no_license
|
HannahRegis/GEOG0125_Appendix-1
|
R
| false
| false
| 10,083
|
r
|
#Setting WD
WD <- getwd()
#igraph
library(igraph)
# Non-Parade Adjacency Table
NoParade<- as.matrix(read.csv("MKN_Time_NonParade.csv",header = TRUE))
# Generating Graph
Attraction <- NoParade[,1]
NoParade <- NoParade[, -1]
colnames(NoParade) <- rownames(NoParade) <- Attraction
NoParade[is.na(NoParade)] <- 0
NoParade<- graph.adjacency(NoParade, weighted = TRUE)
#\#\#\#\#\
# Adding Additial Thematic Attributes
#434E9F Blue
#BA0A30 Pink
# Creating BTA Colour Ramp
palf <- colorRampPalette(c("#434E9F", "#BA0A30"))
plot(x=10:1, y=1:10, pch=19, cex=3, col=palf(7))
NodeAttribute<- read.csv("MKN_Node_Attributes.csv",header = TRUE)
names(NodeAttribute)
colnames(NodeAttribute)[1] = "Attraction"
Land <- as.character(NodeAttribute$Land)
LandColour <- as.character(NodeAttribute$LandColour)
ClassColour <- as.character(NodeAttribute$Class.Colour)
Class <- as.character(NodeAttribute$Class)
RideType <- as.character(NodeAttribute$Ride.Type)
RideColour <- as.character(NodeAttribute$Ride.Type.Colour)
BigThrills <- as.character(NodeAttribute$BigThrills)
BigThrillsColour <- as.character(NodeAttribute$BigThrillColour)
ParadeNode <- as.character(NodeAttribute$ParadeNode)
ParadeLab <- as.character(NodeAttribute$ParadeLab)
NodeSLabel <- as.character(NodeAttribute$NodeSLabel)
PPColour_NP <- as.character(NodeAttribute$PPColour_NP)
PPColour_P <- as.character(NodeAttribute$PPColour_P)
BTParade <- as.character(NodeAttribute$BT_Parade)
BTNoParade <- as.character(NodeAttribute$BT_NoParade)
## Adding Attribute Data
V(NoParade)$Land <- Land
V(NoParade)$LandColour <- LandColour
V(NoParade)$Class <- Class
V(NoParade)$ClassColour<- ClassColour
V(NoParade)$RideType<- RideType
V(NoParade)$RideColour<- RideColour
V(NoParade)$BigThrills <- BigThrills
V(NoParade)$BigThrillsColour <- BigThrillsColour
V(NoParade)$PPColour_NP <- PPColour_NP
V(NoParade)$BTNoParade <- BTNoParade
vertex_attr(NoParade)
edge_attr(NoParade)
E(NoParade)
gsize(NoParade)
write.table((edge_attr(NoParade)),file="NoPEdge.txt",row.names = FALSE)
#Connections go down each row.
#\#\#\#\#\
# Plotting GraphS
## Setting up Additional Variables
l <- layout_with_fr(NoParade)
plot(NoParade,
edge.arrow.size=0,
edge.color="black",
layout=l,
vertex.label=NA,
vertex.color=V(NoParade)$ClassColour,
vertex.size=8)
#Themed Land Output
LandNames <- c("Main Street USA","Fantasyland","Adventureland","Frontierland","Liberty Square","Tomorrowland")
LndColour <- c("#D2242D","#D7147D","#F78A2F","#856858","#1AB1E6","#254390")
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$LandColour,
vertex.size=8,
vertex.label=NA,
vertex.frame.color="black",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
legend(x=-1.935702, y=0.7817387, LandNames, pch=21,
col="black", pt.bg=LndColour, pt.cex=2, cex=0.8, bty="n", ncol=1)
text(x=-2.008949,y=0.9881603,pos=4,labels="Magic Kingdom Themed Lands",cex=NULL)
# OUTPUT Two
# Plotting Based on Node Type
Class #"Entrance" #"Pathway" #"Attraction"
ClassColour #"#FFFE00" #"#00CDFF" #"#FF3200"
plot(NoParade,
edge.arrow.size=.1, #You can make this 0 to get rid of arrows.
vertex.color=V(NoParade)$ClassColour,
vertex.size=8,
vertex.label=NA,
vertex.frame.color="black",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
text(x=-2.008949,y=0.9881603,pos=4,labels="Magic Kingdom Network",cex=NULL)
legend(x=-1.935702, y=0.7817387, c("Entrance","Pathway","Attraction"), pch=21,
col="black", pt.bg=c("#FFFE00","#00CDFF","#FF3200"), pt.cex=2, cex=0.8, bty="n", ncol=1)
# OUTPUT THREE: Big Thrill Attractions
## Making Legend
Legend <- data.frame(NodeAttribute$BigThrills,NodeAttribute$BigThrillColour)
Legend <- na.omit(Legend)
names(Legend)
colnames(Legend)[1] = "BigThrill"
colnames(Legend)[2]="Colour"
colnames(Legend)[2]="Colour"
Legend$Colour <- as.character(Legend$Colour)
Legend
## Plotting
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$BigThrillsColour,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
text(x=-2.539197,y=0.9976618,pos=4,labels="'Big Thrills' Attraction Name",cex=NULL)
legend(x=-2.580599, y=0.8906897, Legend$BigThrill, pch=21,
col="black", pt.bg=Legend$Colour,
pt.cex=2, cex=.8, bty="n", ncol=1)
## Parade Graph
Parade<- as.matrix(read.csv("MKN_Time_Parade.csv",header = TRUE))
Attraction <- Parade[,1]
Parade <- Parade[, -1]
colnames(Parade) <- rownames(Parade) <- Attraction
Parade[is.na(Parade)] <- 0
Parade<- graph.adjacency(Parade, weighted = TRUE)
plot(Parade,
edge.arrow.size=0,
edge.color=E(Parade)$EColour)
#Parade Route Edge Attributes
edge_attr(Parade)
write.table((edge_attr(Parade)),file="PEdge.txt",row.names = FALSE)
#Changing Parade Route Edge Attributes
EdgeAttribute<- read.csv("MKN_Edge_Attributes.csv",header = TRUE)
names(EdgeAttribute)
EdgeColour <- as.character(EdgeAttribute$E.Colour)
E(Parade)$EColour <- EdgeColour
#Checking the Edge IDs
AO<- get.edge.ids(Parade,c("Astro Orbiter","PeopleMover"))
AO #That's correct.
#Changing Parade Route Node Attributes
V(Parade)$ParadeNode <- ParadeNode
V(Parade)$ParadeLab <- ParadeLab
V(Parade)$NodeSLabel <- NodeSLabel
V(Parade)$PPColour_P <- PPColour_P
V(Parade)$BTParade <- BTParade
#Plotting the Parade Park Route
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$ParadeNode,
vertex.size=8,
vertex.label= NA,
vertex.label.color="Black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color=E(Parade)$EColour,
layout=l,
frame=FALSE)
legend(x=-2.151107, y=0.0221884, c("Passes Directly","Does Not Pass"), pch=21,
col="black", pt.bg=c("#443C3C","#CCCCBE"), pt.cex=2, cex=.8, bty="n", ncol=1)
legend(x=0.184514, y=0.991471, c("Impacted by Parade Route"), lty=1,
col="#BA0A30", pt.cex=2, cex=.8, bty="n", ncol=1)
text(x=-2.046004,y=0.2557505,pos=4,labels="Parade Route",cex=NULL)
gsize(Parade)
## Calculating Shortest Paths from ALL NODES
Parade_ShortestPathTime <- (s.paths <- shortest.paths(Parade, algorithm = "dijkstra")) #Shows all Shortest Paths Between Nodes
write.csv(Parade_ShortestPathTime,file="Parade_ShortPathTime_V2.csv")
NoParade_ShortestPathTime <- (s.paths <- shortest.paths(NoParade, algorithm = "dijkstra"))
write.csv(NoParade_ShortestPathTime,file="NoParade_ShortPathTime_V2.csv")
#Shortest Path Times to Big Thrill Rides are greater during the parade.
# Calculating Shortest Paths from CC to Big Thrill Rides
BTRides <- c("Pirates of the Caribbean",
"Splash Mountain",
"Big Thunder Mountain Railroad",
"Peter Pan's Flight",
"The Barnstormer",
"Seven Dwarfs Mine Train",
"Space Mountain")
shortest_paths(Parade,"Cinderella's Castle",BTRides)
shortest_paths(NoParade,"Cinderella's Castle",BTRides)
Short <- shortest_paths(Parade,
from = V(Parade)[name=="Cinderella's Castle"],
to = V(Parade)[name=="Splash Mountain"],
output = "both") #Nodes and Edges Listed
Short
#Checking the Shortest Path Times
A <- E(Parade)$weight[get.edge.ids(Parade,c("Cinderella's Castle","ALB"))]
B <- E(Parade)$weight[get.edge.ids(Parade,c("ALB","Swiss Family Treehouse"))]
C <- E(Parade)$weight[get.edge.ids(Parade,c("Swiss Family Treehouse","Jungle Cruise"))]
D <- E(Parade)$weight[get.edge.ids(Parade,c("Jungle Cruise","Pirates of the Caribbean"))]
E <- E(Parade)$weight[get.edge.ids(Parade,c("Pirates of the Caribbean","FLB1"))]
G <- E(Parade)$weight[get.edge.ids(Parade,c("FLB1","Splash Mountain"))]
Test1 <- c(A,B,C,D,E,G)
sum(Test1) #19
# Plotting Peter Pan's Flight
l <- layout_with_fr(NoParade)
#No Parade
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$PPColour_NP,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Parade Running
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$PPColour_P,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Plotting Big Thuder Mountain
#No Parade
plot(NoParade,
edge.arrow.size=.1,
vertex.color=V(NoParade)$BTNoParade,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Parade
plot(Parade,
edge.arrow.size=.1,
vertex.color=V(Parade)$BTParade,
vertex.size=8,
vertex.label= NA,
vertex.label.color="black",
vertex.label.cex=0.8,
vertex.label.dist=1,
vertex.frame.color="black",
vertex.label.family="Arial",
edge.curved=.2,
edge.color="#616161",
layout=l,
frame=FALSE)
#Chi Squared
AllChi<- read.csv("All_Chi.csv",header = TRUE)
All_Chi <- chisq.test(AllChi)
All_Chi
BTAChi <- read.csv("Chi_Rides.csv",header = TRUE)
BTA_Chi <- chisq.test(BTAChi)
BTA_Chi
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.02,family="gaussian",standardize=TRUE)
sink('./upper_aerodigestive_tract_009.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_009.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 388
|
r
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.02,family="gaussian",standardize=TRUE)
sink('./upper_aerodigestive_tract_009.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
command.arguments <- commandArgs(trailingOnly = TRUE);
output.directory <- command.arguments[1];
####################################################################################################
setwd(output.directory);
library(LearnBayes);
library(ggplot2);
library(scales);
### 4.8.3(a) #######################################################################################
#
# g(pN,pS|Data)
# = g(pN,pS) * g(Data|pN,pS)
# =~ 1 * (pN ^ yN) * (1 - pN)^(nN - yN) * (pS ^ yS) * (1 - pS) ^ (nS - yS)
# = (pN ^ yN) * (1 - pN)^(nN - yN) * (pS ^ yS) * (1 - pS) ^ (nS - yS)
# = pN^(yN+1-1)) * (1 - pN)^(nN - yN + 1 - 1) * pS^(yS+1-1) * (1 - pS) ^ (nS - yS + 1 - 1)
#
# Next, recall that the probability density of a beta distribution is given by:
#
# f_{Beta}(x; alpha, beta) =~ x ^ (alpha - 1) * (1-x) ^ (beta - 1)
#
# Hence, we see that
# (a) pN and pS are posteriorly independent,
# (b) pN ~ Beta(yN + 1, nN - yN + 1), and
# (c) pS ~ Beta(yS + 1, nS - yS + 1), and
#
### 4.8.3(b) #######################################################################################
y.N <- 1601;
z.N <- 162527;
y.S <- 510;
z.S <- 412368;
n.N <- y.N + z.N;
n.S <- y.S + z.S;
sample.size <- 1e+5;
pN.sample <- rbeta(n = sample.size, shape1 = y.N + 1, shape2 = z.N + 1);
pS.sample <- rbeta(n = sample.size, shape1 = y.S + 1, shape2 = z.S + 1);
relative.risk.sample <- pN.sample / pS.sample;
### 4.8.3(c) #######################################################################################
png("Fig1_histogram-relative-risk.png");
qplot(x = relative.risk.sample, geom = "histogram", binwidth = 0.01);
dev.off();
quantile(x = relative.risk.sample, probs = c(0.025, 0.5, 0.975));
### 4.8.3(d) #######################################################################################
pN.minus.pS.sample <- pN.sample - pS.sample;
png("Fig2_histogram-pN-minus-pS.png");
qplot(x = pN.minus.pS.sample, geom = "histogram", binwidth = 1e-5);
dev.off();
### 4.8.3(e) #######################################################################################
mean(pN.minus.pS.sample > 0);
####################################################################################################
####################################################################################################
png("Fig3_pN-pS-posterior-distributions.png");
DF.pN <- data.frame(
proportion = pN.sample,
safety = factor(rep('None',length(pN.sample)),levels=c('Seat belt','None'))
);
DF.pS <- data.frame(
proportion = pS.sample,
safety = factor(rep('Seat belt',length(pN.sample)),levels=c('Seat belt','None'))
);
DF.temp <- rbind(DF.pN,DF.pS);
qplot(data = DF.temp, x = proportion, colour = safety, geom = "density");
dev.off();
####################################################################################################
contingency.table <- matrix(
c(y.N,y.S,z.N,z.S),
nrow = 2,
dimnames = list(c("none", "seat.belt"),c("fatal", "non-fatal"))
);
contingency.table;
fisher.test(contingency.table);
|
/exercises/statistics/bayesian/albert/chap04/exercises/exercise-4-8-3/code/albert-exercise-4-8-3.R
|
no_license
|
paradisepilot/statistics
|
R
| false
| false
| 3,035
|
r
|
command.arguments <- commandArgs(trailingOnly = TRUE);
output.directory <- command.arguments[1];
####################################################################################################
setwd(output.directory);
library(LearnBayes);
library(ggplot2);
library(scales);
### 4.8.3(a) #######################################################################################
#
# g(pN,pS|Data)
# = g(pN,pS) * g(Data|pN,pS)
# =~ 1 * (pN ^ yN) * (1 - pN)^(nN - yN) * (pS ^ yS) * (1 - pS) ^ (nS - yS)
# = (pN ^ yN) * (1 - pN)^(nN - yN) * (pS ^ yS) * (1 - pS) ^ (nS - yS)
# = pN^(yN+1-1)) * (1 - pN)^(nN - yN + 1 - 1) * pS^(yS+1-1) * (1 - pS) ^ (nS - yS + 1 - 1)
#
# Next, recall that the probability density of a beta distribution is given by:
#
# f_{Beta}(x; alpha, beta) =~ x ^ (alpha - 1) * (1-x) ^ (beta - 1)
#
# Hence, we see that
# (a) pN and pS are posteriorly independent,
# (b) pN ~ Beta(yN + 1, nN - yN + 1), and
# (c) pS ~ Beta(yS + 1, nS - yS + 1), and
#
### 4.8.3(b) #######################################################################################
y.N <- 1601;
z.N <- 162527;
y.S <- 510;
z.S <- 412368;
n.N <- y.N + z.N;
n.S <- y.S + z.S;
sample.size <- 1e+5;
pN.sample <- rbeta(n = sample.size, shape1 = y.N + 1, shape2 = z.N + 1);
pS.sample <- rbeta(n = sample.size, shape1 = y.S + 1, shape2 = z.S + 1);
relative.risk.sample <- pN.sample / pS.sample;
### 4.8.3(c) #######################################################################################
png("Fig1_histogram-relative-risk.png");
qplot(x = relative.risk.sample, geom = "histogram", binwidth = 0.01);
dev.off();
quantile(x = relative.risk.sample, probs = c(0.025, 0.5, 0.975));
### 4.8.3(d) #######################################################################################
pN.minus.pS.sample <- pN.sample - pS.sample;
png("Fig2_histogram-pN-minus-pS.png");
qplot(x = pN.minus.pS.sample, geom = "histogram", binwidth = 1e-5);
dev.off();
### 4.8.3(e) #######################################################################################
mean(pN.minus.pS.sample > 0);
####################################################################################################
####################################################################################################
png("Fig3_pN-pS-posterior-distributions.png");
DF.pN <- data.frame(
proportion = pN.sample,
safety = factor(rep('None',length(pN.sample)),levels=c('Seat belt','None'))
);
DF.pS <- data.frame(
proportion = pS.sample,
safety = factor(rep('Seat belt',length(pN.sample)),levels=c('Seat belt','None'))
);
DF.temp <- rbind(DF.pN,DF.pS);
qplot(data = DF.temp, x = proportion, colour = safety, geom = "density");
dev.off();
####################################################################################################
contingency.table <- matrix(
c(y.N,y.S,z.N,z.S),
nrow = 2,
dimnames = list(c("none", "seat.belt"),c("fatal", "non-fatal"))
);
contingency.table;
fisher.test(contingency.table);
|
a = read.csv("https://raw.githubusercontent.com/pluieciel/econometrics/master/costsalary.csv", header = TRUE, sep=";")
x=a$Costs
y=a$Salary
r=lm(y~x)
summary(r)
plot(x,y)
abline(r)
|
/R/Class 1 OLS.R
|
no_license
|
pluieciel/econometrics
|
R
| false
| false
| 181
|
r
|
a = read.csv("https://raw.githubusercontent.com/pluieciel/econometrics/master/costsalary.csv", header = TRUE, sep=";")
x=a$Costs
y=a$Salary
r=lm(y~x)
summary(r)
plot(x,y)
abline(r)
|
# Packages ----------------------------------------------------------
library(googlesheets)
library(tidyverse)
library(stringr)
# Inputs ------------------------------------------------------------
host <- "[HOST]"
year <- "[YEAR]"
# Get looking for teammates data ------------------------------------
consultants_name <- paste0("DataFest ", year, " @ ", host, " - Consultant Sign up (Responses)")
consultants <- gs_title(consultants_name) %>%
gs_read()
# Rename columns ----------------------------------------------------
names(consultants) <- names(consultants) %>%
str_replace("Email address", "email")
str_replace("Your affiliation: .{1,}", "affiliation")
str_replace("Your title:", "title")
str_replace("Which .{1,}", "shift_preference")
str_replace("How many .{1,}", "hours_preference")
str_replace("Check if you agree", "photo")
str_replace("\\:", "")
str_replace("-", "")
str_replace_all(" ", "_")
tolower()
# Assign role -------------------------------------------------------
consultants <- consultants %>%
mutate(role = "Consultant")
# Write consultants data --------------------------------------------
write_csv(consultants, path = "data/consultants.csv")
# Save consultants emails for easy emailing -------------------------
cat(consultants$email, sep = ", ",
file = "email-lists/consultants-emails.txt")
|
/03_get_cons_data.R
|
no_license
|
mine-cetinkaya-rundel/datafest
|
R
| false
| false
| 1,361
|
r
|
# Packages ----------------------------------------------------------
library(googlesheets)
library(tidyverse)
library(stringr)
# Inputs ------------------------------------------------------------
host <- "[HOST]"
year <- "[YEAR]"
# Get looking for teammates data ------------------------------------
consultants_name <- paste0("DataFest ", year, " @ ", host, " - Consultant Sign up (Responses)")
consultants <- gs_title(consultants_name) %>%
gs_read()
# Rename columns ----------------------------------------------------
names(consultants) <- names(consultants) %>%
str_replace("Email address", "email")
str_replace("Your affiliation: .{1,}", "affiliation")
str_replace("Your title:", "title")
str_replace("Which .{1,}", "shift_preference")
str_replace("How many .{1,}", "hours_preference")
str_replace("Check if you agree", "photo")
str_replace("\\:", "")
str_replace("-", "")
str_replace_all(" ", "_")
tolower()
# Assign role -------------------------------------------------------
consultants <- consultants %>%
mutate(role = "Consultant")
# Write consultants data --------------------------------------------
write_csv(consultants, path = "data/consultants.csv")
# Save consultants emails for easy emailing -------------------------
cat(consultants$email, sep = ", ",
file = "email-lists/consultants-emails.txt")
|
\name{identify}
\alias{value_xy}
\alias{value_cr}
\alias{value_ll}
\alias{coord_xy}
\alias{coord_cr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get value and coordinates from location
}
\description{
Functions to extract values of raster image from given location, specified by coordinates in raster projection, by cell position or by geogpaphical coordinates. Additional utils to convert cell position and planar coordinates mutually.
}
\usage{
value_xy(obj, ...)
value_ll(obj, ...)
value_cr(obj, ...)
coord_xy(obj, ...)
coord_cr(obj, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{
Object of class \code{ursaRaster}.
}
\item{\dots}{the set of arguments, which are recognized via their names (using \link[base:regex]{regular expressions}) and classes:
\tabular{llll}{
\emph{Matched pattern}\code{ } \tab \emph{Function}\code{ } \tab \emph{Used name}
\cr\code{ind} \tab \code{*_*} \tab \code{ind} \tab
Index (positive \code{integer}) in internal value storage.
\cr\code{^c} \tab \code{*_cr} \tab \code{col} \tab
Integer of non-zero length. Index of column/sample Length of column and row indices should be the same for creating set of two-dimension coordinates.\cr
\cr\code{^r} \tab \code{*_cr} \tab \code{row} \tab
Integer of non-zero length. Index of row/line. Length of column and row indices should be the same for creating set of two-dimension coordinates.
\cr\code{^x} \tab \code{*_xy} \tab \code{x} \tab
Numeric of non-zero length. X-axis coordinate in grid of \code{obj}. The length of X-axis and Y-axis coordinates should be the same for creating set of two-dimension coordinates.
\cr\code{^y} \tab \code{*_xy} \tab \code{y} \tab
Numeric of non-zero length. Y-axis coordinate in grid of \code{obj}. The length of X-axis and Y-axis coordinates should be the same for creating set of two-dimension coordinates.
\cr\code{^lon} \tab \code{value_ll} \tab \code{lon} \tab
Longitude. The length of longitudes and latitudes should be the same for creating set of two-dimension coordinates.
\cr\code{^lat} \tab \code{value_ll} \tab \code{lat} \tab
Latitude. The length of longitudes and latitudes should be the same for creating set of two-dimension coordinates.
}
}
}
\details{
\code{value_xy} returns values for location, which is specified by planar coordinates (x, y).\cr
\code{value_cr} returns values for location, which is specified by cell posisition (column, row) relative to upper-left corner of image .\cr
\code{value_ll} returns values for location, which is specified by longitude and latitude (long, lat).
\code{coord_xy} transforms planar coordinates (x, y) to cell position (column, row).\cr
\code{coord_cr} transforms cell position (column, row) to planar coordinates (x, y).
It is required to use a couple of coordinate vectors: \code{(x, y)}, \code{(c, r)} or \code{(lon, lat)} of the same length. The unary argument is interpreted as index in internal value storage.
Position in column/row coordinates starts from upper-lever corner. The cell of upper-level corner has (1, 1) coordinates (in \R indices starts from \code{1L}), whereas in some GIS the same corner cell has (0, 0) coordinates.
The column names of returned matrix are character format of index in internal value storage. This index can be specify in any function as argument \code{ind} instead of coordinates (planar, geographical, cell position).
}
\value{
For \code{value.*} numeric matrix of raster values. Band values for specific coordinates are by column. Set of specific coordinates are by row. \code{\link[base:colnames]{rownames}} are band names, and \code{\link[base:colnames]{colnames}} are index in internal value storage.
For \code{coord.*} numeric matrix of coordinates with a vector of couple coordinates, one coordinate per one row. \code{\link[base:colnames]{rownames}} are returned coordinates, and \code{\link[base:colnames]{colnames}} are index in internal value storage.
}
%%~ \references{
%%~ %% ~put references to the literature/web site here ~
%%~ }
\author{
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%~ \seealso{
%%~ %% ~~objects to See Also as \code{\link{help}}, ~~~
%%~ }
\examples{
session_grid(NULL)
set.seed(352)
a <- as.integer(ursa_dummy(3,min=0,max=999))
ind <- which(ursa_value(a[1])==890)
print(ind)
msk <- a[1]==890
am <- a[msk]
b <- as.data.frame(am)
b$jx <- b$x+runif(nrow(b),min=-1000,max=1000)
b$jy <- b$y+runif(nrow(b),min=-1000,max=1000)
print(b)
cr1 <- coord_xy(a,x=b$jx,y=b$jy)
cr2 <- coord_xy(a,y=b$y,x=b$x)
cr3 <- coord_xy(a,ind=ind)
print(cr1)
print(list('cr1 and cr2'=all.equal(cr1,cr2)
,'cr2 and cr3'=all.equal(cr2,cr3)
,'cr3 and cr1'=all.equal(cr3,cr1)))
xy1 <- coord_cr(a,c=cr1["c",],r=cr1["r",])
print(xy1)
print(list('in x'=identical(unname(xy1["x",]),b[,"x",drop=TRUE])
,'in y'=identical(unname(xy1["y",]),b[,"y",drop=TRUE])))
val1 <- value_xy(a,x=b$jx,y=b$jy)
val2 <- value_xy(a,x=b$x,y=b$y)
val3 <- value_cr(a,ind=ind)
val4 <- value_cr(a,c=cr1["c",],r=cr1["r",])
print(val1)
print(list('val1 and val2'=all.equal(val1,val2)
,'val2 and val3'=all.equal(val2,val3)
,'val3 and val4'=all.equal(val3,val4)
,'val4 and val1'=all.equal(val4,val1)))
ps <- pixelsize()
v <- value_ll(ps,lon=180,lat=70)
print(c('True scale'=v/with(ursa_grid(ps),1e-6*resx*resy)))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{attribute}
|
/man/identify.Rd
|
no_license
|
nplatonov/ursa
|
R
| false
| false
| 5,739
|
rd
|
\name{identify}
\alias{value_xy}
\alias{value_cr}
\alias{value_ll}
\alias{coord_xy}
\alias{coord_cr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get value and coordinates from location
}
\description{
Functions to extract values of raster image from given location, specified by coordinates in raster projection, by cell position or by geographical coordinates. Additional utils to convert cell position and planar coordinates mutually.
}
\usage{
value_xy(obj, ...)
value_ll(obj, ...)
value_cr(obj, ...)
coord_xy(obj, ...)
coord_cr(obj, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{
Object of class \code{ursaRaster}.
}
\item{\dots}{the set of arguments, which are recognized via their names (using \link[base:regex]{regular expressions}) and classes:
\tabular{llll}{
\emph{Matched pattern}\code{ } \tab \emph{Function}\code{ } \tab \emph{Used name}
\cr\code{ind} \tab \code{*_*} \tab \code{ind} \tab
Index (positive \code{integer}) in internal value storage.
\cr\code{^c} \tab \code{*_cr} \tab \code{col} \tab
Integer of non-zero length. Index of column/sample. The length of column and row indices should be the same for creating set of two-dimension coordinates.\cr
\cr\code{^r} \tab \code{*_cr} \tab \code{row} \tab
Integer of non-zero length. Index of row/line. Length of column and row indices should be the same for creating set of two-dimension coordinates.
\cr\code{^x} \tab \code{*_xy} \tab \code{x} \tab
Numeric of non-zero length. X-axis coordinate in grid of \code{obj}. The length of X-axis and Y-axis coordinates should be the same for creating set of two-dimension coordinates.
\cr\code{^y} \tab \code{*_xy} \tab \code{y} \tab
Numeric of non-zero length. Y-axis coordinate in grid of \code{obj}. The length of X-axis and Y-axis coordinates should be the same for creating set of two-dimension coordinates.
\cr\code{^lon} \tab \code{value_ll} \tab \code{lon} \tab
Longitude. The length of longitudes and latitudes should be the same for creating set of two-dimension coordinates.
\cr\code{^lat} \tab \code{value_ll} \tab \code{lat} \tab
Latitude. The length of longitudes and latitudes should be the same for creating set of two-dimension coordinates.
}
}
}
\details{
\code{value_xy} returns values for location, which is specified by planar coordinates (x, y).\cr
\code{value_cr} returns values for location, which is specified by cell position (column, row) relative to upper-left corner of image.\cr
\code{value_ll} returns values for location, which is specified by longitude and latitude (long, lat).
\code{coord_xy} transforms planar coordinates (x, y) to cell position (column, row).\cr
\code{coord_cr} transforms cell position (column, row) to planar coordinates (x, y).
It is required to use a couple of coordinate vectors: \code{(x, y)}, \code{(c, r)} or \code{(lon, lat)} of the same length. The unary argument is interpreted as index in internal value storage.
Position in column/row coordinates starts from the upper-left corner. The cell of the upper-left corner has (1, 1) coordinates (in \R indices start from \code{1L}), whereas in some GIS the same corner cell has (0, 0) coordinates.
The column names of returned matrix are character format of index in internal value storage. This index can be specified in any function as argument \code{ind} instead of coordinates (planar, geographical, cell position).
}
\value{
For \code{value.*} numeric matrix of raster values. Band values for specific coordinates are by column. Set of specific coordinates are by row. \code{\link[base:colnames]{rownames}} are band names, and \code{\link[base:colnames]{colnames}} are index in internal value storage.
For \code{coord.*} numeric matrix of coordinates with a vector of couple coordinates, one coordinate per one row. \code{\link[base:colnames]{rownames}} are returned coordinates, and \code{\link[base:colnames]{colnames}} are index in internal value storage.
}
%%~ \references{
%%~ %% ~put references to the literature/web site here ~
%%~ }
\author{
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%~ \seealso{
%%~ %% ~~objects to See Also as \code{\link{help}}, ~~~
%%~ }
\examples{
session_grid(NULL)
set.seed(352)
a <- as.integer(ursa_dummy(3,min=0,max=999))
ind <- which(ursa_value(a[1])==890)
print(ind)
msk <- a[1]==890
am <- a[msk]
b <- as.data.frame(am)
b$jx <- b$x+runif(nrow(b),min=-1000,max=1000)
b$jy <- b$y+runif(nrow(b),min=-1000,max=1000)
print(b)
cr1 <- coord_xy(a,x=b$jx,y=b$jy)
cr2 <- coord_xy(a,y=b$y,x=b$x)
cr3 <- coord_xy(a,ind=ind)
print(cr1)
print(list('cr1 and cr2'=all.equal(cr1,cr2)
,'cr2 and cr3'=all.equal(cr2,cr3)
,'cr3 and cr1'=all.equal(cr3,cr1)))
xy1 <- coord_cr(a,c=cr1["c",],r=cr1["r",])
print(xy1)
print(list('in x'=identical(unname(xy1["x",]),b[,"x",drop=TRUE])
,'in y'=identical(unname(xy1["y",]),b[,"y",drop=TRUE])))
val1 <- value_xy(a,x=b$jx,y=b$jy)
val2 <- value_xy(a,x=b$x,y=b$y)
val3 <- value_cr(a,ind=ind)
val4 <- value_cr(a,c=cr1["c",],r=cr1["r",])
print(val1)
print(list('val1 and val2'=all.equal(val1,val2)
,'val2 and val3'=all.equal(val2,val3)
,'val3 and val4'=all.equal(val3,val4)
,'val4 and val1'=all.equal(val4,val1)))
ps <- pixelsize()
v <- value_ll(ps,lon=180,lat=70)
print(c('True scale'=v/with(ursa_grid(ps),1e-6*resx*resy)))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{attribute}
|
# Tests: survival of rowwise() grouping metadata across terra (non-tidyterra)
# operations. terra verbs return plain SpatVectors; these tests check whether
# the row-wise attributes are kept, rebuilt lazily on the next verb, or dropped.
test_that("Row-wise are kept with project", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Reprojection does not change the rows, so grouping must pass through intact.
gr_v2 <- gr_v %>% terra::project("EPSG:4326")
expect_true(is_rowwise_spatvector(gr_v2))
expect_identical(group_data(gr_v), group_data(gr_v2))
})
test_that("Row-wise are kept with casting", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Geometry casting (polygons -> centroids) keeps one row per feature,
# so grouping must also pass through intact.
gr_v2 <- gr_v %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v2))
expect_identical(group_data(gr_v), group_data(gr_v2))
})
test_that("Aggregate can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
# Aggregation changes the number of rows, so group data must be rebuilt.
gr_v2 <- terra::aggregate(gr_v, by = "gr", count = TRUE)
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a2 = 1)
# After aggregating by "gr" there is one row per level -> indices 1..3.
expect_identical(group_indices(gr_v2), c(1L, 2L, 3L))
})
test_that("Slicing can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
# Row subsetting with `[` keeps the rowwise class ...
gr_v2 <- gr_v[c(1:3, 7:9), ]
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a = 1)
# Same as
# ... and the rebuilt group data must match a tibble sliced the same way.
gr_v_tbl <- as_tibble(gr_v)[c(1:3, 7:9), ]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("SpatSample does not re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Random sampling produces rows unrelated to the originals, so the
# rowwise metadata is (intentionally) dropped.
gr_v2 <- terra::spatSample(gr_v, 20)
expect_identical(nrow(gr_v2), 20)
expect_false(is_rowwise_spatvector(gr_v2))
})
test_that("Subset columns can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
v$gr2 <- rep_len(c("F", "E"), nrow(v))
gr_v <- rowwise(v, gr2, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
expect_identical(group_vars(gr_v), c("gr2", "gr"))
# Dropping one grouping column ("gr2") should leave grouping on "gr" only.
gr_v2 <- gr_v[, c("iso2", "gr")]
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a = 1)
expect_identical(group_vars(gr_v2), c("gr"))
# Same as
gr_v_tbl <- as_tibble(gr_v)[, c("iso2", "gr")]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("Subset all columns ungroup", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
v$gr2 <- rep_len(c("F", "E"), nrow(v))
gr_v <- rowwise(v, gr2, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
expect_identical(group_vars(gr_v), c("gr2", "gr"))
# Dropping every grouping column should fully ungroup the object.
gr_v2 <- gr_v[, c("iso2")]
# Trigger rebuild with any verb
expect_message(gr_v2 <- gr_v2 %>% mutate(a = 1), "mixed terra and tidyterra")
expect_false(is_rowwise_spatvector(gr_v2))
expect_identical(group_vars(gr_v2), character(0))
# Same as
gr_v_tbl <- as_tibble(v)[, "iso2"]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("Gives meaningful messages", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
gr_v <- rowwise(v, iso2) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
gr_v2 <- gr_v[, c("name")]
# Snapshot pins the exact "mixed terra and tidyterra" informational message.
expect_snapshot(gr_v2 <- gr_v2 %>% mutate(a = 1))
})
|
/tests/testthat/test-rowwise-SpatVector-terra.R
|
permissive
|
dieghernan/tidyterra
|
R
| false
| false
| 3,867
|
r
|
# Tests: survival of rowwise() grouping metadata across terra (non-tidyterra)
# operations. terra verbs return plain SpatVectors; these tests check whether
# the row-wise attributes are kept, rebuilt lazily on the next verb, or dropped.
test_that("Row-wise are kept with project", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Reprojection does not change the rows, so grouping must pass through intact.
gr_v2 <- gr_v %>% terra::project("EPSG:4326")
expect_true(is_rowwise_spatvector(gr_v2))
expect_identical(group_data(gr_v), group_data(gr_v2))
})
test_that("Row-wise are kept with casting", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Geometry casting (polygons -> centroids) keeps one row per feature,
# so grouping must also pass through intact.
gr_v2 <- gr_v %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v2))
expect_identical(group_data(gr_v), group_data(gr_v2))
})
test_that("Aggregate can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
# Aggregation changes the number of rows, so group data must be rebuilt.
gr_v2 <- terra::aggregate(gr_v, by = "gr", count = TRUE)
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a2 = 1)
# After aggregating by "gr" there is one row per level -> indices 1..3.
expect_identical(group_indices(gr_v2), c(1L, 2L, 3L))
})
test_that("Slicing can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
# Row subsetting with `[` keeps the rowwise class ...
gr_v2 <- gr_v[c(1:3, 7:9), ]
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a = 1)
# Same as
# ... and the rebuilt group data must match a tibble sliced the same way.
gr_v_tbl <- as_tibble(gr_v)[c(1:3, 7:9), ]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("SpatSample does not re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
gr_v <- rowwise(v, gr)
expect_true(is_rowwise_spatvector(gr_v))
# Random sampling produces rows unrelated to the originals, so the
# rowwise metadata is (intentionally) dropped.
gr_v2 <- terra::spatSample(gr_v, 20)
expect_identical(nrow(gr_v2), 20)
expect_false(is_rowwise_spatvector(gr_v2))
})
test_that("Subset columns can re-rowwise", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
v$gr2 <- rep_len(c("F", "E"), nrow(v))
gr_v <- rowwise(v, gr2, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
expect_identical(group_vars(gr_v), c("gr2", "gr"))
# Dropping one grouping column ("gr2") should leave grouping on "gr" only.
gr_v2 <- gr_v[, c("iso2", "gr")]
expect_true(is_rowwise_spatvector(gr_v2))
# Trigger rebuild with any verb
gr_v2 <- gr_v2 %>% mutate(a = 1)
expect_identical(group_vars(gr_v2), c("gr"))
# Same as
gr_v_tbl <- as_tibble(gr_v)[, c("iso2", "gr")]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("Subset all columns ungroup", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
v$gr <- rep_len(c("B", "A", "C", "B"), nrow(v))
v$gr2 <- rep_len(c("F", "E"), nrow(v))
gr_v <- rowwise(v, gr2, gr) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
expect_identical(group_vars(gr_v), c("gr2", "gr"))
# Dropping every grouping column should fully ungroup the object.
gr_v2 <- gr_v[, c("iso2")]
# Trigger rebuild with any verb
expect_message(gr_v2 <- gr_v2 %>% mutate(a = 1), "mixed terra and tidyterra")
expect_false(is_rowwise_spatvector(gr_v2))
expect_identical(group_vars(gr_v2), character(0))
# Same as
gr_v_tbl <- as_tibble(v)[, "iso2"]
expect_identical(group_data(gr_v2), group_data(gr_v_tbl))
})
test_that("Gives meaningful messages", {
v <- terra::vect(system.file("extdata/cyl.gpkg", package = "tidyterra"))
gr_v <- rowwise(v, iso2) %>% terra::centroids()
expect_true(is_rowwise_spatvector(gr_v))
gr_v2 <- gr_v[, c("name")]
# Snapshot pins the exact "mixed terra and tidyterra" informational message.
expect_snapshot(gr_v2 <- gr_v2 %>% mutate(a = 1))
})
|
#' TWIT
#'
#' @description Base function responsible for formulating GET and
#' POST requests to Twitter API's.
#'
#' @param get Logical with the default, \code{get = TRUE},
#' indicating whether the provided url should be passed along via
#' a GET or POST request.
#' @param url Character vector designed to operate like
#' parse_url and build_url functions in the httr package.
#' The easiest way to do this is to work through
#' the call-specific functions as they are designed to simplify
#' the process. However, if one were interested in reverse-
#' engineering such a thing, I would recommend checking out
#' \code{make_url}.
#' @param \dots Further named parameters, such as config, token,
#' etc, passed on to modify_url in the httr package.
#' @param timeout Numeric, used only when streaming tweets,
#' specifying the number of seconds to stream tweets.
#' @param filename Character, used only when streaming tweets,
#' name of file to save json tweets object.
#' @note Occasionally Twitter does recommend using POST requests
#' for data retrieval calls. This is usually the case when requests
#' can involve long strings (containing up to 100 user_ids). For
#' the most part, the function-specific requests
#' (e.g., \code{get_friends}) reflect these changes.
#' @return json response object
#' @import httr
#' @keywords internal
#' @noRd
TWIT <- function(get = TRUE, url, ..., timeout = NULL, filename = NULL) {
# No timeout -> a regular (non-streaming) request: dispatch to GET or POST.
if (is.null(timeout)) {
if (get) {
return(GET(url, ...))
} else {
return(POST(url, ...))
}
} else {
# Streaming request: GET for `timeout` seconds, writing the raw json
# stream straight to `filename` with a progress bar.
GET(url, ...,
timeout(timeout),
write_disk(filename, overwrite = TRUE),
progress())
#error = function(e) return(NULL))
}
}
#' make_url
#'
#' Build an httr-style "url" object pointing at one of Twitter's v1.1 APIs.
#'
#' @param restapi Logical. \code{TRUE} (default) targets Twitter's REST
#'   API host; \code{FALSE} targets the streaming API host.
#' @param query Character. Endpoint path component (httr calls this
#'   "path"; "query" is used here to match Twitter's API documentation).
#' @param param Optional list of request parameters, or NULL (default).
#' @return An object of class "url", usable with httr's build_url().
#' @keywords internal
#' @noRd
make_url <- function(restapi = TRUE, query, param = NULL) {
  # Pick the host: REST endpoint vs streaming endpoint.
  host <- if (restapi) "api.twitter.com" else "stream.twitter.com"
  url_parts <- list(
    scheme   = "https",
    hostname = host,
    port     = NULL,
    # All v1.1 endpoints live under "1.1/<query>.json".
    path     = paste0("1.1/", query, ".json"),
    query    = param,
    params   = NULL,
    fragment = NULL,
    username = NULL,
    password = NULL
  )
  structure(url_parts, class = "url")
}
|
/R/TWIT.R
|
no_license
|
hucara/rtweet
|
R
| false
| false
| 2,809
|
r
|
#' TWIT
#'
#' @description Base function responsible for formulating GET and
#' POST requests to Twitter API's.
#'
#' @param get Logical with the default, \code{get = TRUE},
#' indicating whether the provided url should be passed along via
#' a GET or POST request.
#' @param url Character vector designed to operate like
#' parse_url and build_url functions in the httr package.
#' The easiest way to do this is to work through
#' the call-specific functions as they are designed to simplify
#' the process. However, if one were interested in reverse-
#' engineering such a thing, I would recommend checking out
#' \code{make_url}.
#' @param \dots Further named parameters, such as config, token,
#' etc, passed on to modify_url in the httr package.
#' @param timeout Numeric, used only when streaming tweets,
#' specifying the number of seconds to stream tweets.
#' @param filename Character, used only when streaming tweets,
#' name of file to save json tweets object.
#' @note Occasionally Twitter does recommend using POST requests
#' for data retrieval calls. This is usually the case when requests
#' can involve long strings (containing up to 100 user_ids). For
#' the most part, the function-specific requests
#' (e.g., \code{get_friends}) reflect these changes.
#' @return json response object
#' @import httr
#' @keywords internal
#' @noRd
TWIT <- function(get = TRUE, url, ..., timeout = NULL, filename = NULL) {
# No timeout -> a regular (non-streaming) request: dispatch to GET or POST.
if (is.null(timeout)) {
if (get) {
return(GET(url, ...))
} else {
return(POST(url, ...))
}
} else {
# Streaming request: GET for `timeout` seconds, writing the raw json
# stream straight to `filename` with a progress bar.
GET(url, ...,
timeout(timeout),
write_disk(filename, overwrite = TRUE),
progress())
#error = function(e) return(NULL))
}
}
#' make_url
#'
#' Build an httr-style "url" object pointing at one of Twitter's v1.1 APIs.
#'
#' @param restapi Logical. \code{TRUE} (default) targets Twitter's REST
#'   API host; \code{FALSE} targets the streaming API host.
#' @param query Character. Endpoint path component (httr calls this
#'   "path"; "query" is used here to match Twitter's API documentation).
#' @param param Optional list of request parameters, or NULL (default).
#' @return An object of class "url", usable with httr's build_url().
#' @keywords internal
#' @noRd
make_url <- function(restapi = TRUE, query, param = NULL) {
  # Pick the host: REST endpoint vs streaming endpoint.
  host <- if (restapi) "api.twitter.com" else "stream.twitter.com"
  url_parts <- list(
    scheme   = "https",
    hostname = host,
    port     = NULL,
    # All v1.1 endpoints live under "1.1/<query>.json".
    path     = paste0("1.1/", query, ".json"),
    query    = param,
    params   = NULL,
    fragment = NULL,
    username = NULL,
    password = NULL
  )
  structure(url_parts, class = "url")
}
|
# NOTE(review): hard-coded absolute path makes this script non-portable;
# consider relative paths or an explicit project root.
setwd("c:/Users/Daniel/Documents/development/papers/GA_work/code")
library(igraph)  # library() errors on failure; require() only returns FALSE
# Chop up our data into some useful structures. We basically care about price
# and bundle vectors. They are already grouped by index, so it makes sense to
# split them off separately. We can use expressions like
# prices[i,] * bundles[i,] < prices[j,] * bundles[i,], mapping over i and j.
dutchdata <- read.csv("dutch-data.csv")
plabels <- c("price_public", "price_f", "price_m")
blabels <- c("public", "female", "male")
prices <- dutchdata[plabels]
bundles <- dutchdata[blabels]
# Expenditure on each good, and total income per observation.
spending <- bundles * prices
incomes <- rowSums(spending)
# Fixed: derive the matrix dimension from the data instead of the magic
# number 586, so the script works for any number of observations.
n_obs <- nrow(dutchdata)
# income_matrix[i, j] == incomes[i] (column-wise recycling of `incomes`).
income_matrix <- matrix(data = incomes, nrow = n_obs, ncol = n_obs)
# bundle_price_product[i, j] = cost of bundle i at prices j.
bundle_price_product <- data.matrix(bundles) %*% t(data.matrix(prices))
# Pretty sure to interpret this correctly, need to take the transpose.
# Currently read (i, j) as: i is revealed to be worse than j.
comparison_matrix <- income_matrix < bundle_price_product
# This remains the same, regardless of orientation? (matrix algebra)
path_matrix <- comparison_matrix %*% comparison_matrix
# Mutual "revealed worse" relations -> 2-cycles.
cycle_matrix <- comparison_matrix * t(comparison_matrix)
adjacency_matrix <- cycle_matrix
# Keep only the lower triangle so each undirected cycle is listed once.
cycle_matrix <- cycle_matrix * lower.tri(cycle_matrix)
cycles <- which(cycle_matrix == 1, arr.ind = TRUE)
cycledegree <- diag(path_matrix)
# Vertices WITH EDGES
vertices <- unique(c(cycles[, 1], cycles[, 2]))
# Initialize graph objects
preference_graph <- graph.data.frame(cycles, directed = FALSE)
# Number of 2-step comparison paths through observation i
# (diagonal of the squared comparison matrix).
cyclesat <- function(i) {
  cycledegree[i]
}
# TRUE if observation i is revealed worse than observation j.
compare <- function(i, j) {
  comparison_matrix[i, j]
}
|
/chromatic.R
|
no_license
|
Concomitant/agentcoloring
|
R
| false
| false
| 1,637
|
r
|
# NOTE(review): hard-coded absolute path makes this script non-portable;
# consider relative paths or an explicit project root.
setwd("c:/Users/Daniel/Documents/development/papers/GA_work/code")
library(igraph)  # library() errors on failure; require() only returns FALSE
# Chop up our data into some useful structures. We basically care about price
# and bundle vectors. They are already grouped by index, so it makes sense to
# split them off separately. We can use expressions like
# prices[i,] * bundles[i,] < prices[j,] * bundles[i,], mapping over i and j.
dutchdata <- read.csv("dutch-data.csv")
plabels <- c("price_public", "price_f", "price_m")
blabels <- c("public", "female", "male")
prices <- dutchdata[plabels]
bundles <- dutchdata[blabels]
# Expenditure on each good, and total income per observation.
spending <- bundles * prices
incomes <- rowSums(spending)
# Fixed: derive the matrix dimension from the data instead of the magic
# number 586, so the script works for any number of observations.
n_obs <- nrow(dutchdata)
# income_matrix[i, j] == incomes[i] (column-wise recycling of `incomes`).
income_matrix <- matrix(data = incomes, nrow = n_obs, ncol = n_obs)
# bundle_price_product[i, j] = cost of bundle i at prices j.
bundle_price_product <- data.matrix(bundles) %*% t(data.matrix(prices))
# Pretty sure to interpret this correctly, need to take the transpose.
# Currently read (i, j) as: i is revealed to be worse than j.
comparison_matrix <- income_matrix < bundle_price_product
# This remains the same, regardless of orientation? (matrix algebra)
path_matrix <- comparison_matrix %*% comparison_matrix
# Mutual "revealed worse" relations -> 2-cycles.
cycle_matrix <- comparison_matrix * t(comparison_matrix)
adjacency_matrix <- cycle_matrix
# Keep only the lower triangle so each undirected cycle is listed once.
cycle_matrix <- cycle_matrix * lower.tri(cycle_matrix)
cycles <- which(cycle_matrix == 1, arr.ind = TRUE)
cycledegree <- diag(path_matrix)
# Vertices WITH EDGES
vertices <- unique(c(cycles[, 1], cycles[, 2]))
# Initialize graph objects
preference_graph <- graph.data.frame(cycles, directed = FALSE)
# Number of 2-step comparison paths through observation i
# (diagonal of the squared comparison matrix).
cyclesat <- function(i) {
  cycledegree[i]
}
# TRUE if observation i is revealed worse than observation j.
compare <- function(i, j) {
  comparison_matrix[i, j]
}
|
\name{cases.suf.irr}
\alias{cases.suf.irr}
\title{
List individually irrelevant cases.
}
\description{
Function extracts individually irrelevant cases from an object of class "qca".
}
\usage{
cases.suf.irr(results, outcome, solution = 1)
}
\arguments{
\item{results}{
An object of class "qca".
}
\item{outcome}{
A character string with the name of the outcome.
}
\item{solution}{
A numeric vector where the first number indicates the number of the
solution according to the order in the "qca" object.
}
}
\details{
}
\value{
}
\references{
}
\author{
Juraj Medzihorsky
}
\note{
}
\seealso{
\code{\link[QCA:eqmcc]{eqmcc}}
}
\examples{}
\keyword{QCA}
|
/man/cases.suf.irr.Rd
|
no_license
|
jmedzihorsky/SetMethods
|
R
| false
| false
| 667
|
rd
|
\name{cases.suf.irr}
\alias{cases.suf.irr}
\title{
List individually irrelevant cases.
}
\description{
Function extracts individually irrelevant cases from an object of class "qca".
}
\usage{
cases.suf.irr(results, outcome, solution = 1)
}
\arguments{
\item{results}{
An object of class "qca".
}
\item{outcome}{
A character string with the name of the outcome.
}
\item{solution}{
A numeric vector where the first number indicates the number of the
solution according to the order in the "qca" object.
}
}
\details{
}
\value{
}
\references{
}
\author{
Juraj Medzihorsky
}
\note{
}
\seealso{
\code{\link[QCA:eqmcc]{eqmcc}}
}
\examples{}
\keyword{QCA}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-conv-mgnbiot.R
\name{get_conv_mgnbiot}
\alias{get_conv_mgnbiot}
\title{Extract conversion factor used to transform data from nitrogen in mg to
biomass in tonnes.}
\usage{
get_conv_mgnbiot(dir = getwd(), prm_biol)
}
\arguments{
\item{dir}{Character string giving the path of the Atlantis model folder.
If data is stored in multiple folders (e.g. main model folder and output
folder) you should use 'NULL' as dir.}
\item{prm_biol}{Character string giving the filename of the biological
parameterfile. Usually "[...]biol_fishing[...].prm". In case you are using
multiple folders for your model files and outputfiles pass the complete
folder/filename string and set dir to 'NULL'.}
}
\value{
Conversion factor as numeric value.
}
\description{
Extract conversion factor used to transform data from nitrogen in mg to
biomass in tonnes.
}
\examples{
d <- system.file("extdata", "setas-model-new-becdev", package = "atlantistools")
get_conv_mgnbiot(dir = d, prm_biol = "VMPA_setas_biol_fishing_New.prm")
}
\seealso{
Other get functions: \code{\link{get_boundary}},
\code{\link{get_colpal}}, \code{\link{get_groups}}
}
|
/man/get_conv_mgnbiot.Rd
|
no_license
|
bsnouffer/atlantistools
|
R
| false
| true
| 1,197
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-conv-mgnbiot.R
\name{get_conv_mgnbiot}
\alias{get_conv_mgnbiot}
\title{Extract conversion factor used to transform data from nitrogen in mg to
biomass in tonnes.}
\usage{
get_conv_mgnbiot(dir = getwd(), prm_biol)
}
\arguments{
\item{dir}{Character string giving the path of the Atlantis model folder.
If data is stored in multiple folders (e.g. main model folder and output
folder) you should use 'NULL' as dir.}
\item{prm_biol}{Character string giving the filename of the biological
parameterfile. Usually "[...]biol_fishing[...].prm". In case you are using
multiple folders for your model files and outputfiles pass the complete
folder/filename string and set dir to 'NULL'.}
}
\value{
Conversion factor as numeric value.
}
\description{
Extract conversion factor used to transform data from nitrogen in mg to
biomass in tonnes.
}
\examples{
d <- system.file("extdata", "setas-model-new-becdev", package = "atlantistools")
get_conv_mgnbiot(dir = d, prm_biol = "VMPA_setas_biol_fishing_New.prm")
}
\seealso{
Other get functions: \code{\link{get_boundary}},
\code{\link{get_colpal}}, \code{\link{get_groups}}
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex23.29}
\alias{ex23.29}
\title{Data from Exercise 23.29}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex23.29") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex23.29")
}
\description{
Data from Exercise 23.29 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 23 of \emph{BPS} 5th ed.: \code{\link{eg23.03}};
\code{\link{eg23.07}}; \code{\link{eg23.09}};
\code{\link{ex23.01}}; \code{\link{ex23.02}};
\code{\link{ex23.06}}; \code{\link{ex23.07}};
\code{\link{ex23.08}}; \code{\link{ex23.09}};
\code{\link{ex23.10}}; \code{\link{ex23.14}};
\code{\link{ex23.15}}; \code{\link{ex23.28}};
\code{\link{ex23.32}}; \code{\link{ex23.33}};
\code{\link{ex23.34}}; \code{\link{ex23.36}};
\code{\link{ex23.37}}; \code{\link{ex23.38}};
\code{\link{ex23.39}}; \code{\link{ex23.41}};
\code{\link{ex23.42}}; \code{\link{ex23.43}};
\code{\link{ex23.44}}; \code{\link{ex23.45}};
\code{\link{ex23.46}}; \code{\link{ta23.01}};
\code{\link{ta23.02}}; \code{\link{ta23.03}}
}
|
/man/ex23.29.Rd
|
no_license
|
jrnold/bps5data
|
R
| false
| false
| 1,312
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ex23.29}
\alias{ex23.29}
\title{Data from Exercise 23.29}
\format{\Sexpr[results=rd]{bps5data:::doc_data("ex23.29") }}
\source{
\url{ http://bcs.whfreeman.com/bps5e/content/cat_030/PC-Text.zip }
}
\usage{
data("ex23.29")
}
\description{
Data from Exercise 23.29 of \emph{The Basic Practice of Statistics}, 5th edition.
}
\references{
Moore, David S. 2009. \emph{The Basic Practice of Statistics}. 5th edition. New York: W. H. Freeman.
}
\seealso{
Other datasets from Chapter 23 of \emph{BPS} 5th ed.: \code{\link{eg23.03}};
\code{\link{eg23.07}}; \code{\link{eg23.09}};
\code{\link{ex23.01}}; \code{\link{ex23.02}};
\code{\link{ex23.06}}; \code{\link{ex23.07}};
\code{\link{ex23.08}}; \code{\link{ex23.09}};
\code{\link{ex23.10}}; \code{\link{ex23.14}};
\code{\link{ex23.15}}; \code{\link{ex23.28}};
\code{\link{ex23.32}}; \code{\link{ex23.33}};
\code{\link{ex23.34}}; \code{\link{ex23.36}};
\code{\link{ex23.37}}; \code{\link{ex23.38}};
\code{\link{ex23.39}}; \code{\link{ex23.41}};
\code{\link{ex23.42}}; \code{\link{ex23.43}};
\code{\link{ex23.44}}; \code{\link{ex23.45}};
\code{\link{ex23.46}}; \code{\link{ta23.01}};
\code{\link{ta23.02}}; \code{\link{ta23.03}}
}
|
library(rvest)
library(tidyverse)
library(lubridate)
library(dplyr)
library(janitor)
library(ggplot2)
# Scrape the "Coaches since 1963" table from Wikipedia's FC Bayern Munich page.
url <- read_html("https://en.wikipedia.org/wiki/FC_Bayern_Munich") #Wikipedia's article about FC Bayern Munich
all_tables <- url %>% html_table(fill = TRUE) #all tables from the article
# NOTE(review): table index 20 is fragile -- it breaks if Wikipedia reorders
# the article's tables. Consider selecting the table by caption instead.
required_table <- all_tables[[20]] #"Coaches since 1963" table
required_table <- required_table[-1,]
required_table <- required_table %>% clean_names()
# Changing empty values "-" to "0".
# Fixed: seq_along() instead of 1:length(), which misbehaves on length 0.
for (i in seq_along(required_table)) {
  for (j in seq_along(required_table[[i]])) {
    # str_replace() substitutes only the first "-" per cell; the title
    # columns only ever hold a lone "-" placeholder, so this is sufficient.
    # NOTE(review): cells containing a real hyphen would also be mangled --
    # assumed not to occur in this table; verify after scraping.
    required_table[[i]][j] <- required_table[[i]][j] %>%
      stringr::str_replace("-", "0")
  }
}
# Changing date format to "Y-m-d"
required_table$period <- parse_date_time(x = required_table$period,
orders = "d B Y") %>% str_remove("UTC")
required_table$period_2 <- parse_date_time(x = required_table$period_2,
orders = "d B Y") %>% str_remove("UTC")
# Finding total sum of domestic titles (summing up BL, DP, LP and SC titles)
domestic_titles <- data.frame(as.numeric(required_table$domestic), as.numeric(required_table$domestic_2),
as.numeric(required_table$domestic_3), as.numeric(required_table$domestic_4))
total_domestic_titles <- data.frame(rowSums(domestic_titles, na.rm = TRUE))
# Finding total sum of european titles (summing up CL, EL, SC and WC titles)
european_titles <- data.frame(as.numeric(required_table$european), as.numeric(required_table$european_2),
as.numeric(required_table$european_3), as.numeric(required_table$european_4))
total_european_titles <- data.frame(rowSums(european_titles, na.rm = TRUE))
# Finding total sum of worldwide titles (summing up ICC and CWC titles)
worldwide_titles <- data.frame(as.numeric(required_table$worldwide), as.numeric(required_table$worldwide_2))
total_worldwide_titles <- data.frame(rowSums(worldwide_titles, na.rm = TRUE))
# Merging "from" and "until" into one date column "period"
period_new <- paste("From", required_table$period, "until", required_table$period_2)
coaches <- data.frame(required_table$coach, period_new, required_table$major_titles, total_domestic_titles,
total_european_titles, total_worldwide_titles)
# Changing names of the columns
colnames(coaches) <- c("Coach", "Period", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
# Ordering the table by "Total number of titles" from the largest to the smallest value
coaches <- coaches[order(as.integer(coaches$`Total number of titles`),decreasing = TRUE), ]
# NOTE(review): the two hard-coded row fixes below depend on the current
# ordering/content of the Wikipedia table and will silently go stale.
coaches$Period[3] <- "From 2019-11-03 until present" #changing "NA" to "present"
coaches$Coach[2] <- "Pep Guardiola" #removing additional "[172][173]"
coaches$Coach <- coaches$Coach %>% str_remove_all("[:punct:]caretaker[:punct:]") %>% trimws("r") #removing "(caretaker) "
### 1. Table of coaches of FC Bayern Munich since 1963 (with some duplicate values in "Coach" column))
View(coaches)
# Saving created data frame as a .csv file
write.csv(coaches, "~\\coaches.csv", row.names = FALSE)
# Creating a data frame that will be transformed into a new data frame without duplicates and dates ("Period" column)
coaches1 <- data.frame(coaches$Coach, as.numeric(unlist(coaches$`Total number of titles`)), as.numeric(unlist(coaches$`Domestic titles`)),
as.numeric(unlist(coaches$`European titles`)), as.numeric(unlist(coaches$`Worldwide titles`)))
# Changing names of the columns
colnames(coaches1) <- c("Coach", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
# Finding duplicates in column "Coach", leaving only 1 value of "Coach" and summarising values of other columns.
# Fixed: left-assignment instead of the right-assign (->) style.
coaches_dub <- coaches1 %>%
  group_by(Coach) %>%
  filter(n() > 1) %>%
  summarise_all(sum)
# Creating a data frame with only unique values in "Coach" column (removing all duplicates)
coaches_no_dub <- filter(coaches1, Coach != "Franz Beckenbauer" & Coach != "Giovanni Trapattoni" & Coach != "Jupp Heynckes"
& Coach != "Ottmar Hitzfeld" & Coach != "Udo Lattek")
# Combining "coaches_dub" and "coaches_no_dub" into one
coaches_clean <- data.frame(rbind(coaches_dub, coaches_no_dub))
# Changing names of the columns
colnames(coaches_clean) <- c("Coach", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
### 2. Table of coaches of FC Bayern Munich since 1963 (without duplicate values in "Coach" column and without "Period" column))
View(coaches_clean)
# Saving created data frame as a .csv file
write.csv(coaches_clean, "~\\coaches_clean.csv", row.names = FALSE)
# Transforming a data frame into the form that will be suitable for drawing a stacked bar chart (adding a new "Type of title" column)
coaches_plot <- data.frame(rbind(cbind(coaches_clean$Coach, coaches_clean$`Domestic titles`, rep("Domestic titles", 23)),
cbind(coaches_clean$Coach, coaches_clean$`European titles`, rep("European titles", 23)),
cbind(coaches_clean$Coach, coaches_clean$`Worldwide titles`, rep("Worldwide titles", 23))))
# Changing names of the columns
colnames(coaches_plot) <- c("Coach", "Number of titles", "Type of title")
# Changing zero values to NA
coaches_plot[coaches_plot == 0] <- NA
# Removing NA values from the data frame
coaches_plot <- na.omit(coaches_plot)
# Changing types of variables
Coach <- as.factor(coaches_plot[,1])
Number_of_titles <- as.numeric(coaches_plot[,2])
Type_of_title <- as.factor(coaches_plot[,3])
coaches_plot_clean <- data.frame(Coach, Number_of_titles, Type_of_title)
### 3. Table of coaches of FC Bayern Munich since 1963 that is suitable for drawing stacked bar chart
View(coaches_plot_clean)
# Saving created data frame as a .csv file
write.csv(coaches_plot_clean, "~\\coaches_plot_clean.csv", row.names = FALSE)
### 4. The stacked bar chart of transformed table of coaches of FC Bayern Munich since 1963
chart <- ggplot(coaches_plot_clean, aes(x = factor(Coach), y = Number_of_titles, fill = Type_of_title, label = Number_of_titles)) +
geom_bar(position = position_stack(), stat = "identity", width = .7) +
geom_text(aes(label = Number_of_titles), position = position_stack(vjust = 0.5),size = 4) +
scale_x_discrete(name = NULL) +
scale_y_continuous(name = "Sum of titles", limits = c(0, 14), breaks = seq(0, 14, by = 2)) +
coord_flip() +
scale_fill_brewer(palette="Reds", direction = -1) +
theme_bw() +
theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.title = element_blank(),
plot.caption = element_text(hjust = 0)
) +
ggtitle("Coaches of FC Bayern Munich since 1963") +
labs(caption = "Data Source: Wikipedia.\n\nNotes: A stacked bar chart showing the domestic, europeand and worldwide titles won by FC Bayern \nMunich head coaches from 1963 to the present. \nDomestic titles (BL - Bundesliga, DP - DFB-Pokal, LP - DFB-Ligapokal, SC - Super Cup), European ti-\ntles (CL - Champions League/European Cup, EL - Europa League/UEFA Cup, SC - UEFA Super Cup, \nWC - UEFA Cup Winners' Cup), Worldwide titles (ICC - Intercontinental Cup, CWC - FIFA Club World \nCup).")
# Saving created chart as a .png file
ggsave("~\\chart.png", chart, device = NULL, dpi = 300)
|
/Data_extraction_and_web_scrapping/Web_scraping/wiki_scraping.R
|
no_license
|
RutaKondrot/R_projects
|
R
| false
| false
| 7,549
|
r
|
library(rvest)
library(tidyverse)
library(lubridate)
library(dplyr)
library(janitor)
library(ggplot2)
# Scrape the "Coaches since 1963" table from the FC Bayern Munich Wikipedia
# article, clean it, aggregate title counts, export three CSV views of the
# data, and draw a stacked bar chart of titles per coach.
url <- read_html("https://en.wikipedia.org/wiki/FC_Bayern_Munich") #Wikipedia's article about FC Bayern Munich
all_tables <- url %>% html_table(fill = TRUE) #all tables from the article
# NOTE(review): table index 20 is hard-coded -- breaks if Wikipedia reorders
# the article's tables; confirm on each re-run.
required_table <- all_tables[[20]] #"Coaches since 1963" table
required_table <- required_table[-1,]
required_table <- required_table %>% clean_names()
# Changing empty values "-" to "0"
# NOTE(review): str_replace() replaces only the FIRST "-" in each cell, and it
# would also hit a hyphen inside a name or date -- presumably the cells hold
# either counts or a lone "-" placeholder; confirm against the live table.
for(i in 1:length(required_table)) {
for(j in 1:length(required_table[[i]])) {
required_table[[i]][j] <- required_table[[i]][j] %>%
stringr::str_replace("-", "0")
}
}
# Changing date format to "Y-m-d"
# Piping through str_remove("UTC") coerces the POSIXct result back to
# character, which is why the columns can later be paste()d as text.
required_table$period <- parse_date_time(x = required_table$period,
orders = "d B Y") %>% str_remove("UTC")
required_table$period_2 <- parse_date_time(x = required_table$period_2,
orders = "d B Y") %>% str_remove("UTC")
# Finding total sum of domestic titles (summing up BL, DP, LP and SC titles)
domestic_titles <- data.frame(as.numeric(required_table$domestic), as.numeric(required_table$domestic_2),
as.numeric(required_table$domestic_3), as.numeric(required_table$domestic_4))
total_domestic_titles <- data.frame(rowSums(domestic_titles, na.rm = TRUE))
# Finding total sum of european titles (summing up CL, EL, SC and WC titles)
european_titles <- data.frame(as.numeric(required_table$european), as.numeric(required_table$european_2),
as.numeric(required_table$european_3), as.numeric(required_table$european_4))
total_european_titles <- data.frame(rowSums(european_titles, na.rm = TRUE))
# Finding total sum of worldwide titles (summing up ICC and CWC titles)
worldwide_titles <- data.frame(as.numeric(required_table$worldwide), as.numeric(required_table$worldwide_2))
total_worldwide_titles <- data.frame(rowSums(worldwide_titles, na.rm = TRUE))
# Merging "from" and "until" into one date column "period"
period_new <- paste("From", required_table$period, "until", required_table$period_2)
coaches <- data.frame(required_table$coach, period_new, required_table$major_titles, total_domestic_titles,
total_european_titles, total_worldwide_titles)
# Changing names of the columns
colnames(coaches) <- c("Coach", "Period", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
# Ordering the table by "Total number of titles" from the largest to the smallest value
coaches <- coaches[order(as.integer(coaches$`Total number of titles`),decreasing = TRUE), ]
# NOTE(review): the two hard-coded row fixes below depend on the current sort
# position of specific coaches -- they silently corrupt other rows if the
# source table changes.
coaches$Period[3] <- "From 2019-11-03 until present" #changing "NA" to "present"
coaches$Coach[2] <- "Pep Guardiola" #removing additional "[172][173]"
coaches$Coach <- coaches$Coach %>% str_remove_all("[:punct:]caretaker[:punct:]") %>% trimws("r") #removing "(caretaker) "
### 1. Table of coaches of FC Bayern Munich since 1963 (with some duplicate values in "Coach" column))
View(coaches)
# Saving created data frame as a .csv file
write.csv(coaches, "~\\coaches.csv", row.names = FALSE)
# Creating a data frame that will be transformed into a new data frame without duplicates and dates ("Period" column)
coaches1 <- data.frame(coaches$Coach, as.numeric(unlist(coaches$`Total number of titles`)), as.numeric(unlist(coaches$`Domestic titles`)),
as.numeric(unlist(coaches$`European titles`)), as.numeric(unlist(coaches$`Worldwide titles`)))
# Changing names of the columns
colnames(coaches1) <- c("Coach", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
# Finding duplicates in column "Coach", leaving only 1 value of "Coach" and summarising values of other columns
coaches1 %>%
group_by(Coach) %>%
filter(n()>1) %>%
summarise_all(sum) -> coaches_dub
# Creating a data frame with only unique values in "Coach" column (removing all duplicates)
# NOTE(review): the duplicated coaches are listed by hand here -- keep this in
# sync with whatever group_by/filter(n()>1) found above.
coaches_no_dub <- filter(coaches1, Coach != "Franz Beckenbauer" & Coach != "Giovanni Trapattoni" & Coach != "Jupp Heynckes"
& Coach != "Ottmar Hitzfeld" & Coach != "Udo Lattek")
# Combining "coaches_dub" and "coaches_no_dub" into one
coaches_clean <- data.frame(rbind(coaches_dub, coaches_no_dub))
# Changing names of the columns
colnames(coaches_clean) <- c("Coach", "Total number of titles", "Domestic titles", "European titles", "Worldwide titles")
### 2. Table of coaches of FC Bayern Munich since 1963 (without duplicate values in "Coach" column and without "Period" column))
View(coaches_clean)
# Saving created data frame as a .csv file
write.csv(coaches_clean, "~\\coaches_clean.csv", row.names = FALSE)
# Transforming a data frame into the form that will be suitable for drawing a stacked bar chart (adding a new "Type of title" column)
# NOTE(review): the hard-coded 23 is presumably nrow(coaches_clean) -- using
# the literal makes this break as soon as the roster of coaches changes.
coaches_plot <- data.frame(rbind(cbind(coaches_clean$Coach, coaches_clean$`Domestic titles`, rep("Domestic titles", 23)),
cbind(coaches_clean$Coach, coaches_clean$`European titles`, rep("European titles", 23)),
cbind(coaches_clean$Coach, coaches_clean$`Worldwide titles`, rep("Worldwide titles", 23))))
# Changing names of the columns
colnames(coaches_plot) <- c("Coach", "Number of titles", "Type of title")
# Changing zero values to NA
coaches_plot[coaches_plot == 0] <- NA
# Removing NA values from the data frame
coaches_plot <- na.omit(coaches_plot)
# Changing types of variables
Coach <- as.factor(coaches_plot[,1])
Number_of_titles <- as.numeric(coaches_plot[,2])
Type_of_title <- as.factor(coaches_plot[,3])
coaches_plot_clean <- data.frame(Coach, Number_of_titles, Type_of_title)
### 3. Table of coaches of FC Bayern Munich since 1963 that is suitable for drawing stacked bar chart
View(coaches_plot_clean)
# Saving created data frame as a .csv file
write.csv(coaches_plot_clean, "~\\coaches_plot_clean.csv", row.names = FALSE)
### 4. The stacked bar chart of transformed table of coaches of FC Bayern Munich since 1963
chart <- ggplot(coaches_plot_clean, aes(x = factor(Coach), y = Number_of_titles, fill = Type_of_title, label = Number_of_titles)) +
geom_bar(position = position_stack(), stat = "identity", width = .7) +
geom_text(aes(label = Number_of_titles), position = position_stack(vjust = 0.5),size = 4) +
scale_x_discrete(name = NULL) +
scale_y_continuous(name = "Sum of titles", limits = c(0, 14), breaks = seq(0, 14, by = 2)) +
coord_flip() +
scale_fill_brewer(palette="Reds", direction = -1) +
theme_bw() +
theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.title = element_blank(),
plot.caption = element_text(hjust = 0)
) +
ggtitle("Coaches of FC Bayern Munich since 1963") +
# NOTE(review): "europeand" in the caption string is a typo (user-facing text;
# left untouched in this documentation-only pass).
labs(caption = "Data Source: Wikipedia.\n\nNotes: A stacked bar chart showing the domestic, europeand and worldwide titles won by FC Bayern \nMunich head coaches from 1963 to the present. \nDomestic titles (BL - Bundesliga, DP - DFB-Pokal, LP - DFB-Ligapokal, SC - Super Cup), European ti-\ntles (CL - Champions League/European Cup, EL - Europa League/UEFA Cup, SC - UEFA Super Cup, \nWC - UEFA Cup Winners' Cup), Worldwide titles (ICC - Intercontinental Cup, CWC - FIFA Club World \nCup).")
# Saving created chart as a .png file
ggsave("~\\chart.png", chart, device = NULL, dpi = 300)
|
# Unit tests for the print method of template objects (emld package).
testthat::context("template print method")
# template("creator") presumably prints a YAML-style skeleton; the escaped
# braces match the literal text "individualName: {}" in that output, and "~"
# is YAML's null placeholder -- confirm against the template() implementation.
test_that("We can show slots for the creator object", {
expect_output(print(template("creator")), "individualName: \\{\\}")
expect_output(print(template("creator")), "phone: ~")
})
# Internal (non-exported) classes should be templatable by name as well.
test_that("template knows about internal classes too", {
expect_output(print(template("ResponsibleParty")), "individualName: \\{\\}")
})
## test serializing to XML fragment doc
## (sketch for a future test -- intentionally commented out, not executed)
#f <- "tests/testthat/creator.yml"
#creator <- yaml::read_yaml(f)
#doc <- xml2::xml_new_document()
#add_node(creator, doc, "creator")
## Write element into complete doc
#eml <- parse_eml(system.file("inst/extdata/example.xml", package="emld"))
#eml$eml$dataset$creator <- creator
#doc <- as_eml_document.list(eml)
|
/tests/testthat/test-template.R
|
no_license
|
isteves/emld
|
R
| false
| false
| 731
|
r
|
# Unit tests for the print method of template objects (emld package).
testthat::context("template print method")
# template("creator") presumably prints a YAML-style skeleton; the escaped
# braces match the literal text "individualName: {}" in that output, and "~"
# is YAML's null placeholder -- confirm against the template() implementation.
test_that("We can show slots for the creator object", {
expect_output(print(template("creator")), "individualName: \\{\\}")
expect_output(print(template("creator")), "phone: ~")
})
# Internal (non-exported) classes should be templatable by name as well.
test_that("template knows about internal classes too", {
expect_output(print(template("ResponsibleParty")), "individualName: \\{\\}")
})
## test serializing to XML fragment doc
## (sketch for a future test -- intentionally commented out, not executed)
#f <- "tests/testthat/creator.yml"
#creator <- yaml::read_yaml(f)
#doc <- xml2::xml_new_document()
#add_node(creator, doc, "creator")
## Write element into complete doc
#eml <- parse_eml(system.file("inst/extdata/example.xml", package="emld"))
#eml$eml$dataset$creator <- creator
#doc <- as_eml_document.list(eml)
|
#' @title Remove Trailing Periods
#' @description Strip runs of periods from both the start and the end of each
#'   string (despite the name, leading periods are removed as well).
#' @param x A vector
#' @return A polished vector
#' @export
#' @author Leo Lahti \email{leo.lahti@@iki.fi}
#' @references See citation("fennica")
#' @examples \dontrun{x2 <- remove_trailing_periods(x)}
#' @keywords utilities
remove_trailing_periods <- function(x) {
  # Nothing to polish when every element is missing
  if (all(is.na(x))) {
    return(x)
  }
  # Single pass: leading and trailing period runs in one alternation;
  # NA elements pass through gsub() unchanged.
  gsub("^\\.+|\\.+$", "", x)
}
|
/R/remove_trailing_periods.R
|
permissive
|
COMHIS/fennica
|
R
| false
| false
| 446
|
r
|
#' @title Remove Trailing Periods
#' @description Strip runs of periods from both the start and the end of each
#'   string (despite the name, leading periods are removed as well).
#' @param x A vector
#' @return A polished vector
#' @export
#' @author Leo Lahti \email{leo.lahti@@iki.fi}
#' @references See citation("fennica")
#' @examples \dontrun{x2 <- remove_trailing_periods(x)}
#' @keywords utilities
remove_trailing_periods <- function(x) {
  # Short-circuit when there is nothing but missing values
  if (all(is.na(x))) {
    return(x)
  }
  out <- gsub("^\\.+", "", x)    # drop leading period runs
  out <- gsub("\\.+$", "", out)  # drop trailing period runs
  out
}
|
/code/preparing data/bewertung ZEIT/Identifying articles containing inflation/Identifiaktion of articles containing inflation.R
|
no_license
|
dullibri/zeit-2
|
R
| false
| false
| 4,137
|
r
| ||
# Example for Jags-Ybinom-XnomSsubjCcat-MbinomBetaOmegaKappa.R
# Driver script (Kruschke-style JAGS workflow): fit a hierarchical binomial
# model of daily air-quality tolerance counts by season, then summarise and
# plot pairwise season contrasts. Relies on genMCMC.R defining genMCMC(),
# smryMCMC() and plotMCMC().
#-------------------------------------------------------------------------------
# Optional generic preliminaries:
graphics.off() # This closes all of R's graphics windows.
# NOTE(review): rm(list=ls()) wipes the user's whole workspace when sourced;
# prefer running this script in a fresh R session instead.
rm(list=ls())  # Careful! This clears all of R's memory!
#-------------------------------------------------------------------------------
# Read the data
myData = read.csv("output-season.csv")
#-------------------------------------------------------------------------------
# Load the relevant model into R's working memory:
source("genMCMC.R")
#-------------------------------------------------------------------------------
# Optional: Specify filename root and graphical format for saving output.
# Otherwise specify as NULL or leave saveName and saveType arguments
# out of function calls.
fileNameRoot = "air-result-Season-DailyTol-"
graphFileType = "pdf"
#-------------------------------------------r------------------------------------
# Generate the MCMC chain:
startTime = proc.time()
mcmcCoda = genMCMC( data=myData ,
# The column in our data
zName="DailyTol", NName="Dummy", sName="DateStation", cName="Season",
numSavedSteps=500 , saveName=fileNameRoot ,
thinSteps=20)
stopTime = proc.time()
elapsedTime = stopTime - startTime
show(elapsedTime)
#-------------------------------------------------------------------------------
# Display diagnostics of chain, for specified parameters:
parameterNames = varnames(mcmcCoda) # get all parameter names for reference
#for ( parName in c("omega[1]","omegaO","kappa[1]","kappaO","theta[1]") ) {
# diagMCMC( codaObject=mcmcCoda , parName=parName ,
# saveName=fileNameRoot , saveType=graphFileType )
#}
#-------------------------------------------------------------------------------
# Get summary statistics of chain:
summaryInfo = smryMCMC( mcmcCoda , compVal=NULL ,
#diffSVec=c(75,156, 159,844) ,
diffCVec=c(1,2,3,4) , # Four Season
compValDiff=0.0 , saveName=fileNameRoot )
# Display posterior information:
# NOTE(review): sName is "DateStation" in the genMCMC() call above but "Date"
# here -- presumably one of the two is stale; confirm against genMCMC.R.
plotMCMC( mcmcCoda , data=myData ,
# The column in our data
zName="DailyTol", NName="Dummy", sName="Date", cName="Season",
compVal=NULL ,
diffCList=list( c("Spring","Summer") ,
c("Summer","Autumn") ,
c("Autumn","Winter") ,# Compare Spring and Summer
c("Winter","Spring") ) , # Compare Autumn and Winter
#diffSList=list( c("2014/01/04","2014/06/13") ), # Compare two dates
# c("Mike Leake","Wandy Rodriguez") ,
#c("Andrew McCutchen","Brett Jackson") ,
#c("ShinSoo Choo","Ichiro Suzuki") ) ,
compValDiff=0.0, #ropeDiff = c(-0.05,0.05) ,
saveName=fileNameRoot , saveType=graphFileType )
#-------------------------------------------------------------------------------
|
/genMCMC-script.R
|
no_license
|
nature-sky/Air
|
R
| false
| false
| 3,027
|
r
|
# Example for Jags-Ybinom-XnomSsubjCcat-MbinomBetaOmegaKappa.R
# Driver script (Kruschke-style JAGS workflow): fit a hierarchical binomial
# model of daily air-quality tolerance counts by season, then summarise and
# plot pairwise season contrasts. Relies on genMCMC.R defining genMCMC(),
# smryMCMC() and plotMCMC().
#-------------------------------------------------------------------------------
# Optional generic preliminaries:
graphics.off() # This closes all of R's graphics windows.
# NOTE(review): rm(list=ls()) wipes the user's whole workspace when sourced;
# prefer running this script in a fresh R session instead.
rm(list=ls())  # Careful! This clears all of R's memory!
#-------------------------------------------------------------------------------
# Read the data
myData = read.csv("output-season.csv")
#-------------------------------------------------------------------------------
# Load the relevant model into R's working memory:
source("genMCMC.R")
#-------------------------------------------------------------------------------
# Optional: Specify filename root and graphical format for saving output.
# Otherwise specify as NULL or leave saveName and saveType arguments
# out of function calls.
fileNameRoot = "air-result-Season-DailyTol-"
graphFileType = "pdf"
#-------------------------------------------r------------------------------------
# Generate the MCMC chain:
startTime = proc.time()
mcmcCoda = genMCMC( data=myData ,
# The column in our data
zName="DailyTol", NName="Dummy", sName="DateStation", cName="Season",
numSavedSteps=500 , saveName=fileNameRoot ,
thinSteps=20)
stopTime = proc.time()
elapsedTime = stopTime - startTime
show(elapsedTime)
#-------------------------------------------------------------------------------
# Display diagnostics of chain, for specified parameters:
parameterNames = varnames(mcmcCoda) # get all parameter names for reference
#for ( parName in c("omega[1]","omegaO","kappa[1]","kappaO","theta[1]") ) {
# diagMCMC( codaObject=mcmcCoda , parName=parName ,
# saveName=fileNameRoot , saveType=graphFileType )
#}
#-------------------------------------------------------------------------------
# Get summary statistics of chain:
summaryInfo = smryMCMC( mcmcCoda , compVal=NULL ,
#diffSVec=c(75,156, 159,844) ,
diffCVec=c(1,2,3,4) , # Four Season
compValDiff=0.0 , saveName=fileNameRoot )
# Display posterior information:
# NOTE(review): sName is "DateStation" in the genMCMC() call above but "Date"
# here -- presumably one of the two is stale; confirm against genMCMC.R.
plotMCMC( mcmcCoda , data=myData ,
# The column in our data
zName="DailyTol", NName="Dummy", sName="Date", cName="Season",
compVal=NULL ,
diffCList=list( c("Spring","Summer") ,
c("Summer","Autumn") ,
c("Autumn","Winter") ,# Compare Spring and Summer
c("Winter","Spring") ) , # Compare Autumn and Winter
#diffSList=list( c("2014/01/04","2014/06/13") ), # Compare two dates
# c("Mike Leake","Wandy Rodriguez") ,
#c("Andrew McCutchen","Brett Jackson") ,
#c("ShinSoo Choo","Ichiro Suzuki") ) ,
compValDiff=0.0, #ropeDiff = c(-0.05,0.05) ,
saveName=fileNameRoot , saveType=graphFileType )
#-------------------------------------------------------------------------------
|
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of four accessors closing over the stored matrix:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Compute the inverse of the special "matrix" built by makeCacheMatrix().
## If the inverse has already been cached (and the matrix has not changed),
## the cached value is returned instead of being recomputed.
##
## x   : accessor list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
## Returns the matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  ## FIX: the original used `<<-` here, which leaked `m` into the global
  ## environment instead of binding it locally; also dropped a leftover
  ## debug print("caching solve").
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)  # cache for subsequent calls
  m
}
|
/cachematrix.R
|
no_license
|
sridhar1982/ProgrammingAssignment2
|
R
| false
| false
| 1,008
|
r
|
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of four accessors closing over the stored matrix:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Compute the inverse of the special "matrix" built by makeCacheMatrix().
## If the inverse has already been cached (and the matrix has not changed),
## the cached value is returned instead of being recomputed.
##
## x   : accessor list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
## Returns the matrix inverse of x$get().
cacheSolve <- function(x, ...) {
  ## FIX: the original used `<<-` here, which leaked `m` into the global
  ## environment instead of binding it locally; also dropped a leftover
  ## debug print("caching solve").
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)  # cache for subsequent calls
  m
}
|
#Live Session 4 For Live Session Web Scraping Code
library(XML) #xml_Parse
library(dplyr)
library(tidyr)
library(stringi)
library(rvest) #html_table, html_node
library(ggplot2)
library(RCurl) #getURL
#Basics of Scraping XML
# XML
# Fetch a small demo XML menu and pull parallel field vectors via XPath.
data <-getURL("https://www.w3schools.com/xml/simple.xml")
doc <- xmlParse(data)
# NOTE(review): `names` shadows base::names() for the rest of the session.
names <- xpathSApply(doc,"//name",xmlValue)
price <- xpathSApply(doc,"//price",xmlValue)
description <- xpathSApply(doc,"//description",xmlValue)
bfasts = data.frame(names,price,description)
bfasts
bfasts$description
# Count / flag / locate descriptions containing the word "covered"
length(grep("covered",bfasts$description))
grepl("covered",bfasts$description)
sum(grepl("covered",bfasts$description))
which(grepl("covered",bfasts$description))
# rvest
# Same document via rvest; node text is recovered by slicing off the literal
# <tag>/</tag> wrappers with stri_sub (offsets depend on each tag's length).
hp<-read_html("https://www.w3schools.com/xml/simple.xml")
hp_nameR <- html_nodes(hp,"name")
hp_priceR <- html_nodes(hp,"price")
hp_descR <- html_nodes(hp,"description")
hp_nameR
hp_name = stri_sub(hp_nameR,7,-8)
hp_name
hp_price = stri_sub(hp_priceR,8,-9)
hp_price
hp_desc = stri_sub(hp_descR,14,-15)
hp_desc
bfast = data.frame(hp_name,hp_price,hp_desc)
# Same grep/grepl drill as above, this time for "toast"
grep("toast", bfast$hp_desc)
grepl("toast",bfast$hp_desc)
sum(grepl("toast",bfast$hp_desc))
# Scraping xml
#Breakout 1
#using xml ... what is the problem?
# Pull the restaurants XML feed and extract three parallel fields via XPath.
data <- getURL("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml")
doc <- xmlParse(data)
names <- xpathSApply(doc,"//name",xmlValue)
zipcodes <- xpathSApply(doc,"//zipcode",xmlValue)
councildistrict <- xpathSApply(doc,"//councildistrict",xmlValue)
rests = data.frame(names,zipcodes,councildistrict)
dim(rests)
restsDTown = rests[which(rests$councildistrict == "11"),]
# FIX: the original called Rr(), which does not exist anywhere in scope;
# grep() was clearly intended (mirrors the rvest version further down).
grep("Sushi", rests$names, ignore.case = TRUE)
#Using rvest
hp<-read_html("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml")
hp_name2 <- html_nodes(hp,"name")
hp_zipcode2 <- html_nodes(hp,"zipcode")
hp_councildistrict2 <- html_nodes(hp,"councildistrict")
# Slice off the literal <tag>/</tag> wrappers to recover each node's text.
hp_name2 = stri_sub(hp_name2,7,-8)
hp_zipcode2 = stri_sub(hp_zipcode2,10,-11)
hp_councildistrict2 = stri_sub(hp_councildistrict2,18,-19)
hp_zipcode2 = as.numeric(hp_zipcode2)
hp_councildistrict2 = as.numeric(hp_councildistrict2)
#How many restaurants total
#restByDist = hist(hp_councildistrict2)
#barplot(height = restByDist$counts, names = (as.character(seq(1,13,1))),xlab = "Council District",ylab = "Number of Restaurants")
#barplot(height = restByDist$counts, names = (as.character(seq(1,13,1))),xlab = "Number of Restaurants",ylab = "Council District", horiz = TRUE)
RestaurantDF = data.frame(Name = hp_name2, Zip = hp_zipcode2, District = hp_councildistrict2)
RestaurantDF %>% ggplot(aes(x = District, fill = factor(District))) + geom_bar(stat = "count")
RestaurantDF %>% ggplot(aes(x = factor(District), fill = factor(District))) + geom_bar(stat = "count")
#How many Sushi Restaurants?
restsDTown = RestaurantDF %>% filter(District == "11")
# FIX: spell out TRUE rather than the reassignable shorthand T.
grep("Sushi", restsDTown$Name, ignore.case = TRUE)
# NOTE(review): the brackets below form a character class -- this matches any
# of the letters S/u/s/h/i, NOT the word "Sushi". Kept as a teaching contrast.
grep("[Sushi]", restsDTown$Name, ignore.case = TRUE)
# Break Out 2
#Harry Potter
#1A / 1B
# Scrape the full credits page for Deathly Hallows Part 2 and grab its tables.
hp<-read_html("http://www.imdb.com/title/tt1201607/fullcredits?ref_=tt_ql_1")
hp_table<-html_nodes(hp,"table")
derp<-html_table(hp_table)
# Find the right table
# NOTE(review): table index 3 is hard-coded -- breaks if IMDb changes layout.
derp[3]
#1C - Cleaning
a<-data.frame(derp[3])
names(a) <- c("Blank", "Actor", "Blank2","Character")
# Drop the header row and keep only the two meaningful columns.
df<-a[2:length(a$Actor),c("Actor", "Character")]
# NOTE(review): hard-coded row fix -- silently corrupts another row if the
# cast order on IMDb changes.
df$Character[10] <- "Griphook / Professor Filius Flitwick"
# 1D -Edit The Cast List
# The regex splits on the LAST space only, so multi-part first names survive.
b<-df %>%
slice(-92) %>% # Removes the row that is just noting the rest is alphabetical
separate(Actor, into=c("FirstNames", "Surname"), sep="[ ](?=[^ ]+$)") # Separates the Last Name
#1E
head(b, 10)
#Stars
# Scrape the Dallas Stars roster; ESPN splits the roster across several
# page tables, with the fragments landing at positions 3, 6, 9, 12 and 15.
stars<-read_html("http://www.espn.com/nhl/team/roster/_/name/dal/dallas-stars")
stars_table<-html_nodes(stars, "table")
stars_dfs<-html_table(stars_table, fill = TRUE)
# Keep the individual fragments (as before) for inspection ...
Rost1 = stars_dfs[[3]]
Rost2 = stars_dfs[[6]]
Rost3 = stars_dfs[[9]]
Rost4 = stars_dfs[[12]]
Rost5 = stars_dfs[[15]]
# ... but stack them in ONE call instead of growing the data frame with
# four chained rbind() calls (quadratic copying).
Roster = do.call(rbind, list(Rost1, Rost2, Rost3, Rost4, Rost5))
# API
# NOTE(review): install.packages() inside a script re-installs on every run;
# run it once interactively instead of committing it here.
install.packages("WDI")
## Install and load package
library(WDI)
## Search for fertilizer consumption data
WDIsearch("Data")
## Use indicator number to gather data
FertConsumpData <- WDI(indicator="AG.CON.FERT.ZS")
# Second pull: a US-only indicator restricted to 2017-2018.
MaleOFSD <- WDI(country = "US", indicator="UIS.ROFST.H.2.Q3.M", start = 2017, end = 2018)
#twitteR
# SECURITY FIX: Twitter API credentials were hard-coded in this committed
# script. Any secret that has ever been committed must be treated as
# compromised -- revoke those keys. Credentials are now read from environment
# variables (set them in ~/.Renviron, never in source control).
api_key <- Sys.getenv("TWITTER_API_KEY")
api_secret <- Sys.getenv("TWITTER_API_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET")
#Load twitteR
library(twitteR)
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)
#Get tweets (10 recent English-language tweets matching "$appl")
tweets <- searchTwitter("$appl", n = 10, lang = "en")
#Locations with available trend data (WOEID lookup table)
trend <- availableTrendLocations()
#Get Trends for Location (argument is the WOEID)
getTrends(395269) # Caracas, Venezuela
getTrends(2487889) # San Diego, California
getTrends(44418) # London, England
getTrends(2388929) # Dallas, US
DallasTrends <- getTrends(2388929) %>% select(name) # Dallas, US
DallasTrends[1:10,]
# World Bank Development Indicators
#Useful URL in explainging WDI XML and JSON data formats.
#https://datahelpdesk.worldbank.org/knowledgebase/articles/898599-indicator-api-queries
#Goal 1: Create a bar chart of topics relating to gdp.
#search for reports with "gdp" in the description
results = as.data.frame(WDIsearch("gdp"))
#Many reports have more than 4 parts of the indicator
# This is in contrast to this documentation:
#https://datahelpdesk.worldbank.org/knowledgebase/articles/201175-how-does-the-world-bank-code-its-indicators
# We use a new function from a new package that we will cover later: str_count
# This function is in the stringr package and simply counts the number of a specific
# character ("\\.") in a given string (indicator)
# The \\ means to literally look for the '.' which means something else in this context.
#This line will filter the data frame to leave only those with 4 pieces in the indicator.
# NOTE(review): str_count() comes from stringr, which is only attached later
# in this script via library(tidyverse) -- this fails if run top-to-bottom
# before that point; confirm intended run order.
resultsGoodIndicator = results %>% filter(str_count(indicator,"\\.")==3)
#Check out the new data frame with only 4 piece indicators
resultsGoodIndicator$indicator
# Break the indicator code up into 4 distinct columns.
resultsGoodIndicator = as.data.frame(resultsGoodIndicator) %>% separate(indicator,c("topic","general","specific","extension"))
#plot the topic column in a bar chart to see the frequency of each topic.
#compare the expenditure (NE) and the income (NY)
resultsGoodIndicator %>% ggplot(aes(x = topic, fill = topic)) + geom_bar()
#Goal 2: Plot GDP (NY and GDP) per capita (PCAP) of Mexico, Canada and the US in constant US dollars (KD)
dat = WDI(indicator='NY.GDP.PCAP.KD', country=c('MX','CA','US'), start=1960, end=2012)
head(dat)
library(ggplot2)
# One line per country across the requested year range.
ggplot(dat, aes(x = year, y = NY.GDP.PCAP.KD, color=country)) + geom_line() +
xlab('Year') + ylab('GDP per capita')
#API and json code
######################
# Loading the Data from the NYT API
######################
library(tidyr)
library(plyr)
library(jsonlite)
library(dplyr)
library(tidyverse)
# NOTE(review): this API key is committed to source control -- treat it as
# compromised; revoke it and load a fresh key from an environment variable.
NYTIMES_KEY = "OG89fUubcS8FXofVrLA4dmIOHh5omiFa" #Your Key Here … get from NYT API website
# Let's set some parameters
term <- "Central+Park+Jogger" # Need to use + to string together separate words
begin_date <- "19890419"
end_date <- "19890521"
# NOTE(review): paste0() has no sep argument -- the sep="" below is silently
# pasted in as an extra empty string (harmless here, but it should be removed).
baseurl <- paste0("http://api.nytimes.com/svc/search/v2/articlesearch.json?q=",term,
"&begin_date=",begin_date,"&end_date=",end_date,
"&facet_filter=true&api-key=",NYTIMES_KEY, sep="")
baseurl
initialQuery <- jsonlite::fromJSON(baseurl)
# Presumably 10 results per page -- hence hits/10 to get the last page index.
maxPages <- round((initialQuery$response$meta$hits[1] / 10)-1)
pages <- list()
# Fetch each page of results, pausing between requests for the rate limit.
for(i in 0:maxPages){
nytSearch <- jsonlite::fromJSON(paste0(baseurl, "&page=", i), flatten = TRUE) %>% data.frame()
message("Retrieving page ", i)
pages[[i+1]] <- nytSearch
Sys.sleep(7)
}
# rbind_pages() (jsonlite) stacks the per-page data frames, padding columns.
allNYTSearch <- rbind_pages(pages)
#Segmentation
allNYTSearch %>%
ggplot() +
geom_bar(aes(x=response.docs.type_of_material, fill=response.docs.type_of_material), stat = "count") + coord_flip()
# Visualize coverage by section
allNYTSearch %>%
group_by(response.docs.type_of_material) %>%
dplyr::summarize(count=n()) %>%
mutate(percent = (count / sum(count))*100) %>%
ggplot() +
geom_bar(aes(y=percent, x=response.docs.type_of_material, fill=response.docs.type_of_material), stat = "identity") + coord_flip()
|
/Live Assignments Unit 4/R Code for Unit 4 Live Session V2.R
|
no_license
|
Adeelq87/6306-Doing-Data-Science
|
R
| false
| false
| 8,424
|
r
|
#Live Session 4 For Live Session Web Scraping Code
library(XML) #xml_Parse
library(dplyr)
library(tidyr)
library(stringi)
library(rvest) #html_table, html_node
library(ggplot2)
library(RCurl) #getURL
#Basics of Scraping XML
# XML
# Fetch a small demo XML menu and pull parallel field vectors via XPath.
data <-getURL("https://www.w3schools.com/xml/simple.xml")
doc <- xmlParse(data)
# NOTE(review): `names` shadows base::names() for the rest of the session.
names <- xpathSApply(doc,"//name",xmlValue)
price <- xpathSApply(doc,"//price",xmlValue)
description <- xpathSApply(doc,"//description",xmlValue)
bfasts = data.frame(names,price,description)
bfasts
bfasts$description
# Count / flag / locate descriptions containing the word "covered"
length(grep("covered",bfasts$description))
grepl("covered",bfasts$description)
sum(grepl("covered",bfasts$description))
which(grepl("covered",bfasts$description))
# rvest
# Same document via rvest; node text is recovered by slicing off the literal
# <tag>/</tag> wrappers with stri_sub (offsets depend on each tag's length).
hp<-read_html("https://www.w3schools.com/xml/simple.xml")
hp_nameR <- html_nodes(hp,"name")
hp_priceR <- html_nodes(hp,"price")
hp_descR <- html_nodes(hp,"description")
hp_nameR
hp_name = stri_sub(hp_nameR,7,-8)
hp_name
hp_price = stri_sub(hp_priceR,8,-9)
hp_price
hp_desc = stri_sub(hp_descR,14,-15)
hp_desc
bfast = data.frame(hp_name,hp_price,hp_desc)
# Same grep/grepl drill as above, this time for "toast"
grep("toast", bfast$hp_desc)
grepl("toast",bfast$hp_desc)
sum(grepl("toast",bfast$hp_desc))
# Scraping xml
#Breakout 1
#using xml ... what is the problem?
# Pull the restaurants XML feed and extract three parallel fields via XPath.
data <- getURL("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml")
doc <- xmlParse(data)
names <- xpathSApply(doc,"//name",xmlValue)
zipcodes <- xpathSApply(doc,"//zipcode",xmlValue)
councildistrict <- xpathSApply(doc,"//councildistrict",xmlValue)
rests = data.frame(names,zipcodes,councildistrict)
dim(rests)
restsDTown = rests[which(rests$councildistrict == "11"),]
# FIX: the original called Rr(), which does not exist anywhere in scope;
# grep() was clearly intended (mirrors the rvest version further down).
grep("Sushi", rests$names, ignore.case = TRUE)
#Using rvest
hp<-read_html("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml")
hp_name2 <- html_nodes(hp,"name")
hp_zipcode2 <- html_nodes(hp,"zipcode")
hp_councildistrict2 <- html_nodes(hp,"councildistrict")
# Slice off the literal <tag>/</tag> wrappers to recover each node's text.
hp_name2 = stri_sub(hp_name2,7,-8)
hp_zipcode2 = stri_sub(hp_zipcode2,10,-11)
hp_councildistrict2 = stri_sub(hp_councildistrict2,18,-19)
hp_zipcode2 = as.numeric(hp_zipcode2)
hp_councildistrict2 = as.numeric(hp_councildistrict2)
#How many restaurants total
#restByDist = hist(hp_councildistrict2)
#barplot(height = restByDist$counts, names = (as.character(seq(1,13,1))),xlab = "Council District",ylab = "Number of Restaurants")
#barplot(height = restByDist$counts, names = (as.character(seq(1,13,1))),xlab = "Number of Restaurants",ylab = "Council District", horiz = TRUE)
RestaurantDF = data.frame(Name = hp_name2, Zip = hp_zipcode2, District = hp_councildistrict2)
RestaurantDF %>% ggplot(aes(x = District, fill = factor(District))) + geom_bar(stat = "count")
RestaurantDF %>% ggplot(aes(x = factor(District), fill = factor(District))) + geom_bar(stat = "count")
#How many Sushi Restaurants?
restsDTown = RestaurantDF %>% filter(District == "11")
# FIX: spell out TRUE rather than the reassignable shorthand T.
grep("Sushi", restsDTown$Name, ignore.case = TRUE)
# NOTE(review): the brackets below form a character class -- this matches any
# of the letters S/u/s/h/i, NOT the word "Sushi". Kept as a teaching contrast.
grep("[Sushi]", restsDTown$Name, ignore.case = TRUE)
# Break Out 2
#Harry Potter
#1A / 1B
# Pull every <table> from the IMDb full-credits page and parse them all
# into data frames.
hp<-read_html("http://www.imdb.com/title/tt1201607/fullcredits?ref_=tt_ql_1")
hp_table<-html_nodes(hp,"table")
derp<-html_table(hp_table)
# Find the right table
# NOTE(review): the cast table being derp[3] is tied to the live page
# layout at the time this was written — verify if the scrape is re-run.
derp[3]
#1C - Cleaning
a<-data.frame(derp[3])
names(a) <- c("Blank", "Actor", "Blank2","Character")
# Drop the header row (row 1) and the two filler columns; keep Actor/Character.
df<-a[2:length(a$Actor),c("Actor", "Character")]
# NOTE(review): hard-coded row index — assumes Warwick Davis is row 10 on
# the live page; confirm before reuse.
df$Character[10] <- "Griphook / Professor Filius Flitwick"
# 1D -Edit The Cast List
b<-df %>%
slice(-92) %>% # Removes the row that is just noting the rest is alphabetical
separate(Actor, into=c("FirstNames", "Surname"), sep="[ ](?=[^ ]+$)") # Separates the Last Name
#1E
head(b, 10)
#Stars
# Scrape the Dallas Stars roster page; html_table() returns a list of all
# parsed tables on the page.
stars<-read_html("http://www.espn.com/nhl/team/roster/_/name/dal/dallas-stars")
stars_table<-html_nodes(stars, "table")
stars_dfs<-html_table(stars_table, fill = TRUE)
# The roster is split across tables 3, 6, 9, 12 and 15 of the page.
# Bind them in one shot with do.call(rbind, ...) instead of the original
# Rost1..Rost5 temporaries glued together by four successive rbind() calls
# (the grow-by-append anti-pattern, with a quadratic copy cost).
Roster = do.call(rbind, stars_dfs[c(3, 6, 9, 12, 15)])
# API
## Install and load package
# Fixed: guard the install so the script does not re-download WDI (and hit
# the network) on every run — an unconditional install.packages() call in a
# script body is an anti-pattern.
if (!requireNamespace("WDI", quietly = TRUE)) install.packages("WDI")
library(WDI)
## Search the indicator catalogue. NOTE(review): the original comment said
## "fertilizer consumption data" but the query string is the generic "Data";
## the fertilizer series is fetched by indicator code below instead.
WDIsearch("Data")
## Use indicator number to gather data
FertConsumpData <- WDI(indicator="AG.CON.FERT.ZS")
# US male out-of-school rate (lower secondary, 3rd income quartile), 2017-18.
MaleOFSD <- WDI(country = "US", indicator="UIS.ROFST.H.2.Q3.M", start = 2017, end = 2018)
#twitteR
# NOTE(review): credentials are hardcoded and committed — they should be
# rotated and loaded via Sys.getenv() rather than stored in source.
# Fixed: the original api_key literal ended with a stray space ("...ZcN "),
# which would make OAuth authentication fail.
api_key = "rkclWXRZYkZYZbdVdcvzP2ZcN"
api_secret = "ymjMYAkXhXVAL2ci4vTKi3ZFKg72abSKlzBNZq0y6rkXXltsdY"
access_token = "1105487041691815937-IIPDKMmlfGIuRvJgrRfCgiRLtQAfII"
access_token_secret = "mafeLvPRrI8SKBvyq4SJVozfx2wDD0rRkOrASfCoRJUyy"
#Load twitteR
library(twitteR)
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
#Get tweets
# NOTE(review): "$appl" is presumably meant to be Apple's cashtag "$aapl" —
# left as-is; confirm the intended ticker.
tweets = searchTwitter("$appl", n = 10, lang = "en")
#Locations
# Table of WOEIDs for which Twitter publishes trending topics.
trend = availableTrendLocations()
#Get Trends for Location
getTrends(395269) # Caracas, Venezuela
getTrends(2487889) # San Diego, California
getTrends(44418) # London, England
getTrends(2388929) # Dallas, US
DallasTrends = getTrends(2388929) %>% select(name) # Dallas, US
DallasTrends[1:10,]
# World Bank Development Indicators
#Useful URL in explainging WDI XML and JSON data formats.
#https://datahelpdesk.worldbank.org/knowledgebase/articles/898599-indicator-api-queries
#Goal 1: Create a bar chart of topics relating to gdp.
#search for reports with "gdp" in the description
results = as.data.frame(WDIsearch("gdp"))
#Many reports have more than 4 parts of the indicator
# This is in contrast to this documentation:
#https://datahelpdesk.worldbank.org/knowledgebase/articles/201175-how-does-the-world-bank-code-its-indicators
# We use a new function from a new package that we will cover later: str_count
# This function is in the stringr package and simply counts the number of a specific
# character ("\\.") in a given string (indicator)
# The \\ means to literally look for the '.' which means something else in this context.
#This line will filter the data frame to leave only those with 4 pieces in the indicator.
# (4 dot-separated pieces means exactly 3 dots.)
resultsGoodIndicator = results %>% filter(str_count(indicator,"\\.")==3)
#Check out the new data frame with only 4 piece indicators
resultsGoodIndicator$indicator
# Break the indicator code up into 4 distinct columns.
# NOTE(review): the as.data.frame() wrapper is redundant here — the object
# is already a data frame from the filter() step above.
resultsGoodIndicator = as.data.frame(resultsGoodIndicator) %>% separate(indicator,c("topic","general","specific","extension"))
#plot the topic column in a bar chart to see the frequency of each topic.
#compare the expenditure (NE) and the income (NY)
resultsGoodIndicator %>% ggplot(aes(x = topic, fill = topic)) + geom_bar()
#Goal 2: Plot GDP (NY and GDP) per capita (PCAP) of Mexico, Canada and the US in constant US dollars (KD)
dat = WDI(indicator='NY.GDP.PCAP.KD', country=c('MX','CA','US'), start=1960, end=2012)
head(dat)
library(ggplot2)
# One line per country across the 1960-2012 window.
ggplot(dat, aes(x = year, y = NY.GDP.PCAP.KD, color=country)) + geom_line() +
xlab('Year') + ylab('GDP per capita')
#API and json code
######################
# Loading the Data from the NYT API
######################
library(tidyr)
library(plyr)
library(jsonlite)
library(dplyr)
library(tidyverse)
# NOTE(review): API key is hardcoded; prefer Sys.getenv("NYTIMES_KEY").
NYTIMES_KEY = "OG89fUubcS8FXofVrLA4dmIOHh5omiFa" #Your Key Here … get from NYT API website
# Let's set some parameters
term <- "Central+Park+Jogger" # Need to use + to string together separate words
begin_date <- "19890419"
end_date <- "19890521"
# Fixed: dropped the sep="" argument — paste0() has no sep parameter, so
# the original silently concatenated "" into the URL as one more piece.
baseurl <- paste0("http://api.nytimes.com/svc/search/v2/articlesearch.json?q=",term,
                  "&begin_date=",begin_date,"&end_date=",end_date,
                  "&facet_filter=true&api-key=",NYTIMES_KEY)
baseurl
initialQuery <- jsonlite::fromJSON(baseurl)
# The API serves 10 hits per page. Fixed: clamp at 0 — with fewer than ~5
# hits the original produced maxPages = -1, and 0:maxPages counts DOWN,
# so the loop also requested a nonsensical page -1.
maxPages <- max(0, round((initialQuery$response$meta$hits[1] / 10)-1))
pages <- list()
for(i in 0:maxPages){
  nytSearch <- jsonlite::fromJSON(paste0(baseurl, "&page=", i), flatten = TRUE) %>% data.frame()
  message("Retrieving page ", i)
  pages[[i+1]] <- nytSearch
  Sys.sleep(7)  # stay under the NYT API rate limit
}
# Stack the per-page data frames (jsonlite helper that tolerates pages
# with differing column sets).
allNYTSearch <- rbind_pages(pages)
#Segmentation
# Bar chart of article counts by type of material, flipped so the long
# category labels read horizontally.
allNYTSearch %>%
ggplot() +
geom_bar(aes(x=response.docs.type_of_material, fill=response.docs.type_of_material), stat = "count") + coord_flip()
# Visualize coverage by section
# Same breakdown expressed as a percentage of all returned articles.
# The explicit dplyr:: prefix on summarize disambiguates it from
# plyr::summarize, since both packages are attached above.
allNYTSearch %>%
group_by(response.docs.type_of_material) %>%
dplyr::summarize(count=n()) %>%
mutate(percent = (count / sum(count))*100) %>%
ggplot() +
geom_bar(aes(y=percent, x=response.docs.type_of_material, fill=response.docs.type_of_material), stat = "identity") + coord_flip()
|
# Quiz scratch work on the airquality-style hw1_data.csv.
# Fixed: setwd() must point at the containing directory — the original path
# included the file name "hw1_data.csv" and would error.
setwd("C://git_projects//datasciencecoursera//R_Programming//ProgrammingAssignment3//Quiz1")
getwd()
# Fixed: reading every column as character made the numeric comparisons in
# subset() below lexicographic (e.g. Temp > 90 compared strings). Plain
# read.csv() keeps numeric columns numeric (and its sep is already ",";
# the original read.csv2(), whose default sep is ";", needed the override).
outcome_data <- read.csv("hw1_data.csv")
outcome_data[1:3]
head(outcome_data,2)
tail(outcome_data,2)
x <- as.numeric(outcome_data$Ozone)
# Fixed: the original printed the mean() function object, called colMeans()
# on a plain vector (an error), and then took mean() of the function itself.
# The intended quantity is simply the mean of Ozone ignoring NAs:
mean(x, na.rm = TRUE)
# Mean Solar.R among rows with Temp > 90 and Ozone > 31.
sub <- subset(outcome_data, Temp > 90 & Ozone > 31, select = c(Ozone, Temp, Solar.R))
# Renamed the accumulator — the original called it "mean", shadowing the
# base function it was then passed to.
solar <- as.numeric(sub$Solar.R)
mean(solar, na.rm = TRUE)
sub <- c(outcome_data$Ozone, outcome_data$Temp, outcome_data$Solar.R)
# NOTE(review): Month > 90 can never be true (months run 1-12) — this was
# presumably meant to be Temp > 90; left as written pending confirmation.
sub <- subset(outcome_data, Month > 90 & Ozone > 31, select = c(Ozone, Temp, Solar.R))
|
/R_Programming/Quiz1/quiz1_dataset.R
|
no_license
|
mattmoyer4444/datasciencecoursera
|
R
| false
| false
| 683
|
r
|
# Quiz scratch work on the airquality-style hw1_data.csv (verbatim duplicate
# of the block above; same fixes applied).
# Fixed: setwd() must point at the containing directory — the original path
# included the file name "hw1_data.csv" and would error.
setwd("C://git_projects//datasciencecoursera//R_Programming//ProgrammingAssignment3//Quiz1")
getwd()
# Fixed: reading every column as character made the numeric comparisons in
# subset() below lexicographic (e.g. Temp > 90 compared strings). Plain
# read.csv() keeps numeric columns numeric (and its sep is already ",";
# the original read.csv2(), whose default sep is ";", needed the override).
outcome_data <- read.csv("hw1_data.csv")
outcome_data[1:3]
head(outcome_data,2)
tail(outcome_data,2)
x <- as.numeric(outcome_data$Ozone)
# Fixed: the original printed the mean() function object, called colMeans()
# on a plain vector (an error), and then took mean() of the function itself.
# The intended quantity is simply the mean of Ozone ignoring NAs:
mean(x, na.rm = TRUE)
# Mean Solar.R among rows with Temp > 90 and Ozone > 31.
sub <- subset(outcome_data, Temp > 90 & Ozone > 31, select = c(Ozone, Temp, Solar.R))
# Renamed the accumulator — the original called it "mean", shadowing the
# base function it was then passed to.
solar <- as.numeric(sub$Solar.R)
mean(solar, na.rm = TRUE)
sub <- c(outcome_data$Ozone, outcome_data$Temp, outcome_data$Solar.R)
# NOTE(review): Month > 90 can never be true (months run 1-12) — this was
# presumably meant to be Temp > 90; left as written pending confirmation.
sub <- subset(outcome_data, Month > 90 & Ozone > 31, select = c(Ozone, Temp, Solar.R))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.