content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Load data from a 10x Genomics Visium experiment and make it spatialLIBD-ready
#'
#' This function expands [SpatialExperiment::read10xVisium()] to include
#' analysis results from SpaceRanger by 10x Genomics as well as add information
#' needed by `run_app()` to visualize the data with the `spatialLIBD` shiny
#' web application.
#'
#' @param samples Passed to [SpatialExperiment::read10xVisium()].
#' @param sample_id Passed to [SpatialExperiment::read10xVisium()].
#' @param type Passed to [SpatialExperiment::read10xVisium()].
#' @param data Passed to [SpatialExperiment::read10xVisium()].
#' @param images Passed to [SpatialExperiment::read10xVisium()].
#' @param load Passed to [SpatialExperiment::read10xVisium()].
#' @param reference_gtf A `character(1)` specifying the path to the reference
#' `genes.gtf` file. If not specified, it will be automatically inferred from
#' the `web_summary.html` file for the first `samples`.
#' @param chrM A `character(1)` specifying the chromosome name of the
#' mitochondrial chromosome. Defaults to `chrM`.
#' @param gtf_cols A `character()` specifying which columns to keep from the GTF
#' file. `"gene_name"` and `"gene_id"` have to be included in `gtf_cols`.
#' @param verbose A `logical(1)` specifying whether to show progress updates.
#'
#' @return A [SpatialExperiment][SpatialExperiment-class] object with the
#' clustering and dimension reduction (projection) results from SpaceRanger by
#' 10x Genomics as well as other information used by `run_app()` for visualizing
#' the gene expression data.
#' @export
#' @importFrom SpatialExperiment read10xVisium
#' @importFrom rtracklayer import
#' @importMethodsFrom Matrix colSums
#' @importFrom SummarizedExperiment "rowRanges<-" "rowData<-"
#' @importFrom S4Vectors "mcols<-" mcols
#' @importFrom BiocGenerics which
#' @importFrom GenomicRanges seqnames
#' @family Utility functions for reading data from SpaceRanger output by 10x
#' Genomics
#'
#' @examples
#' ## See 'Using spatialLIBD with 10x Genomics public datasets' for
#' ## a full example using this function.
#' if (interactive()) {
#' browseVignettes(package = "spatialLIBD")
#' }
#'
#' ## Note that ?SpatialExperiment::read10xVisium doesn't include all the files
#' ## we need to illustrate read10xVisiumWrapper().
read10xVisiumWrapper <- function(samples = "",
    sample_id = paste0("sample", sprintf("%02d", seq_along(samples))),
    type = c("HDF5", "sparse"),
    data = c("filtered", "raw"),
    images = c("lowres", "hires", "detected", "aligned"),
    load = TRUE,
    reference_gtf = NULL,
    chrM = "chrM",
    gtf_cols = c("source", "type", "gene_id", "gene_version", "gene_name", "gene_type"),
    verbose = TRUE) {
    ## gene_name and gene_id are required downstream (gene_search, rowData)
    stopifnot(all(c("gene_name", "gene_id") %in% gtf_cols))

    ## Infer the reference GTF from the SpaceRanger web summary when none was
    ## supplied. is.null() is more robust than missing() here: it also triggers
    ## the auto-detection when reference_gtf = NULL is passed explicitly or
    ## through do.call().
    if (is.null(reference_gtf)) {
        summary_file <- file.path(samples[1], "web_summary.html")
        web <- readLines(summary_file)
        ## Extract the "Reference Path" reported by SpaceRanger. The character
        ## class covers letters, digits, '/', '_', '.' and '-' (the original
        ## '[/|A-z|0-9|-]' was buggy: 'A-z' spans non-letter characters, the
        ## '|' are literal inside a character class, and '.' was not matched).
        reference_path <- gsub('.*"', "", regmatches(web, regexpr('\\["Reference Path", *"[/A-Za-z0-9_.-]+', web)))
        reference_gtf <- file.path(reference_path, "genes", "genes.gtf")
    }

    ## Keep only paths that exist on disk; exactly one GTF must remain.
    reference_gtf <- reference_gtf[file.exists(reference_gtf)]
    if (length(reference_gtf) > 1) {
        stop("More than one 'reference_gtf' was provided or detected. Manually specify the path to just one 'reference_gtf'. If different GTF files were used, then different genes will have been quantified and thus cannot be merged naively into a single SpatialExperiment object. If that's the case, we recommend you build separate SPE objects based on the different 'reference_gtf' files used.", call. = FALSE)
    } else if (length(reference_gtf) == 0) {
        stop("No 'reference_gtf' files were detected. Please check that the files are available.", call. = FALSE)
    }

    ## Step 1: read the basic SpaceRanger output into a SpatialExperiment
    if (verbose) message(Sys.time(), " SpatialExperiment::read10xVisium: reading basic data from SpaceRanger")
    spe <- SpatialExperiment::read10xVisium(
        samples = samples,
        sample_id = sample_id,
        type = type,
        data = data,
        images = images,
        load = load
    )

    ## Step 2: read and attach the SpaceRanger analysis output (clustering,
    ## dimension reduction projections)
    if (verbose) message(Sys.time(), " read10xVisiumAnalysis: reading analysis output from SpaceRanger")
    visium_analysis <- read10xVisiumAnalysis(
        samples = samples,
        sample_id = sample_id
    )
    if (verbose) message(Sys.time(), " add10xVisiumAnalysis: adding analysis output from SpaceRanger")
    spe <- add10xVisiumAnalysis(spe, visium_analysis)

    ## Step 3: read in the gene information from the annotation GTF file
    if (verbose) message(Sys.time(), " rtracklayer::import: reading the reference GTF file")
    gtf <- rtracklayer::import(reference_gtf)
    gtf <- gtf[gtf$type == "gene"]
    names(gtf) <- gtf$gene_id

    ## Match the SPE genes against the GTF gene IDs
    if (verbose) message(Sys.time(), " adding gene information to the SPE object")
    match_genes <- match(rownames(spe), gtf$gene_id)
    if (all(is.na(match_genes))) {
        ## Protect against the scenario where one set has GENCODE IDs and the
        ## other one has ENSEMBL IDs: retry after stripping the ID versions.
        warning("Gene IDs did not match. This typically happens when you are not using the same GTF file as the one that was used by SpaceRanger. For example, one file uses GENCODE IDs and the other one ENSEMBL IDs. read10xVisiumWrapper() will try to convert them to ENSEMBL IDs.", call. = FALSE)
        match_genes <- match(gsub("\\..*", "", rownames(spe)), gsub("\\..*", "", gtf$gene_id))
    }
    if (any(is.na(match_genes))) {
        warning("Dropping ", sum(is.na(match_genes)), " out of ", length(match_genes), " genes for which we don't have information on the reference GTF file. This typically happens when you are not using the same GTF file as the one that was used by SpaceRanger.", call. = FALSE)
        ## Drop the few genes for which we don't have information
        spe <- spe[!is.na(match_genes), ]
        match_genes <- match_genes[!is.na(match_genes)]
    }

    ## Keep only the requested columns that are actually present in the GTF
    mcols(gtf) <- mcols(gtf)[, gtf_cols[gtf_cols %in% colnames(mcols(gtf))]]
    ## Add the gene info to our SPE object
    rowRanges(spe) <- gtf[match_genes]

    ## Step 4: add the information consumed by spatialLIBD's run_app():
    ## spot keys, per-spot QC metrics, and the gene_search column.
    if (verbose) message(Sys.time(), " adding information used by spatialLIBD")
    spe <- add_key(spe)
    spe$sum_umi <- colSums(counts(spe))
    spe$sum_gene <- colSums(counts(spe) > 0)
    rowData(spe)$gene_search <- paste0(rowData(spe)$gene_name, "; ", rowData(spe)$gene_id)
    ## Mitochondrial expression and its fraction of the total UMI count
    is_mito <- which(seqnames(spe) == chrM)
    spe$expr_chrM <- colSums(counts(spe)[is_mito, , drop = FALSE])
    spe$expr_chrM_ratio <- spe$expr_chrM / spe$sum_umi
    ## Add a variable for saving the manual annotations
    spe$ManualAnnotation <- "NA"
    ## Done!
    return(spe)
}
| /R/read10xVisiumWrapper.R | no_license | LieberInstitute/spatialLIBD | R | false | false | 6,715 | r | #' Load data from a 10x Genomics Visium experiment and make it spatialLIBD-ready
#'
#' This function expands [SpatialExperiment::read10xVisium()] to include
#' analysis results from SpaceRanger by 10x Genomics as well as add information
#' needed by `run_app()` to visualize the data with the `spatialLIBD` shiny
#' web application.
#'
#' @param samples Passed to [SpatialExperiment::read10xVisium()].
#' @param sample_id Passed to [SpatialExperiment::read10xVisium()].
#' @param type Passed to [SpatialExperiment::read10xVisium()].
#' @param data Passed to [SpatialExperiment::read10xVisium()].
#' @param images Passed to [SpatialExperiment::read10xVisium()].
#' @param load Passed to [SpatialExperiment::read10xVisium()].
#' @param reference_gtf A `character(1)` specifying the path to the reference
#' `genes.gtf` file. If not specified, it will be automatically inferred from
#' the `web_summary.html` file for the first `samples`.
#' @param chrM A `character(1)` specifying the chromosome name of the
#' mitochondrial chromosome. Defaults to `chrM`.
#' @param gtf_cols A `character()` specifying which columns to keep from the GTF
#' file. `"gene_name"` and `"gene_id"` have to be included in `gtf_cols`.
#' @param verbose A `logical(1)` specifying whether to show progress updates.
#'
#' @return A [SpatialExperiment][SpatialExperiment-class] object with the
#' clustering and dimension reduction (projection) results from SpaceRanger by
#' 10x Genomics as well as other information used by `run_app()` for visualizing
#' the gene expression data.
#' @export
#' @importFrom SpatialExperiment read10xVisium
#' @importFrom rtracklayer import
#' @importMethodsFrom Matrix colSums
#' @importFrom SummarizedExperiment "rowRanges<-" "rowData<-"
#' @importFrom S4Vectors "mcols<-" mcols
#' @importFrom BiocGenerics which
#' @importFrom GenomicRanges seqnames
#' @family Utility functions for reading data from SpaceRanger output by 10x
#' Genomics
#'
#' @examples
#' ## See 'Using spatialLIBD with 10x Genomics public datasets' for
#' ## a full example using this function.
#' if (interactive()) {
#' browseVignettes(package = "spatialLIBD")
#' }
#'
#' ## Note that ?SpatialExperiment::read10xVisium doesn't include all the files
#' ## we need to illustrate read10xVisiumWrapper().
read10xVisiumWrapper <- function(samples = "",
    sample_id = paste0("sample", sprintf("%02d", seq_along(samples))),
    type = c("HDF5", "sparse"),
    data = c("filtered", "raw"),
    images = c("lowres", "hires", "detected", "aligned"),
    load = TRUE,
    reference_gtf = NULL,
    chrM = "chrM",
    gtf_cols = c("source", "type", "gene_id", "gene_version", "gene_name", "gene_type"),
    verbose = TRUE) {
    ## gene_name and gene_id are required downstream (gene_search, rowData)
    stopifnot(all(c("gene_name", "gene_id") %in% gtf_cols))

    ## Infer the reference GTF from the SpaceRanger web summary when none was
    ## supplied. is.null() is more robust than missing() here: it also triggers
    ## the auto-detection when reference_gtf = NULL is passed explicitly or
    ## through do.call().
    if (is.null(reference_gtf)) {
        summary_file <- file.path(samples[1], "web_summary.html")
        web <- readLines(summary_file)
        ## Extract the "Reference Path" reported by SpaceRanger. The character
        ## class covers letters, digits, '/', '_', '.' and '-' (the original
        ## '[/|A-z|0-9|-]' was buggy: 'A-z' spans non-letter characters, the
        ## '|' are literal inside a character class, and '.' was not matched).
        reference_path <- gsub('.*"', "", regmatches(web, regexpr('\\["Reference Path", *"[/A-Za-z0-9_.-]+', web)))
        reference_gtf <- file.path(reference_path, "genes", "genes.gtf")
    }

    ## Keep only paths that exist on disk; exactly one GTF must remain.
    reference_gtf <- reference_gtf[file.exists(reference_gtf)]
    if (length(reference_gtf) > 1) {
        stop("More than one 'reference_gtf' was provided or detected. Manually specify the path to just one 'reference_gtf'. If different GTF files were used, then different genes will have been quantified and thus cannot be merged naively into a single SpatialExperiment object. If that's the case, we recommend you build separate SPE objects based on the different 'reference_gtf' files used.", call. = FALSE)
    } else if (length(reference_gtf) == 0) {
        stop("No 'reference_gtf' files were detected. Please check that the files are available.", call. = FALSE)
    }

    ## Step 1: read the basic SpaceRanger output into a SpatialExperiment
    if (verbose) message(Sys.time(), " SpatialExperiment::read10xVisium: reading basic data from SpaceRanger")
    spe <- SpatialExperiment::read10xVisium(
        samples = samples,
        sample_id = sample_id,
        type = type,
        data = data,
        images = images,
        load = load
    )

    ## Step 2: read and attach the SpaceRanger analysis output (clustering,
    ## dimension reduction projections)
    if (verbose) message(Sys.time(), " read10xVisiumAnalysis: reading analysis output from SpaceRanger")
    visium_analysis <- read10xVisiumAnalysis(
        samples = samples,
        sample_id = sample_id
    )
    if (verbose) message(Sys.time(), " add10xVisiumAnalysis: adding analysis output from SpaceRanger")
    spe <- add10xVisiumAnalysis(spe, visium_analysis)

    ## Step 3: read in the gene information from the annotation GTF file
    if (verbose) message(Sys.time(), " rtracklayer::import: reading the reference GTF file")
    gtf <- rtracklayer::import(reference_gtf)
    gtf <- gtf[gtf$type == "gene"]
    names(gtf) <- gtf$gene_id

    ## Match the SPE genes against the GTF gene IDs
    if (verbose) message(Sys.time(), " adding gene information to the SPE object")
    match_genes <- match(rownames(spe), gtf$gene_id)
    if (all(is.na(match_genes))) {
        ## Protect against the scenario where one set has GENCODE IDs and the
        ## other one has ENSEMBL IDs: retry after stripping the ID versions.
        warning("Gene IDs did not match. This typically happens when you are not using the same GTF file as the one that was used by SpaceRanger. For example, one file uses GENCODE IDs and the other one ENSEMBL IDs. read10xVisiumWrapper() will try to convert them to ENSEMBL IDs.", call. = FALSE)
        match_genes <- match(gsub("\\..*", "", rownames(spe)), gsub("\\..*", "", gtf$gene_id))
    }
    if (any(is.na(match_genes))) {
        warning("Dropping ", sum(is.na(match_genes)), " out of ", length(match_genes), " genes for which we don't have information on the reference GTF file. This typically happens when you are not using the same GTF file as the one that was used by SpaceRanger.", call. = FALSE)
        ## Drop the few genes for which we don't have information
        spe <- spe[!is.na(match_genes), ]
        match_genes <- match_genes[!is.na(match_genes)]
    }

    ## Keep only the requested columns that are actually present in the GTF
    mcols(gtf) <- mcols(gtf)[, gtf_cols[gtf_cols %in% colnames(mcols(gtf))]]
    ## Add the gene info to our SPE object
    rowRanges(spe) <- gtf[match_genes]

    ## Step 4: add the information consumed by spatialLIBD's run_app():
    ## spot keys, per-spot QC metrics, and the gene_search column.
    if (verbose) message(Sys.time(), " adding information used by spatialLIBD")
    spe <- add_key(spe)
    spe$sum_umi <- colSums(counts(spe))
    spe$sum_gene <- colSums(counts(spe) > 0)
    rowData(spe)$gene_search <- paste0(rowData(spe)$gene_name, "; ", rowData(spe)$gene_id)
    ## Mitochondrial expression and its fraction of the total UMI count
    is_mito <- which(seqnames(spe) == chrM)
    spe$expr_chrM <- colSums(counts(spe)[is_mito, , drop = FALSE])
    spe$expr_chrM_ratio <- spe$expr_chrM / spe$sum_umi
    ## Add a variable for saving the manual annotations
    spe$ManualAnnotation <- "NA"
    ## Done!
    return(spe)
}
|
## Allele-specific CRMAv2 (AS-CRMAv2) analysis of GSE20584 ----
library("aroma.affymetrix")

## Verbose progress reporting with timestamps
verbose <- Arguments$getVerbose(-8, timestamp = TRUE)

## Setup: locate the CEL set for the full GenomeWideSNP_6 chip ----
dataSet <- "GSE20584"
chipType <- "GenomeWideSNP_6,Full"
csR <- AffymetrixCelSet$byName(dataSet, chipType = chipType)
print(csR)

## Run allele-specific CRMAv2 and export the PSCN binary set ----
dsNList <- doASCRMAv2(csR, verbose = verbose)
print(dsNList)
dsN <- exportAromaUnitPscnBinarySet(dsNList)
print(dsN)
| /aroma.affymetrix/inst/testScripts/complete/dataSets/GSE20584/21.doASCRMAv2.R | no_license | ingted/R-Examples | R | false | false | 854 | r | ##########################################################################
## Allele-specific CRMAv2 (AS-CRMAv2) analysis of GSE20584 ----
library("aroma.affymetrix")

## Verbose progress reporting with timestamps
verbose <- Arguments$getVerbose(-8, timestamp = TRUE)

## Setup: locate the CEL set for the full GenomeWideSNP_6 chip ----
dataSet <- "GSE20584"
chipType <- "GenomeWideSNP_6,Full"
csR <- AffymetrixCelSet$byName(dataSet, chipType = chipType)
print(csR)

## Run allele-specific CRMAv2 and export the PSCN binary set ----
dsNList <- doASCRMAv2(csR, verbose = verbose)
print(dsNList)
dsN <- exportAromaUnitPscnBinarySet(dsNList)
print(dsN)
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(7.31782994144163e-304, -1.46852425903251e+173, -1.53732818170537e+173, -5.59219752033303e+72, 5.51013241609643e-40, 3.48121950361978e-313, -4.18553736315947e+71, -4.25255837650091e+71, 0.000202180482941841, 2.00996886273231e-162, 3.15252492765492e-243, -1.94322460467577e-157, 5.2260389062578e-302, 9.53708019597101e-228, 1.75353380558032e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615857873-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 663 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(7.31782994144163e-304, -1.46852425903251e+173, -1.53732818170537e+173, -5.59219752033303e+72, 5.51013241609643e-40, 3.48121950361978e-313, -4.18553736315947e+71, -4.25255837650091e+71, 0.000202180482941841, 2.00996886273231e-162, 3.15252492765492e-243, -1.94322460467577e-157, 5.2260389062578e-302, 9.53708019597101e-228, 1.75353380558032e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe Re-export}
\description{
re-export magrittr pipe operator
}
\keyword{internal}
| /man/grapes-greater-than-grapes.Rd | permissive | mikejohnson51/SRCgeneration | R | false | true | 204 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe Re-export}
\description{
re-export magrittr pipe operator
}
\keyword{internal}
|
## Setup: working directory, packages, and the O2 logger record ----
setwd("N:/Thesis.N/FreshStart_trans/TempLight_trans/TempLightSum17_trans/JA23jul17")
## Install 'chron' only when it is missing, rather than re-installing on every run
if (!requireNamespace("chron", quietly = TRUE)) install.packages("chron")
library(chron)
## NOTE(review): read.table() defaults to whitespace separation; if
## JA23jul17.csv is truly comma-separated this should be read.csv() --
## confirm against the raw file.
StreamData <- read.table("JA23jul17.csv", header = TRUE) ## time is in seconds since 1970 and in UTC
## convert seconds since 1970 to chron days and shift from UTC to local time (UTC-7)
StreamData$dtime <- chron(StreamData$time / 86400) - (7 / 24)
## Check the new chron object dtime and the O2 data.
## This is where you can target and remove outliers before running the models.
plot(StreamData$dtime, StreamData$oxy, cex.lab = 1.5, cex.axis = 1.5, lwd = 1, ylim = c(0, 9.5), xlab = "Time", ylab = "Dissolved O2 (mg/L)")
plot(StreamData$dtime, StreamData$oxy, cex.lab = 1.5, cex.axis = 1.5, lwd = 1, xlab = "Time", ylab = "Dissolved O2 (mg/L)")
# END data important and management
# [2] LOAD O2 SATURATION FUNCTION
osat <- function(temp, bp) {
    ## Oxygen saturation concentration (mg/L) at water temperature `temp`
    ## (deg C) and barometric pressure `bp` (mm Hg): a fifth-order polynomial
    ## in scaled temperature plus a vapor-pressure correction.
    scaled_temp <- log((298.15 - temp) / (273.15 + temp))
    ## polynomial coefficients (same values as the original a0..a5)
    coefs <- c(2.00907, 3.22014, 4.0501, 4.94457, -0.256847, 3.88767)
    poly <- coefs[1] + coefs[2] * scaled_temp + coefs[3] * scaled_temp^2 +
        coefs[4] * scaled_temp^3 + coefs[5] * scaled_temp^4 + coefs[6] * scaled_temp^5
    ## water vapor pressure (mm Hg) at `temp`
    vapor <- 10^(8.10765 - (1750.286 / (235 + temp)))
    ## pressure correction and mL/L -> mg/L conversion (x 1.42905)
    exp(poly) * ((bp - vapor) / (760 - vapor)) * 1.42905
}
#end of function
# END loading O2 SAT calc #
# [3] LOAD BAROMETRIC PRESSURE FUNCTION
## Function to correct barometric pressure for altitude. From Colt (2012).
## This function gives bp in mmHg for altitude given nearby measurement of standardized barometric pressure.
## temp is degC
## alt is m
## bpst is in inches of Hg and the sort of data you will find from U.S. weather stations. Delete the *25.4 if you in the rest of the world where pressure is given in metric units
####function returns mm of Hg
bpcalc <- function(bpst, alt) {
    ## Convert a station barometric pressure `bpst` (inches Hg, as reported by
    ## U.S. weather stations) to mm Hg at altitude `alt` (m) using the
    ## barometric formula at a 15 deg C standard temperature (Colt 2012).
    mm_hg_sea_level <- bpst * 25.4
    mm_hg_sea_level * exp((-9.80665 * 0.0289644 * alt) / (8.31447 * (273.15 + 15)))
}
#end of function
# END loading BP calc function #######
# [4] LOAD GAS EXCHANGE (K) FUNCTIONS
# NOTE: The functions you use will depend on the methods used to estimate K (O2, propane, SF6). The temperature correction (Kcor) is embedded in the models below, but the Kcor function must be loaded into R before running the model.
# UNITS are day^(-1)
## This code does the opposite of Kcor below; it estimates K600 for KO2 at a given temperature. From Wanninkhof (1992).
K600fromO2 <- function(temp, KO2) {
    ## Convert a measured oxygen gas-exchange rate KO2 (d^-1) at water
    ## temperature `temp` (deg C) to the standardized K600 via Schmidt-number
    ## scaling with exponent -0.5 (Wanninkhof 1992).
    schmidt_o2 <- 1800.6 - (120.1 * temp) + (3.7818 * temp^2) - (0.047608 * temp^3)
    KO2 * (600 / schmidt_o2)^-0.5
}
#end of function
# This calculates K600 from K measured in a propane addition. From Raymond et al. (2012).
K600frompropane <- function(temp, Kpropane) {
    ## Convert K measured in a propane gas addition (d^-1) at water
    ## temperature `temp` (deg C) to K600 via Schmidt-number scaling
    ## (Raymond et al. 2012).
    schmidt_propane <- 2864 - (154.14 * temp) + (3.791 * temp^2) - (0.0379 * temp^3)
    Kpropane * (600 / schmidt_propane)^-0.5
}
#end of function
# This calculates K600 from K measured in a SF6 addition. From Raymond et al. (2012).
K600fromSF6 <- function(temp, KSF6) {
    ## Convert K measured in an SF6 gas addition (d^-1) at water temperature
    ## `temp` (deg C) to K600 via Schmidt-number scaling (Raymond et al. 2012).
    schmidt_sf6 <- 3255.3 - (217.13 * temp) + (6.837 * temp^2) - (0.08607 * temp^3)
    KSF6 * (600 / schmidt_sf6)^-0.5
}
#end of function
# This function calculates KO2 at any given tempertaure from K600. via schmidt number scaling. The scaling equation if From Jähne et al. (1987), Schmidt number conversions from Wanninkhof et al. 1992.
Kcor <- function(temp, K600) {
    ## Convert a standardized K600 to KO2 (d^-1) at water temperature `temp`
    ## (deg C) via Schmidt-number scaling; the inverse of K600fromO2()
    ## (Jahne et al. 1987; Wanninkhof 1992).
    schmidt_o2 <- 1800.6 - (temp * 120.1) + (3.7818 * temp^2) - (0.047608 * temp^3)
    K600 / (600 / schmidt_o2)^-0.5
}
#end of function
# END loading K functions
# [5] NIGHTTIME REGRESSION to estimate K -- OPTIONAL
### nighttime regression code to estimate K. Approach as in Hornberger and Kelly (1975).
## o2 file is your oxygen data (defined in subsetting)
## bp is barometric pressure in mm Hg for your site,
## ts is the time step in MINUTES (not days as in metabolism code below)
nightreg <- function(o2file, bp, ts) {
    ## Nighttime regression (Hornberger & Kelly 1975) to estimate gas exchange.
    ## o2file: data frame with columns `temp` (deg C) and `oxy` (mg/L)
    ## bp:     barometric pressure (mm Hg)
    ## ts:     time step of the O2 record in MINUTES (unlike the metabolism
    ##         functions below, where ts is in days)
    ## Returns list(lm coefficients, K600) where K600 (d^-1) is converted from
    ## the KO2 slope at the mean water temperature.
    temp <- o2file$temp
    oxy <- o2file$oxy
    ## 3-point centered moving average to smooth the O2 record; stats::filter
    ## is namespaced explicitly so dplyr::filter cannot mask it when dplyr is
    ## attached (a common silent failure in analysis sessions).
    oxyf1 <- stats::filter(o2file$oxy, rep(1 / 3, 3), sides = 2)
    ## trim the NA ends introduced by the centered filter
    oxyf2 <- oxyf1[c(-1, -length(oxyf1))]
    ## dO2/dt, converted from per-ts-minutes to per-day (x 1440 min/day)
    deltaO2 <- ((oxyf2[-1] - oxyf2[-length(oxyf2)]) / ts) * 1440
    ## trim temp to align with the filtered and differenced O2 series
    temptrim <- temp[c(-2:-1, -length(temp))]
    ## oxygen saturation deficit; its regression slope against dO2/dt is KO2
    satdef <- osat(temptrim, bp) - oxyf2[-1]
    ## fit and plot the linear model; always inspect this fit -- the approach
    ## works better for some sites/dates than others
    nreg <- lm(deltaO2 ~ satdef)
    plot(satdef, deltaO2)
    abline(nreg)
    coeff <- coef(nreg)
    ## return lm coefficients and K600 converted from the KO2 slope
    out <- list(coeff, K600fromO2(mean(temp), coeff[2]))
    out
}
#end of function
# NOTE: this approach works better for some sites/dates than others; always check that your model fit is good and that your K600 estimate makes sense!
# Call as: the first argument subsets StreamData to the night window of
# interest; here the record from 07/19/17 18:10 to 07/21/17 10:00 is used,
# with bp in mm Hg and ts in MINUTES.
# NOTE(review): bp = 660 here but bp = 719 in the metabolism calls below --
# confirm which barometric pressure is correct for this site.
nightreg(StreamData[StreamData$dtime>=as.numeric(chron(dates="07/19/17", times="18:10:00")) & StreamData$dtime<=as.numeric(chron(dates="07/21/17", times="10:00:00")), ], bp=660, ts=10)
# END nighttime regression calculation of K #
# Visual check of the PAR record before it is used to partition daily GPP
plot(StreamData$dtime, StreamData$light,cex.lab=1.5, cex.axis=1.5, lwd=1, xlab="Time",ylab="PAR (umol photons/s/m2)")
# END light
# [7] MODEL v1 - RIVERMETABK; solves for GPP, ER, AND K
##
# [7a] LOAD RIVERMETABK functions
###########################################
## VERSION 1 of possible metabolism model
##### now estimate metabolism
## parameter names (and units) for BOTH rivermetab functions (solving for or fixing K600):
## oxy.mod = modeled O2 (mg/L)
## oxy = O2 data (mg/L)
## MET[1] = GPP = estimated daily gross primary production (g O2 m-2 d-1); + flux of O2 production
## MET[2] = ER = estimated daily ecosystem respiration (g O2 m-2 d-1); - flux of O2 consumption
## MET[3] = K = estimated K600 (d-1)
## z = estimated site depth (m)
## Kcor = KO2 corrected for temperature, calculated from K600 (d-1)
## bp = barometric pressure (mmHg)
## temp = water temperature (C)
## light = PAR data (or modlight from above light function)
## ts = time step of O2 data from logger (10 min intervals --> units are day, so 10/1440=0.06944)
# Model to calculate GPP, ER and K simultaneously
# This model is advantageous in the sense that one can estimate K from the data, but beware that it may be an overparameterized model.
# Note that you have the opportunity below to use your light data or modeled light (here as data$light estimated for spring creek and french creek with the light model function above). You decide and modify the function and data names accordingly.
# rivermetabK is the master function that calls the MLE function (onestationmleK) and plotting function (onestationplot) below
## Master one-station metabolism model that estimates GPP, ER, AND K600
## simultaneously by maximum likelihood (nlm() on onestationmleK below).
## Arguments:
##   o2file - data frame with columns temp (deg C), oxy (mg/L), light (PAR)
##   z      - depth (m); bp - barometric pressure (mm Hg)
##   ts     - time step of the O2 record in DAYS (e.g. 10 min = 0.006944)
## Returns list(GPP, ER, K600, neglogL); GPP/ER in g O2 m-2 d-1, K600 in d-1.
## NOTE(review): estimating K from the same diel O2 record can be
## over-parameterized; compare against an independent K estimate.
rivermetabK<-function(o2file, z, bp, ts){
##pull data out of loaded o2file (subset in CALL function) and give it a name.
temp<-o2file$temp
oxy<-o2file$oxy
light<-o2file$light
##This calls onestationmleK (below) to calculate metabolism by non-linear minimization. We use nlm() function in R stats package; for more information see "help(nlm)" The first argument is the function to be minimized, the second defines the starting values. The function that is minimized (onestationmleK, see below) always has as its first argument, a vector of parameters (MET), and second argument a vector of starting values for those parameters, p=c(3,-5, 10).
river.mle<-nlm(onestationmleK, p=c(3,-5, 10), oxy=oxy, z=z,temp=temp,light=light, bp=bp, ts=ts)
##plot modeled and measured O2 given MLE estimates of GPP, ER, and K600. It calls a function below onestationplot()
onestationplot(GPP=river.mle$estimate[1], ER=river.mle$estimate[2],oxy=oxy,z=z,temp=temp,light=light, K=river.mle$estimate[3], bp=bp, ts=ts)
##return GPP, ER, K600, and MLE values
b <- list(GPP=river.mle$estimate[1], ER=river.mle$estimate[2], K600=river.mle$estimate[3], neglogL=river.mle$minimum[1])
b
}
# end of function
# This function returns the negative log likelihood value given O2 data and estimates of GPP, ER, and K (which is vector MET); is included in master rivermetabK function above
## Negative log-likelihood of the O2 record given MET = c(GPP, ER, K600).
## Called by rivermetabK() via nlm(). The O2 series is modeled forward in time
## from the first observation, so the data enter the forward model only
## through oxy[1]; everything else is compared via the likelihood.
## Units: GPP/ER in g O2 m-2 d-1, K600 in d-1, ts in days, z in m, bp in mm Hg.
onestationmleK<-function(MET,temp, oxy, light, z, bp, ts) {
# preallocate the modeled O2 series. Bug fix: the original used
# numeric(length(data)), but `data` is not an argument of this function (it
# resolved to the base function data(), giving length 1); the model only
# worked through accidental vector growth inside the loop.
oxy.mod<-numeric(length(oxy))
# give starting value from oxygen data; this is the only time O2 data is used to model GPP and ER
oxy.mod[1]<-oxy[1]
# this is the metabolism equation as in Van de Bogert et al 2007 L&OMethods
for (i in 2:length(oxy)) {oxy.mod[i]<-oxy.mod[i-1]+((MET[1]/z)*(light[i]/sum(light)))+ MET[2]*ts/z+(Kcor(temp[i],MET[3]))*ts*(osat(temp[i],bp)-oxy.mod[i-1]) }
##below is MLE calculation; output is -log likelihood
# squared differences between measured and modeled O2
sqdiff<-(oxy-oxy.mod)^2
# Gaussian negative log-likelihood (6.28 approximates 2*pi)
L <- length(oxy) * (log(((sum(sqdiff)/length(oxy))^0.5))+0.5*log(6.28)) + ((2*sum(sqdiff)/length(oxy))^-1) * sum(sqdiff)
L
}
# end of function
# this function plots modeled O2 and O2 data from estimates of daily GPP, ER, and K; is included in master rivermetabK function above
# Calls same metabolism equation as in mle function, but plots modeled O2 as a function of GPP, ER, and K estimates from mle
# use this to visually assess your model estimates (should be good agreement between modeled and measured O2)
# Plot modeled O2 (line) against observed O2 (points) for given daily GPP, ER
# and K600 estimates, to visually assess model fit (modeled and measured O2
# should agree closely). Uses the same forward model as the MLE functions.
# NOTE: a second onestationplot() defined later in this file overrides this
# version when the whole script is sourced; that one also prints an lm()
# summary of observed vs modeled O2.
onestationplot<-function(GPP, ER, oxy, z, temp, K, light, bp, ts) {
# preallocate and seed the modeled O2 series with the first observation
oxy.mod<-numeric(length(oxy))
oxy.mod[1]<-oxy[1]
# this is the metabolism equation as in Van de Bogert et al (2007) L&OMethods
for (i in 2:length(oxy)) { oxy.mod[i]<-oxy.mod[i-1]+((GPP/z)*(light[i]/sum(light)))+ ER*ts/z+(Kcor(temp[i],K))*ts*(osat(temp[i],bp)-oxy.mod[i-1]) }
plot(seq(1:length(oxy)),oxy.mod, type="l",xlab="Time", ylab="Dissolved oxygen (mg/L)", cex.lab=1.5, cex.axis=1.5, lwd=2 )
points(seq(1:length(oxy)),oxy)
}
# end of function
####### END LOADING rivermetabK function #
# [7b] CALL RIVERMETABK function
###########################################
## Call as: z is depth in m, bp is in mm Hg, ts is time steps in days.
# for spring creek data; 10/28/14
###########################################
## Call as: z is depth in m, bp is in mm Hg, ts is time steps in days.
# for spring creek data; 10/28/14
## One-station metabolism model with K600 FIXED (argument K, d^-1); estimates
## only GPP and ER by maximum likelihood (nlm() on onestationmle below).
## Arguments:
##   o2file - data frame with columns temp (deg C), oxy (mg/L), light (PAR)
##   z      - depth (m); bp - barometric pressure (mm Hg)
##   ts     - time step of the O2 record in DAYS (e.g. 10 min = 0.006944)
## Returns list(GPP, ER, neglogL); GPP and ER in g O2 m-2 d-1.
rivermetab<-function(o2file, z, bp, ts, K){
##pull data out of loaded o2file (subset in CALL function) and give it a name.
temp<-o2file$temp
oxy<-o2file$oxy
light<-o2file$light
##calculate metabolism by non linear minimization of MLE function (below)
river.mle<-nlm(onestationmle, p=c(3,-5), oxy=oxy, z=z,temp=temp,light=light, bp=bp, ts=ts, K=K)
##plot data; uses same plot function as given for rivermetabK above (relisted below to keep each model as own unit)
onestationplot(GPP=river.mle$estimate[1], ER=river.mle$estimate[2],oxy=oxy,z=z,temp=temp,light=light, K=K, bp=bp, ts=ts)
##return GPP, ER, and MLE value
b<-list(GPP= river.mle$estimate[1], ER= river.mle$estimate[2], neglogL= river.mle$minimum[1])
b
}
# end of function
# function returns the likelihood value given O2 data and estimates of GPP, ER (which is vector MET); is included in master rivermetab function above; K600 is fixed
## Negative log-likelihood of the O2 record given MET = c(GPP, ER) with K600
## fixed (argument K). Called by rivermetab() via nlm().
## Units: GPP/ER in g O2 m-2 d-1, K in d-1, ts in days, z in m, bp in mm Hg.
onestationmle<-function(MET,temp, oxy, light, z, bp, ts, K) {
# preallocate the modeled O2 series. Bug fix: the original used
# numeric(length(data)), but `data` is not an argument of this function (it
# resolved to the base function data(), giving length 1); the model only
# worked through accidental vector growth inside the loop.
oxy.mod<-numeric(length(oxy))
# seed the forward model with the first observation
oxy.mod[1]<-oxy[1]
# this is the metabolism equation as in Van de Bogert et al 2007 L&OMethods
for (i in 2:length(oxy)) {oxy.mod[i]<-oxy.mod[i-1]+((MET[1]/z)*(light[i]/sum(light)))+ MET[2]*ts/z+(Kcor(temp[i],K))*ts*(osat(temp[i],bp)-oxy.mod[i-1]) }
## below is MLE calculation; output is -log likelihood
# squared differences between measured and modeled O2
sqdiff<-(oxy-oxy.mod)^2
# Gaussian negative log-likelihood (6.28 approximates 2*pi)
L <- length(oxy)*(log(((sum(sqdiff)/length(oxy))^0.5)) +0.5*log(6.28)) + ((2*sum(sqdiff)/length(oxy))^-1)*sum(sqdiff)
L
}
#end of function
# (As in rivermetabK)
# this function plots modeled O2 and O2 data from estimates of daily GPP and ER; is included in master rivermetab function above
# Calls same metabolism equation as in mle function, but plots modeled O2 as a function of GPP, ER estimates from mle
# use this to visually assess your model estimates (should be good agreement between modeled and measured O2)
# Plot modeled O2 (line) against observed O2 (points) for given daily GPP and
# ER estimates with K fixed, to visually assess model fit. This redefinition
# overrides the earlier onestationplot() when the whole script is sourced; it
# additionally prints an lm() summary of observed vs modeled O2.
onestationplot<-function(GPP, ER, oxy, z, temp, K, light, bp, ts) {
# preallocate and seed the modeled O2 series with the first observation
oxy.mod<-numeric(length(oxy))
oxy.mod[1]<-oxy[1]
# this is the metabolism equation as in Van de Bogert et al (2007) L&OMethods
for (i in 2:length(oxy)) { oxy.mod[i]<-oxy.mod[i-1]+((GPP/z)*(light[i]/sum(light)))+ ER*ts/z+(Kcor(temp[i],K))*ts*(osat(temp[i],bp)-oxy.mod[i-1]) }
plot(seq(1:length(oxy)),oxy.mod, type="l",xlab="Time", ylab="Dissolved oxygen (mg/L)", cex.lab=1.5, cex.axis=1.5, lwd=2 )
points(seq(1:length(oxy)),oxy)
# quantify agreement between observed and modeled O2
print(summary(lm(oxy~oxy.mod)))
}
# end of function
####### END loading rivermetab function #######
## Call as: z is depth in m, bp is in mm Hg, ts is the time step in days.
## All three calls below use the same 07/23/17 18:27 -> 07/25/17 09:57 window.
# 10 MIN time step; K600 fixed at a literature-derived value based on slope
rivermetab(o2file=StreamData[StreamData$dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944,
K=80.768051)
# 10 MIN time step; K600 fixed at the author's velocity-derived value
rivermetab(o2file=StreamData[StreamData$dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944,
K=26.8716762)
# 10 MIN time step; full model that also estimates K600 from the O2 record
rivermetabK(o2file=StreamData[ StreamData $dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944)
| /Metabolism Data/TempLight_trans/TempLightSum17_trans/JA23jul17/JAsum17.R | no_license | clayarango/PNW-Headwater-Metabolism | R | false | false | 13,546 | r | setwd("N:/Thesis.N/FreshStart_trans/TempLight_trans/TempLightSum17_trans/JA23jul17")
install.packages("chron")
library(chron)
StreamData=read.table("JA23jul17.csv",header=TRUE) ##time is in seconds since 1970 and in UTC
StreamData$dtime<-chron(StreamData$time/86400)-(7/24)
## Check new chron object dtime and O2 data
## here is where you can target and remove outliers if you want to check before running models
plot(StreamData$dtime, StreamData$oxy,cex.lab=1.5, cex.axis=1.5, lwd=1,ylim=c(0,9.5), xlab="Time",ylab="Dissolved O2 (mg/L)")
plot(StreamData$dtime, StreamData$oxy,cex.lab=1.5, cex.axis=1.5, lwd=1, xlab="Time",ylab="Dissolved O2 (mg/L)")
# END data important and management
# [2] LOAD O2 SATURATION FUNCTION
osat <- function(temp, bp) {
    ## Oxygen saturation concentration (mg/L) at water temperature `temp`
    ## (deg C) and barometric pressure `bp` (mm Hg): a fifth-order polynomial
    ## in scaled temperature plus a vapor-pressure correction.
    scaled_temp <- log((298.15 - temp) / (273.15 + temp))
    ## polynomial coefficients (same values as the original a0..a5)
    coefs <- c(2.00907, 3.22014, 4.0501, 4.94457, -0.256847, 3.88767)
    poly <- coefs[1] + coefs[2] * scaled_temp + coefs[3] * scaled_temp^2 +
        coefs[4] * scaled_temp^3 + coefs[5] * scaled_temp^4 + coefs[6] * scaled_temp^5
    ## water vapor pressure (mm Hg) at `temp`
    vapor <- 10^(8.10765 - (1750.286 / (235 + temp)))
    ## pressure correction and mL/L -> mg/L conversion (x 1.42905)
    exp(poly) * ((bp - vapor) / (760 - vapor)) * 1.42905
}
#end of function
# END loading O2 SAT calc #
# [3] LOAD BAROMETRIC PRESSURE FUNCTION
## Function to correct barometric pressure for altitude. From Colt (2012).
## This function gives bp in mmHg for altitude given nearby measurement of standardized barometric pressure.
## temp is degC
## alt is m
## bpst is in inches of Hg and the sort of data you will find from U.S. weather stations. Delete the *25.4 if you in the rest of the world where pressure is given in metric units
####function returns mm of Hg
bpcalc <- function(bpst, alt) {
    ## Convert a station barometric pressure `bpst` (inches Hg, as reported by
    ## U.S. weather stations) to mm Hg at altitude `alt` (m) using the
    ## barometric formula at a 15 deg C standard temperature (Colt 2012).
    mm_hg_sea_level <- bpst * 25.4
    mm_hg_sea_level * exp((-9.80665 * 0.0289644 * alt) / (8.31447 * (273.15 + 15)))
}
#end of function
# END loading BP calc function #######
# [4] LOAD GAS EXCHANGE (K) FUNCTIONS
# NOTE: The functions you use will depend on the methods used to estimate K (O2, propane, SF6). The temperature correction (Kcor) is embedded in the models below, but the Kcor function must be loaded into R before running the model.
# UNITS are day^(-1)
## This code does the opposite of Kcor below; it estimates K600 for KO2 at a given temperature. From Wanninkhof (1992).
K600fromO2 <- function(temp, KO2) {
  # Convert a measured O2 gas-exchange rate `KO2` (d^-1) at water temperature
  # `temp` (deg C) to the temperature-standardized K600, via Schmidt-number
  # scaling with exponent -0.5 (Wanninkhof 1992).
  schmidt_o2 <- 1800.6 - (120.1 * temp) + (3.7818 * temp^2) - (0.047608 * temp^3)
  ((600 / schmidt_o2)^-0.5) * KO2
}
#end of function
# This calculates K600 from K measured in a propane addition. From Raymond et al. (2012).
K600frompropane <- function(temp, Kpropane) {
  # Convert a propane-addition gas-exchange rate `Kpropane` (d^-1) at water
  # temperature `temp` (deg C) to K600 via Schmidt-number scaling with
  # exponent -0.5 (Raymond et al. 2012).
  schmidt_propane <- 2864 - (154.14 * temp) + (3.791 * temp^2) - (0.0379 * temp^3)
  ((600 / schmidt_propane)^-0.5) * Kpropane
}
#end of function
# This calculates K600 from K measured in a SF6 addition. From Raymond et al. (2012).
K600fromSF6 <- function(temp, KSF6) {
  # Convert an SF6-addition gas-exchange rate `KSF6` (d^-1) at water
  # temperature `temp` (deg C) to K600 via Schmidt-number scaling with
  # exponent -0.5 (Raymond et al. 2012).
  schmidt_sf6 <- 3255.3 - (217.13 * temp) + (6.837 * temp^2) - (0.08607 * temp^3)
  ((600 / schmidt_sf6)^-0.5) * KSF6
}
#end of function
# This function calculates KO2 at any given tempertaure from K600. via schmidt number scaling. The scaling equation if From Jähne et al. (1987), Schmidt number conversions from Wanninkhof et al. 1992.
Kcor <- function(temp, K600) {
  # Convert a standardized K600 (d^-1) to the O2 exchange rate KO2 at water
  # temperature `temp` (deg C) by Schmidt-number scaling (Jahne et al. 1987;
  # Wanninkhof 1992). Inverse of K600fromO2().
  schmidt_o2 <- 1800.6 - (temp * 120.1) + (3.7818 * temp^2) - (0.047608 * temp^3)
  K600 / (600 / schmidt_o2)^-0.5
}
#end of function
# END loading K functions
# [5] NIGHTTIME REGRESSION to estimate K -- OPTIONAL
### nighttime regression code to estimate K. Approach as in Hornberger and Kelly (1975).
## o2 file is your oxygen data (defined in subsetting)
## bp is barometric pressure in mm Hg for your site,
## ts is the time step in MINUTES (not days as in metabolism code below)
nightreg <- function(o2file, bp, ts) {
  # Nighttime-regression estimate of gas exchange (Hornberger & Kelly 1975).
  #   o2file : data frame with `temp` (deg C) and `oxy` (mg/L), night data only
  #   bp     : barometric pressure (mm Hg)
  #   ts     : logger time step in MINUTES (unlike the metabolism models below,
  #            which take the time step in days)
  # Plots dO2/dt against the saturation deficit with the fitted line, and
  # returns a list: lm() coefficients, and K600 (d^-1) converted from the
  # KO2 slope.
  temp <- o2file$temp
  oxy <- o2file$oxy
  # 3-point centered moving average to smooth sensor noise. stats::filter is
  # qualified explicitly so the call still works if dplyr (whose filter()
  # masks it on the search path) has been attached.
  oxyf1 <- stats::filter(o2file$oxy, rep(1/3, 3), sides = 2)
  # drop the NA ends produced by the centered filter
  oxyf2 <- oxyf1[c(-1, -length(oxyf1))]
  # dO2/dt, rescaled from per-minute (ts in min) to per-day (x 1440)
  deltaO2 <- ((oxyf2[-1] - oxyf2[-length(oxyf2)]) / ts) * 1440
  # trim temp to align with the filtered + differenced oxygen series
  temptrim <- temp[c(-2:-1, -length(temp))]
  # oxygen saturation deficit; its slope against dO2/dt is KO2
  satdef <- osat(temptrim, bp) - oxyf2[-1]
  # fit and display the regression
  nreg <- lm(deltaO2 ~ satdef)
  plot(satdef, deltaO2)
  abline(nreg)
  coeff <- coef(nreg)
  # intercept+slope, and K600 from the KO2 slope at the mean temperature
  out <- list(coeff, K600fromO2(mean(temp), coeff[2]))
  out
}
#end of function
# NOTE: this approach works better for some sites/dates than others; always check that your model fit is good and that your K600 estimate makes sense!
#Call as: The first argument in the function defines when to pull data. In this case on 10/27/204 (for spring creek) between 18:05 and 23:00
nightreg(StreamData[StreamData$dtime>=as.numeric(chron(dates="07/19/17", times="18:10:00")) & StreamData$dtime<=as.numeric(chron(dates="07/21/17", times="10:00:00")), ], bp=660, ts=10)
# END nighttime regression calculation of K #
# check that light makes sense
plot(StreamData$dtime, StreamData$light,cex.lab=1.5, cex.axis=1.5, lwd=1, xlab="Time",ylab="PAR (umol photons/s/m2)")
# END light
# [7] MODEL v1 - RIVERMETABK; solves for GPP, ER, AND K
##
# [7a] LOAD RIVERMETABK functions
###########################################
## VERSION 1 of possible metabolism model
##### now estimate metabolism
## parameter names (and units) for BOTH rivermetab functions (solving for or fixing K600):
## oxy.mod = modeled O2 (mg/L)
## oxy = O2 data (mg/L)
## MET[1] = GPP = estimated daily gross primary production (g O2 m-2 d-1); + flux of O2 production
## MET[2] = ER = estimated daily ecosystem respiration (g O2 m-2 d-1); - flux of O2 consumption
## MET[3] = K = estimated K600 (d-1)
## z = estimated site depth (m)
## Kcor = KO2 corrected for temperature, calculated from K600 (d-1)
## bp = barometric pressure (mmHg)
## temp = water temperature (C)
## light = PAR data (or modlight from above light function)
## ts = time step of O2 data from logger (10 min intervals --> units are day, so 10/1440=0.06944)
# Model to calculate GPP, ER and K simultaneously
# This model is advantageous in the sense that one can estimate K from the data, but beware that it may be an overparameterized model.
# Note that you have the opportunity below to use your light data or modeled light (here as data$light estimated for spring creek and french creek with the light model function above). You decide and modify the function and data names accordingly.
# rivermetabK is the master function that calls the MLE function (onestationmleK) and plotting function (onestationplot) below
rivermetabK <- function(o2file, z, bp, ts) {
  # Fit GPP, ER and K600 simultaneously by maximum likelihood (nlm), then
  # plot modeled vs. observed O2.
  #   o2file : data frame with `temp`, `oxy` and `light` columns
  #   z      : site depth (m);  bp : barometric pressure (mm Hg)
  #   ts     : time step of the O2 series in DAYS (e.g. 10 min = 0.006944)
  # Returns a list with GPP, ER, K600 and the minimized neg. log-likelihood.
  # NOTE: estimating K from the same diel curve can be overparameterized;
  # sanity-check the K600 estimate.
  temp <- o2file$temp
  oxy <- o2file$oxy
  light <- o2file$light
  # minimize the negative log-likelihood over c(GPP, ER, K600),
  # starting from GPP = 3, ER = -5, K600 = 10
  river.mle <- nlm(onestationmleK, p = c(3, -5, 10), oxy = oxy, z = z,
                   temp = temp, light = light, bp = bp, ts = ts)
  est <- river.mle$estimate
  # visual check of the fit at the MLE parameter values
  onestationplot(GPP = est[1], ER = est[2], oxy = oxy, z = z, temp = temp,
                 light = light, K = est[3], bp = bp, ts = ts)
  list(GPP = est[1], ER = est[2], K600 = est[3],
       neglogL = river.mle$minimum[1])
}
# end of function
# This function returns the negative log likelihood value given O2 data and estimates of GPP, ER, and K (which is vector MET); is included in master rivermetabK function above
onestationmleK <- function(MET, temp, oxy, light, z, bp, ts) {
  # Negative log-likelihood of the O2 data given MET = c(GPP, ER, K600).
  # Minimized by nlm() inside rivermetabK(); other arguments as documented
  # there.
  # Preallocate the modeled series to the data length. (BUGFIX: the original
  # used numeric(length(data)), where `data` is the base R *function*, so the
  # vector started at length 1 and grew inside the loop.)
  oxy.mod <- numeric(length(oxy))
  # the observed series is used only to seed the first modeled value
  oxy.mod[1] <- oxy[1]
  total_light <- sum(light)  # loop-invariant, hoisted
  # O2 mass balance per Van de Bogert et al. (2007): photosynthesis scaled by
  # the fraction of total light, respiration, and exchange toward saturation
  for (i in 2:length(oxy)) {
    oxy.mod[i] <- oxy.mod[i - 1] + ((MET[1]/z) * (light[i]/total_light)) +
      MET[2]*ts/z + (Kcor(temp[i], MET[3])) * ts * (osat(temp[i], bp) - oxy.mod[i - 1])
  }
  # Gaussian negative log-likelihood of the residuals (6.28 approximates 2*pi)
  sqdiff <- (oxy - oxy.mod)^2
  L <- length(oxy) * (log(((sum(sqdiff)/length(oxy))^0.5)) + 0.5*log(6.28)) +
    ((2*sum(sqdiff)/length(oxy))^-1) * sum(sqdiff)
  L
}
# end of function
# this function plots modeled O2 and O2 data from estimates of daily GPP, ER, and K; is included in master rivermetabK function above
# Calls same metabolism equation as in mle function, but plots modeled O2 as a function of GPP, ER, and K estimates from mle
# use this to visually assess your model estimates (should be good agreement between modeled and measured O2)
onestationplot <- function(GPP, ER, oxy, z, temp, K, light, bp, ts) {
  # Overlay modeled O2 (line) on observed O2 (points) for fixed GPP, ER and K,
  # using the same mass-balance recursion as the MLE objective. Used to
  # eyeball how well the fitted parameters reproduce the diel O2 curve.
  n <- length(oxy)
  oxy.mod <- numeric(n)
  oxy.mod[1] <- oxy[1]
  # O2 mass balance as in Van de Bogert et al. (2007)
  for (i in 2:n) {
    oxy.mod[i] <- oxy.mod[i - 1] + ((GPP/z) * (light[i]/sum(light))) +
      ER*ts/z + (Kcor(temp[i], K)) * ts * (osat(temp[i], bp) - oxy.mod[i - 1])
  }
  plot(seq(1:n), oxy.mod, type = "l", xlab = "Time",
       ylab = "Dissolved oxygen (mg/L)", cex.lab = 1.5, cex.axis = 1.5, lwd = 2)
  points(seq(1:n), oxy)
}
# end of function
####### END LOADING rivermetabK function #
# [7b] CALL RIVERMETABK function
###########################################
## Call as: z is depth in m, bp is im mmHg, ts is time steps in days.
# for spring creek data; 10/28/14
###########################################
## Call as: z is depth in m, bp is im mmHg, ts is time steps in days.
# for spring creek data; 10/28/14
rivermetab <- function(o2file, z, bp, ts, K) {
  # Fit GPP and ER by maximum likelihood with K600 held fixed at `K` (d^-1),
  # then plot modeled vs. observed O2.
  #   o2file : data frame with `temp`, `oxy` and `light` columns
  #   z      : site depth (m);  bp : barometric pressure (mm Hg)
  #   ts     : time step of the O2 series in DAYS
  # Returns a list with GPP, ER and the minimized negative log-likelihood.
  temp <- o2file$temp
  oxy <- o2file$oxy
  light <- o2file$light
  # minimize the negative log-likelihood over c(GPP, ER),
  # starting from GPP = 3, ER = -5
  river.mle <- nlm(onestationmle, p = c(3, -5), oxy = oxy, z = z, temp = temp,
                   light = light, bp = bp, ts = ts, K = K)
  est <- river.mle$estimate
  # visual check of the fit at the MLE parameter values
  onestationplot(GPP = est[1], ER = est[2], oxy = oxy, z = z, temp = temp,
                 light = light, K = K, bp = bp, ts = ts)
  list(GPP = est[1], ER = est[2], neglogL = river.mle$minimum[1])
}
# end of function
# function returns the likelihood value given O2 data and estimates of GPP, ER (which is vector MET); is included in master rivermetab function above; K600 is fixed
onestationmle <- function(MET, temp, oxy, light, z, bp, ts, K) {
  # Negative log-likelihood of the O2 data given MET = c(GPP, ER), with the
  # gas-exchange rate K600 fixed at `K`. Minimized by nlm() inside
  # rivermetab(); other arguments as documented there.
  # Preallocate the modeled series to the data length. (BUGFIX: the original
  # used numeric(length(data)), where `data` is the base R *function*, so the
  # vector started at length 1 and grew inside the loop.)
  oxy.mod <- numeric(length(oxy))
  # the observed series is used only to seed the first modeled value
  oxy.mod[1] <- oxy[1]
  total_light <- sum(light)  # loop-invariant, hoisted
  # O2 mass balance per Van de Bogert et al. (2007)
  for (i in 2:length(oxy)) {
    oxy.mod[i] <- oxy.mod[i - 1] + ((MET[1]/z) * (light[i]/total_light)) +
      MET[2]*ts/z + (Kcor(temp[i], K)) * ts * (osat(temp[i], bp) - oxy.mod[i - 1])
  }
  # Gaussian negative log-likelihood of the residuals (6.28 approximates 2*pi)
  sqdiff <- (oxy - oxy.mod)^2
  L <- length(oxy)*(log(((sum(sqdiff)/length(oxy))^0.5)) + 0.5*log(6.28)) +
    ((2*sum(sqdiff)/length(oxy))^-1)*sum(sqdiff)
  L
}
#end of function
# (As in rivermetabK)
# this function plots modeled O2 and O2 data from estimates of daily GPP and ER; is included in master rivermetab function above
# Calls same metabolism equation as in mle function, but plots modeled O2 as a function of GPP, ER estimates from mle
# use this to visually assess your model estimates (should be good agreement between modeled and measured O2)
onestationplot <- function(GPP, ER, oxy, z, temp, K, light, bp, ts) {
  # Overlay modeled O2 (line) on observed O2 (points) for fixed GPP, ER and K,
  # then print an lm() summary of observed ~ modeled as a crude fit
  # diagnostic. NOTE: sourcing the whole script makes this definition replace
  # the earlier onestationplot() above.
  n <- length(oxy)
  oxy.mod <- numeric(n)
  oxy.mod[1] <- oxy[1]
  # O2 mass balance as in Van de Bogert et al. (2007)
  for (i in 2:n) {
    oxy.mod[i] <- oxy.mod[i - 1] + ((GPP/z) * (light[i]/sum(light))) +
      ER*ts/z + (Kcor(temp[i], K)) * ts * (osat(temp[i], bp) - oxy.mod[i - 1])
  }
  plot(seq(1:n), oxy.mod, type = "l", xlab = "Time",
       ylab = "Dissolved oxygen (mg/L)", cex.lab = 1.5, cex.axis = 1.5, lwd = 2)
  points(seq(1:n), oxy)
  print(summary(lm(oxy ~ oxy.mod)))
}
####### END loading rivermetab function #######
## Call as: z is depth in m, bp is im mmHg, ts is time steps in days.
# 10 MIN timestep!! literature k600 values based on slope
rivermetab(o2file=StreamData[StreamData$dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944,
K=80.768051)
# 10 MIN timestep!! my derived k600 values based on velocity
rivermetab(o2file=StreamData[StreamData$dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944,
K=26.8716762)
# 10 MIN timestep!! full model as is
rivermetabK(o2file=StreamData[ StreamData $dtime>=as.numeric(chron(
dates="07/23/17", times="18:27:00")) & StreamData $dtime<=as.numeric(chron(
dates="07/25/17", times="09:57:00")), ],
z=0.094,
bp=719,
ts=0.006944)
|
# Hello from github
# Minimal scratch script used to test pushing from RStudio to GitHub.
print("Hello from github")
name = "abdul baseer"
age = "25"
# NOTE(review): `ism` and `omr` are never defined in this file, so the two
# print() calls below will fail with "object not found" when run as-is.
print(ism)
print(omr)
print("it is written in Rstudio")
| /testing.r | no_license | abdulbaseer2294/myfirst | R | false | false | 137 | r | # Hello from github
# Minimal scratch script used to test pushing from RStudio to GitHub
# (duplicate copy of the same file content in this dataset dump).
print("Hello from github")
name = "abdul baseer"
age = "25"
# NOTE(review): `ism` and `omr` are never defined in this file, so the two
# print() calls below will fail with "object not found" when run as-is.
print(ism)
print(omr)
print("it is written in Rstudio")
|
# --- Korean word cloud from song lyrics (KoNLP + wordcloud) ----------------
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider moving these calls to a one-time setup step.
install.packages("rJava")
install.packages("memoise")
install.packages("KoNLP")
# KoNLP depends on rJava, which needs a JDK; this path is Windows-specific
Sys.setenv(JAVA_HOME="C:/Program Files/Java/jdk-11.0.2")
library(KoNLP)
library(dplyr)
# load the NIA dictionary used by extractNoun() below
useNIADic()
txt<-readLines("hiphop.txt")
install.packages("stringr")
library(stringr)
# replace non-word characters with spaces before noun extraction
txt<-str_replace_all(txt,"\\W"," ")
# extract Korean nouns per line, then count occurrences across all lines
nouns<-extractNoun(txt)
wordcount<-table(unlist(nouns))
df_word<-as.data.frame(wordcount,stringsAsFactors = F)
df_word<-rename(df_word,
word = Var1,
freq = Freq)
# keep only words of at least two characters
df_word<-filter(df_word,nchar(word)>=2)
install.packages("wordcloud")
library(wordcloud)
library(RColorBrewer)
pal<-brewer.pal(8,"Greens")
# fix layout randomness so the cloud is reproducible
set.seed(1234)
wordcloud(words = df_word$word,
freq = df_word$freq,
min.freq=2,
max.words=200,
random.order=F,
rot.per=.1,
scale=c(4,0.3),
colors=pal)
| /worldcloud.R | no_license | jykyeong99/R-prcatice | R | false | false | 843 | r | install.packages("rJava")
install.packages("memoise")
install.packages("KoNLP")
Sys.setenv(JAVA_HOME="C:/Program Files/Java/jdk-11.0.2")
library(KoNLP)
library(dplyr)
useNIADic()
txt<-readLines("hiphop.txt")
install.packages("stringr")
library(stringr)
txt<-str_replace_all(txt,"\\W"," ")
nouns<-extractNoun(txt)
wordcount<-table(unlist(nouns))
df_word<-as.data.frame(wordcount,stringsAsFactors = F)
df_word<-rename(df_word,
word = Var1,
freq = Freq)
df_word<-filter(df_word,nchar(word)>=2)
install.packages("wordcloud")
library(wordcloud)
library(RColorBrewer)
pal<-brewer.pal(8,"Greens")
set.seed(1234)
wordcloud(words = df_word$word,
freq = df_word$freq,
min.freq=2,
max.words=200,
random.order=F,
rot.per=.1,
scale=c(4,0.3),
colors=pal)
|
\name{gamsim}
\alias{gamsim}
\title{
Simulate example datasets from a generalized additive models (GAM).
}
\description{
This function is a modification of example 7 of the gamSim function available in the mgcv package (Wood, 2017), which in turn is the Gu and Wahba 4-term univariate example with correlated predictors. Please see the source code for exactly what is simulated. The function is primarily used as the basis for conducting the simulation studies in Hui et al., (2018).
}
\usage{
gamsim(n = 400, extra.X = NULL, beta = NULL, dist = "normal", scale = 1, offset = NULL)
}
\arguments{
\item{n}{Sample size.}
\item{extra.X}{Extra covariates, including critically an intercept if is to be included in the linear predictor for the GAM.}
\item{beta}{Regression coefficient estimates.}
\item{dist}{Currently only the "normal", "poisson" or "binomial" corresponding to the binomial distributions are available.}
\item{scale}{Scale parameter in the Normal distribution.}
\item{offset}{This can be used to specify an a-priori known component to be included in the linear predictor during fitting. This should be \code{NULL} or a numeric vector of length equal to \code{n}.}
}
\value{
A data frame containing information such as the simulated responses, covariates, each of the 4 "truth" smooths, and the overall linear predictor.
}
\references{
\itemize{
\item Hui, F. K. C., You, C., Shang, H. L., and Mueller, S. (2018). Semiparametric regression using variational approximations, \emph{Journal of the American Statistical Association}, \bold{forthcoming}.
\item Wood, S. N. (2017) Generalized Additive Models: An Introduction with R (2nd edition). Chapman and Hall/CRC.
} }
\author{
\packageAuthor{vagam}
}
\seealso{
\code{\link{vagam}} for the main fitting function
}
\keyword{datagen}
\examples{
normal_dat = gamsim(n = 40, dist = "normal",
extra.X = data.frame(int = rep(1,40), trt = rep(c(0,1), each = 20)),
beta = c(-1, 0.5))
pois_dat = gamsim(n = 40, dist = "poisson",
extra.X = data.frame(int = rep(1, 40), trt = rep(c(0,1), each = 20)),
beta = c(-1, 0.5))
binom_dat = gamsim(n = 40, dist = "binomial",
extra.X = data.frame(int = rep(1, 40), trt = rep(c(0,1), each = 20)),
beta = c(0, 0.5))
## Please see examples in the help file for the vagam function.
}
| /man/gamsim.Rd | no_license | cran/vagam | R | false | false | 2,398 | rd | \name{gamsim}
\alias{gamsim}
\title{
Simulate example datasets from a generalized additive models (GAM).
}
\description{
This function is a modification of example 7 of the gamSim function available in the mgcv package (Wood, 2017), which in turn is the Gu and Wahba 4-term univariate example with correlated predictors. Please see the source code for exactly what is simulated. The function is primarily used as the basis for conducting the simulation studies in Hui et al., (2018).
}
\usage{
gamsim(n = 400, extra.X = NULL, beta = NULL, dist = "normal", scale = 1, offset = NULL)
}
\arguments{
\item{n}{Sample size.}
\item{extra.X}{Extra covariates, including critically an intercept if is to be included in the linear predictor for the GAM.}
\item{beta}{Regression coefficient estimates.}
\item{dist}{Currently only the "normal", "poisson" or "binomial" corresponding to the binomial distributions are available.}
\item{scale}{Scale parameter in the Normal distribution.}
\item{offset}{This can be used to specify an a-priori known component to be included in the linear predictor during fitting. This should be \code{NULL} or a numeric vector of length equal to \code{n}.}
}
\value{
A data frame containing information such as the simulated responses, covariates, each of the 4 "truth" smooths, and the overall linear predictor.
}
\references{
\itemize{
\item Hui, F. K. C., You, C., Shang, H. L., and Mueller, S. (2018). Semiparametric regression using variational approximations, \emph{Journal of the American Statistical Association}, \bold{forthcoming}.
\item Wood, S. N. (2017) Generalized Additive Models: An Introduction with R (2nd edition). Chapman and Hall/CRC.
} }
\author{
\packageAuthor{vagam}
}
\seealso{
\code{\link{vagam}} for the main fitting function
}
\keyword{datagen}
\examples{
normal_dat = gamsim(n = 40, dist = "normal",
extra.X = data.frame(int = rep(1,40), trt = rep(c(0,1), each = 20)),
beta = c(-1, 0.5))
pois_dat = gamsim(n = 40, dist = "poisson",
extra.X = data.frame(int = rep(1, 40), trt = rep(c(0,1), each = 20)),
beta = c(-1, 0.5))
binom_dat = gamsim(n = 40, dist = "binomial",
extra.X = data.frame(int = rep(1, 40), trt = rep(c(0,1), each = 20)),
beta = c(0, 0.5))
## Please see examples in the help file for the vagam function.
}
|
library(rvest)
library(XML)
library(magrittr)
aurl="https://www.amazon.in/Test-Exclusive-604/product-reviews/B07HGLBZ5R/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber"
amazon_reviews <- NULL
for (i in 1:10){
murl <- read_html(as.character(paste(aurl,i,sep="=")))
rev <- murl %>%
html_nodes(".review-text") %>%
html_text()
amazon_reviews <- c(amazon_reviews,rev)
}
write.table(amazon_reviews,"oneplustt.txt",row.names = F)
x=as.character(amazon_reviews)
#Corpus
install.packages("tm")
library("tm")
x=Corpus(VectorSource(x))
inspect(x[1])
#Data cleaning
x1=tm_map(x,tolower)
inspect(x1[1])
x1=tm_map(x1,removePunctuation)
inspect(x1[1])
x1=tm_map(x1,removeNumbers)
inspect(x1[1])
x1=tm_map(x1,removeWords,stopwords("english"))
inspect(x1[1])
#striping white space
x1=tm_map(x1,stripWhitespace)
inspect(x1[1])
#Term document matrix
#converting unstructured data into structured format using TDM
tdm=TermDocumentMatrix(x1)
dtm=t(tdm)
tdm=as.matrix(tdm)
tdm
#Bar plot
w=rowSums(tdm)
w
w_sub=subset(w,w>=20)
w_sub
barplot(w_sub,las=3,col = rainbow(20))
#Term which repeats in allmost all documetns
x1=tm_map(x1,removeWords,c("also","plus","one","using","get","now","like","can","will","really"))
x1=tm_map(x1,stripWhitespace)
tdm=TermDocumentMatrix(x1)
tdm=as.matrix(tdm)
tdm
w1=rowSums(tdm)
w1
#Word Cloud
install.packages("wordcloud")
library(wordcloud)
wordcloud(words=names(w1),freq=w1)
w_sub1=sort(rowSums(tdm),decreasing = TRUE)
w_sub1
wordcloud(words = names(w_sub1), freq = w_sub1,
random.order = F,
colors = brewer.pal(8, 'Dark2'),
scale = c(1.5,0.5))
#wordcloud(words=names(w_sub1),freq=w_sub1)
#Loading postive and negative dictionaries
pos.words=scan(file.choose(),what="character",comment.char=";")
neg.words=scan(file.choose(),what="character",comment.char=";")
#postive wordcloud
pos.matches=match(names(w_sub1), c(pos.words))
pos.matches=!is.na(pos.matches)
freq_pos=w_sub1[pos.matches]
p_names=names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3,0.5),colors=rainbow(20))
#negative wordcloud
neg.matches=match(names(w_sub1), c(neg.words))
neg.matches=!is.na(neg.matches)
freq_neg=w_sub1[neg.matches]
n_names=names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(2,1.0),colors=rainbow(20))
# Bi gram word clouds
#install.packages("quanteda")
library(quanteda)
#install.packages("Matrix")
library(Matrix)
# Bi gram document term frequency
dtm0_2 <- dfm(unlist(x1),ngrams=3,verbose = F)
tdm0_2 <- t(dtm0_2)
a0 = NULL
for (i1 in 1:ncol(tdm0_2)){ if (sum(tdm0_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
if (length(a0) >0) { tdm0_2 = tdm0_2[, -a0]} else {tdm0_2 = tdm0_2}; dim(tdm0_2)
a0 <- NULL;i1 <- NULL
dtm0_2 <- t(tdm0_2)
# Plot a word cloud of the 120 most frequent terms in a term-document matrix.
#   x1 : a TermDocumentMatrix (or anything as.matrix() can coerce) with
#        terms as rows and documents as columns.
makewordc <- function(x1) {
  freq <- sort(rowSums(as.matrix(x1)), decreasing = TRUE)
  freq.df <- data.frame(word = names(freq), freq = freq)
  # BUGFIX: dev.new() opens a platform-appropriate graphics device; the
  # original windows() call errors on macOS/Linux.
  dev.new()
  wordcloud(freq.df$word[1:120], freq.df$freq[1:120], scale = c(4, .5),
            random.order = F, colors = 1:10)
}
# Bi gram word cloud
makewordc(tdm0_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TF")
# Horizontal bar chart of the 50 most frequent terms in a term-document
# matrix. Returns the ggplot object (so it prints when called at top level).
#   x1 : a TermDocumentMatrix (or anything as.matrix() can coerce) with
#        terms as rows and documents as columns.
words_bar_plot <- function(x1) {
  freq <- sort(rowSums(as.matrix(x1)), decreasing = TRUE)
  freq.df <- data.frame(word = names(freq), freq = freq)
  head(freq.df, 20)  # NOTE(review): value discarded inside a function; no-op
  library(ggplot2)
  # BUGFIX: dev.new() opens a platform-appropriate graphics device; the
  # original windows() call errors on macOS/Linux.
  dev.new()
  ggplot(head(freq.df, 50), aes(reorder(word, freq), freq)) +
    geom_bar(stat = "identity") + coord_flip() +
    xlab("Words") + ylab("Frequency") +
    ggtitle("Most frequent words")
}
# Bi gram barplot on TF
words_bar_plot(tdm0_2)
#Emotion mining
install.packages("syuzhet")
library("syuzhet")
y=readLines(file.choose())
gs=get_sentences(y)
sentiment_vector <- get_sentiment(gs, method = "bing")
sum(sentiment_vector)
mean(sentiment_vector)
summary(sentiment_vector)
# To extract the sentence with the most negative emotional valence
negative <- gs[which.min(sentiment_vector)]
negative
# and to extract the most positive sentence
positive <- gs[which.max(sentiment_vector)]
positive
# plot
plot(sentiment_vector, type = "l", main = "Plot Trajectory",
xlab = "Narrative Time", ylab = "Emotional Valence")
abline(h = 0, col = "red")
ft_values <- get_transformed_values(
sentiment_vector,
low_pass_size = 3,
x_reverse_len = 100,
scale_vals = TRUE,
scale_range = FALSE
)
plot(
ft_values,
type ="h",
main ="LOTR using Transformed Values",
xlab = "Narrative Time",
ylab = "Emotional Valence",
col = "red"
)
# categorize each sentence by eight emotions
###This takes a lot of time#######
nrc_data <- get_nrc_sentiment(gs)
# To view the emotions as a barplot
barplot(sort(colSums(prop.table(nrc_data[, 1:8]))), horiz = T, cex.names = 0.7,
las = 1, main = "Emotions", xlab = "Percentage",
col = 1:8)
| /amazon_review.R | no_license | SarthakChitre/DS | R | false | false | 4,973 | r | library(rvest)
library(XML)
library(magrittr)
aurl="https://www.amazon.in/Test-Exclusive-604/product-reviews/B07HGLBZ5R/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber"
amazon_reviews <- NULL
for (i in 1:10){
murl <- read_html(as.character(paste(aurl,i,sep="=")))
rev <- murl %>%
html_nodes(".review-text") %>%
html_text()
amazon_reviews <- c(amazon_reviews,rev)
}
write.table(amazon_reviews,"oneplustt.txt",row.names = F)
x=as.character(amazon_reviews)
#Corpus
install.packages("tm")
library("tm")
x=Corpus(VectorSource(x))
inspect(x[1])
#Data cleaning
x1=tm_map(x,tolower)
inspect(x1[1])
x1=tm_map(x1,removePunctuation)
inspect(x1[1])
x1=tm_map(x1,removeNumbers)
inspect(x1[1])
x1=tm_map(x1,removeWords,stopwords("english"))
inspect(x1[1])
#striping white space
x1=tm_map(x1,stripWhitespace)
inspect(x1[1])
#Term document matrix
#converting unstructured data into structured format using TDM
tdm=TermDocumentMatrix(x1)
dtm=t(tdm)
tdm=as.matrix(tdm)
tdm
#Bar plot
w=rowSums(tdm)
w
w_sub=subset(w,w>=20)
w_sub
barplot(w_sub,las=3,col = rainbow(20))
#Term which repeats in allmost all documetns
x1=tm_map(x1,removeWords,c("also","plus","one","using","get","now","like","can","will","really"))
x1=tm_map(x1,stripWhitespace)
tdm=TermDocumentMatrix(x1)
tdm=as.matrix(tdm)
tdm
w1=rowSums(tdm)
w1
#Word Cloud
install.packages("wordcloud")
library(wordcloud)
wordcloud(words=names(w1),freq=w1)
w_sub1=sort(rowSums(tdm),decreasing = TRUE)
w_sub1
wordcloud(words = names(w_sub1), freq = w_sub1,
random.order = F,
colors = brewer.pal(8, 'Dark2'),
scale = c(1.5,0.5))
#wordcloud(words=names(w_sub1),freq=w_sub1)
#Loading postive and negative dictionaries
pos.words=scan(file.choose(),what="character",comment.char=";")
neg.words=scan(file.choose(),what="character",comment.char=";")
#postive wordcloud
pos.matches=match(names(w_sub1), c(pos.words))
pos.matches=!is.na(pos.matches)
freq_pos=w_sub1[pos.matches]
p_names=names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3,0.5),colors=rainbow(20))
#negative wordcloud
neg.matches=match(names(w_sub1), c(neg.words))
neg.matches=!is.na(neg.matches)
freq_neg=w_sub1[neg.matches]
n_names=names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(2,1.0),colors=rainbow(20))
# Bi gram word clouds
#install.packages("quanteda")
library(quanteda)
#install.packages("Matrix")
library(Matrix)
# Bi gram document term frequency
dtm0_2 <- dfm(unlist(x1),ngrams=3,verbose = F)
tdm0_2 <- t(dtm0_2)
a0 = NULL
for (i1 in 1:ncol(tdm0_2)){ if (sum(tdm0_2[, i1]) == 0) {a0 = c(a0, i1)} }
length(a0) # no. of empty docs in the corpus
if (length(a0) >0) { tdm0_2 = tdm0_2[, -a0]} else {tdm0_2 = tdm0_2}; dim(tdm0_2)
a0 <- NULL;i1 <- NULL
dtm0_2 <- t(tdm0_2)
makewordc = function(x1){
freq = sort(rowSums(as.matrix(x1)),decreasing = TRUE)
freq.df = data.frame(word=names(freq), freq=freq)
windows()
wordcloud(freq.df$word[1:120], freq.df$freq[1:120],scale = c(4,.5),random.order = F, colors=1:10)
}
# Bi gram word cloud
makewordc(tdm0_2) # We have too see warnings to edit few words
title(sub = "BIGRAM - Wordcloud using TF")
words_bar_plot <- function(x1){
freq = sort(rowSums(as.matrix(x1)),decreasing = TRUE)
freq.df = data.frame(word=names(freq), freq=freq)
head(freq.df, 20)
library(ggplot2)
windows()
ggplot(head(freq.df,50), aes(reorder(word,freq), freq)) +
geom_bar(stat = "identity") + coord_flip() +
xlab("Words") + ylab("Frequency") +
ggtitle("Most frequent words")
}
# Bi gram barplot on TF
words_bar_plot(tdm0_2)
#Emotion mining
install.packages("syuzhet")
library("syuzhet")
y=readLines(file.choose())
gs=get_sentences(y)
sentiment_vector <- get_sentiment(gs, method = "bing")
sum(sentiment_vector)
mean(sentiment_vector)
summary(sentiment_vector)
# To extract the sentence with the most negative emotional valence
negative <- gs[which.min(sentiment_vector)]
negative
# and to extract the most positive sentence
positive <- gs[which.max(sentiment_vector)]
positive
# plot
plot(sentiment_vector, type = "l", main = "Plot Trajectory",
xlab = "Narrative Time", ylab = "Emotional Valence")
abline(h = 0, col = "red")
ft_values <- get_transformed_values(
sentiment_vector,
low_pass_size = 3,
x_reverse_len = 100,
scale_vals = TRUE,
scale_range = FALSE
)
plot(
ft_values,
type ="h",
main ="LOTR using Transformed Values",
xlab = "Narrative Time",
ylab = "Emotional Valence",
col = "red"
)
# categorize each sentence by eight emotions
###This takes a lot of time#######
nrc_data <- get_nrc_sentiment(gs)
# To view the emotions as a barplot
barplot(sort(colSums(prop.table(nrc_data[, 1:8]))), horiz = T, cex.names = 0.7,
las = 1, main = "Emotions", xlab = "Percentage",
col = 1:8)
|
# Project .Rprofile: runs at the start of every R session in this project.
# Only loads tooling for interactive sessions, so Rscript/CI runs stay clean.
if (interactive()) {
    # activate the renv project library so packages resolve locally
    source("renv/activate.R")
    # quietly attach the common development toolchain
    suppressMessages(require(devtools))
    suppressMessages(require(testthat))
    suppressMessages(require(usethis))
}
| /.Rprofile | permissive | jaybee84/projectlive.modules | R | false | false | 165 | rprofile | if (interactive()) {
source("renv/activate.R")
suppressMessages(require(devtools))
suppressMessages(require(testthat))
suppressMessages(require(usethis))
}
|
# RUnit test: a metaXpara object built from the bundled MTBLS79 example data
# should report that QC samples are present.
test_hasQC <- function() {
    mx_para <- new("metaXpara")
    peak_file <- system.file("extdata/MTBLS79.txt", package = "metaX")
    sample_file <- system.file("extdata/MTBLS79_sampleList.txt", package = "metaX")
    mx_para@rawPeaks <- read.delim(peak_file, check.names = FALSE)
    mx_para@sampleListFile <- sample_file
    mx_para <- reSetPeaksData(mx_para)
    checkEquals(hasQC(mx_para), TRUE)
}
| /inst/unitTests/test_hasQC.R | no_license | jaspershen/metaX | R | false | false | 347 | r | test_hasQC=function(){
para <- new("metaXpara")
pfile <- system.file("extdata/MTBLS79.txt",package = "metaX")
sfile <- system.file("extdata/MTBLS79_sampleList.txt",package = "metaX")
para@rawPeaks <- read.delim(pfile,check.names = FALSE)
para@sampleListFile <- sfile
para <- reSetPeaksData(para)
checkEquals(hasQC(para),TRUE)
}
|
# Package attach hook: nudges the user to install the companion `ptdspkg`
# package from GitHub if it is not already installed.
.onAttach <- function(libname, pkgname) {
  # system.file() returns "" when the package is not installed. This avoids
  # building the full installed.packages() matrix just for one lookup, which
  # ?installed.packages itself warns is slow and discourages for this use.
  if (!nzchar(system.file(package = "ptdspkg"))) {
    packageStartupMessage(
      paste0(
        "Please install `ptdspkg` by",
        " `devtools::install_github('SMAC-Group/ptdspkg')`"
      )
    )
  }
}
| /R/zzz.R | no_license | irudnyts/dummypkg | R | false | false | 302 | r | .onAttach <- function(libname, pkgname) {
if (!("ptdspkg" %in% rownames(installed.packages()))) {
packageStartupMessage(
paste0(
"Please install `ptdspkg` by",
" `devtools::install_github('SMAC-Group/ptdspkg')`"
)
)
}
}
|
# Fetching data from - Mother Jones - Mass Shootings Database, 198 --------
library(tidyverse)
library(googlesheets)
library(magrittr)
library(plotly)
library(conflicted)
library(ggthemes)
library(stringr); library(stringi)
library(tidytext)
library(tm)
filter <- dplyr::filter # resolves filter-function conflict
as.Date <- base::as.Date
# register googlesheet from "Mother Jones - Mass Shootings Database 1982 - 2018
gs_mass_shootings <- gs_url(x = "https://docs.google.com/spreadsheets/d/1b9o6uDO18sLxBqPwl_Gh9bnhW-ev_dABH83M5Vb5L8o/edit#gid=0")
## Get data out of registered gs object
mass_shootings <- gs_mass_shootings %>% gs_read(ws = "Sheet1")
# Create state and city variables from location
mass_shootings %<>% separate(col = location, into = c("city", "state"), sep = ", ", remove = FALSE)
# Create a proper Date variable from the date strings - some dates are formatted 3/3/xx, others 3/3/xxxx
mass_shootings %<>% mutate(date = str_replace_all(date, pattern = "([0-9]{1,2}\\/[0-9]{1,2})\\/([0-9]{2}$)", replacement = "\\1\\/20\\2"))
mass_shootings %<>% mutate(date = base::as.Date(date, format = "%m/%d/%Y"))
## Change variable names with spaces or other symbols to underscore
names(mass_shootings) %<>% str_remove_all(pattern = "-|\\(|\\)") %>% str_replace_all(pattern = "[:space:]", replacement = "_") %>% str_replace_all(pattern = "__", replacement = "_")
## Clean up character-type variables
mass_shootings %<>% mutate(prior_signs_mental_health_issues = str_replace_all(prior_signs_mental_health_issues, pattern = "-", replacement = "TBD"),
mental_health_details = str_replace_all(mental_health_details, pattern = "-", replacement = "TBD"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = "-", replacement = "TBD"),
where_obtained = str_replace_all(where_obtained, pattern = "-", replacement = "TBD"),
weapon_details = str_replace_all(mass_shootings$weapon_details, pattern = "^-$", replacement = "Unknown"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = 'Yes \\(\\".+\\"\\)|^\\nYes', replacement = "Yes"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = 'Kelley+', replacement = "Passed federal background check"))
## Clean up gender variable
mass_shootings %<>% mutate(gender = as.factor(gender))
mass_shootings %<>% mutate(gender = fct_collapse(gender, Male = c("M", "Male")))
## Collapse race variable
mass_shootings %<>% mutate(race = as.factor(race)) %>% mutate(race = fct_collapse(race, White = c("White", "white"), Black = c("Black", "black"), Unclear = c("-", "unclear")))
## Add key variable for plotting
mass_shootings %<>% mutate(key = row.names(mass_shootings))
# Plotly map - options ----------------------------------------------------
g <- list(
scope = 'usa',
projection = list(type = 'albers usa'),
showland = TRUE,
landcolor = toRGB("#8C96B3"),
subunitwidth = 1,
countrywidth = 1,
subunitcolor = toRGB("white"),
countrycolor = toRGB("white"),
showlakes = TRUE,
lakecolor = toRGB("#4C567A")
)
m <- list(
l = 0,
r = 5,
b = 5,
t = 30,
pad = 2
)
# Moving average - plot processing ----------------------------------------
library(lubridate)
library(tidyquant)
# Rolling mean
mass_rolling_mean <- mass_shootings %>%
tq_mutate(
# tq_mutate args
select = total_victims,
mutate_fun = rollapply,
# rollapply args
width = 6,
align = "right",
FUN = mean,
# mean args
na.rm = TRUE,
# tq_mutate args
col_rename = "roll_mean"
)
# Number boxes ------------------------------------------------------------
## Preprocessing for value boxes
summary <- base::summary
# Precent gender
percent_gender <- mass_shootings %>% group_by(gender) %>% summarise(count = n()) %>%
mutate(percent = (count/sum(count))*100)
# Precent race
mass_shootings %>% group_by(race) %>% summarise(count = n()) %>% mutate(percent = count/sum(count))
# Text processing ---------------------------------------------------------
## Regex pattern to detect names and potential middle names (M. or full name) - accounts for del, -, III, Mc, etc
name_pattern <- "(?<!the )[A-Z][a-z]+ [A-Z]?[']?[A-Z][a-z]+ [A-Z][a-z]+\\, |^[A-Z|a-z]+ ([A-Z]\\.? )?[A-Z][a-z]+\\, |^[A-Z][a-z]+ [A-Z][a-z]+ (del)? [A-Z][a-z]+\\,? |[A-Z|a-z]+ ([A-Z]\\.? )?[A-Z][a-z]+\\, |[A-Z|a-z]+ [A-Z][a-z]+ III\\, |[A-Z|a-z]+ Mc[A-Z][a-z]+\\, |[A-Z][a-z]+\\-[A-Z][a-z]+ [A-Z][a-z]+\\, |[A-Z][a-z]+ [A-Z][a-z]+ [A-Z][a-z]+\\,? " # perl=TRUE
# NOTE(review): `names` shadows base::names for the remainder of this script.
names <- str_extract_all(mass_shootings$summary, pattern = name_pattern)
## Entry 61 has a weird character in it - manual entry Sulejman Talović grepl(mass_shootings$summary, pattern = "\U{0107}")
#names[61] <- "Sulejman Talović"
#
## New way of dealing with empty name slot that keeps moving
# NOTE(review): str_extract_all() yields character(0), not NULL, for a
# non-match, so is.null() is never TRUE here; ifelse() still ends up
# producing NA via out-of-bounds indexing, but length(x) == 0 would state
# the intent directly - verify before relying on this.
mass_shootings$name <- unlist(lapply(names, function(x) ifelse(is.null(x), NA, x)))
#mass_shootings$name <- names %>% map(1) %>% unlist(use.names = TRUE)# sapply(test, function(x) x[1])
#
# Delete all white spaces and commas in names (trailing ", " left by the pattern)
mass_shootings %<>% mutate(name = str_remove_all(name, pattern = ", $"))
# Dealing with shooter age ------------------------------------------------
# Non-numeric entries become NA (with a coercion warning)
mass_shootings$age <- as.numeric(mass_shootings$age_of_shooter)
# Text analysis -----------------------------------------------------------
## Normalize and clean text
# Remove white space at the beginning of the summary text
mass_shootings %<>% mutate(summary_clean = str_replace(summary, pattern = "(^[:space:]?)", replacement = ""))
# Trim trailing whitespace from the *already trimmed* column.
# Fix: the original read from `summary` again here, silently discarding the
# leading-whitespace trim done on the previous line (compare the correctly
# chained mental_health_clean pipeline below).
mass_shootings %<>% mutate(summary_clean = str_replace(summary_clean, pattern = "([:space:]?$)", replacement = ""))
mass_shootings %<>% mutate(summary_clean = gsub("\r?\n|\r", " ", summary_clean)) # get rid of line breaks
# Collapse any run of whitespace into a single space
mass_shootings %<>% mutate(summary_clean = str_replace_all(summary_clean, pattern = "([:space:]+)", replacement = " "))
## Split into sentences (tidytext: one row per sentence)
sentences <- mass_shootings %>% unnest_tokens(output = sentence, input = summary_clean, token = "sentences")
# (leftover debug print `str(sentences)` removed - it only wrote to the log)
# Number sentences within each year so passages can be located later
mass_sentences <- sentences %>% group_by(year) %>% mutate(linenumber = row_number()) %>% ungroup()
## Creating tidy text one row per word data frame
tidy_mass <- mass_sentences %>% unnest_tokens(output = word, input = sentence)
data("stop_words")
library(tm)
# Also get stopwords from tm package and combine them with tidytext's list
new_stops <- bind_rows(data.frame(word = stopwords("en"), lexicon = c("custom")), stop_words)
# Drop stop words; join key made explicit to avoid the implicit-join message
tidy_mass <- tidy_mass %>% anti_join(new_stops, by = "word")
# Psychological descriptions ----------------------------------------------
## Normalize and clean text (same cleaning pipeline as summary_clean above,
## applied to the mental_health_details column)
# Remove white space in the beginning and end
# NOTE(review): "[:space:]?" trims at most ONE whitespace character per end;
# str_trim() would remove all of it - verify that single-character trimming
# is sufficient for this data.
mass_shootings %<>% mutate(mental_health_clean = str_replace(mental_health_details, pattern = "(^[:space:]?)", replacement = ""))
mass_shootings %<>% mutate(mental_health_clean = str_replace(mental_health_clean, pattern = "([:space:]?$)", replacement = ""))
mass_shootings %<>% mutate(mental_health_clean = gsub("\r?\n|\r", " ", mental_health_clean)) # get rid of line breaks
mass_shootings %<>% mutate(mental_health_clean = str_replace_all(mental_health_clean, pattern = "([:space:]+)", replacement = " "))
## Split into sentences
sentences_health <- mass_shootings %>% unnest_tokens(output = sentence, input = mental_health_clean, token = "sentences")
# Number sentences within each year so passages can be located later
mass_sentences_health <- sentences_health %>% group_by(year) %>% mutate(linenumber = row_number()) %>% ungroup
## Creating tidy text one row per word data frame
tidy_mass_health <- mass_sentences_health %>% unnest_tokens(output = word, input = sentence)
## Removing stop words (new_stops is built in the summary text-analysis section)
tidy_mass_health <- tidy_mass_health %>% anti_join(new_stops)
| /mass_shootings_app/global.R | no_license | mraess/mass_shootings | R | false | false | 8,051 | r |
# Fetching data from - Mother Jones - Mass Shootings Database, 198 --------
library(tidyverse)
library(googlesheets)
library(magrittr)
library(plotly)
library(conflicted)
library(ggthemes)
library(stringr); library(stringi)
library(tidytext)
library(tm)
# With the conflicted package loaded, ambiguous names must be resolved
# explicitly; pin the versions this script relies on.
filter <- dplyr::filter # resolves filter-function conflict
as.Date <- base::as.Date
# register googlesheet from "Mother Jones - Mass Shootings Database 1982 - 2018
gs_mass_shootings <- gs_url(x = "https://docs.google.com/spreadsheets/d/1b9o6uDO18sLxBqPwl_Gh9bnhW-ev_dABH83M5Vb5L8o/edit#gid=0")
## Get data out of registered gs object (worksheet "Sheet1" -> tibble)
mass_shootings <- gs_mass_shootings %>% gs_read(ws = "Sheet1")
# Create state and city variables from location (location itself is kept)
mass_shootings %<>% separate(col = location, into = c("city", "state"), sep = ", ", remove = FALSE)
# Create data variable from date - some dates have 3/3/xx other have 3/3/xxxx
# NOTE(review): two-digit years are unconditionally expanded to 20xx, so an
# entry written "3/3/82" would become 2082 even though the sheet starts in
# 1982 - verify that pre-2000 rows always use four-digit years.
mass_shootings %<>% mutate(date = str_replace_all(date, pattern = "([0-9]{1,2}\\/[0-9]{1,2})\\/([0-9]{2}$)", replacement = "\\1\\/20\\2"))
mass_shootings %<>% mutate(date = base::as.Date(date, format = "%m/%d/%Y"))
## Change variable names with spaces or other symbols to underscore
names(mass_shootings) %<>% str_remove_all(pattern = "-|\\(|\\)") %>% str_replace_all(pattern = "[:space:]", replacement = "_") %>% str_replace_all(pattern = "__", replacement = "_")
## Clean up character-type variables: "-" placeholders become "TBD"/"Unknown",
## and free-text variants of "Yes" are normalized.
# NOTE(review): weapon_details references mass_shootings$weapon_details
# explicitly inside mutate() - works, but the bare column name (as used on
# the surrounding lines) would be the consistent form.
mass_shootings %<>% mutate(prior_signs_mental_health_issues = str_replace_all(prior_signs_mental_health_issues, pattern = "-", replacement = "TBD"),
mental_health_details = str_replace_all(mental_health_details, pattern = "-", replacement = "TBD"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = "-", replacement = "TBD"),
where_obtained = str_replace_all(where_obtained, pattern = "-", replacement = "TBD"),
weapon_details = str_replace_all(mass_shootings$weapon_details, pattern = "^-$", replacement = "Unknown"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = 'Yes \\(\\".+\\"\\)|^\\nYes', replacement = "Yes"),
weapons_obtained_legally = str_replace_all(weapons_obtained_legally, pattern = 'Kelley+', replacement = "Passed federal background check"))
## Recode demographic factors and add a plotting key in one pass.
## gender: factor with "M" merged into "Male".
## race: factor with upper/lower-case spellings merged; "-" and "unclear"
## collapsed into "Unclear".
## key: row names used as a stable per-row id for plotting.
mass_shootings <- mass_shootings %>%
  mutate(
    gender = fct_collapse(as.factor(gender), Male = c("M", "Male")),
    race = fct_collapse(
      as.factor(race),
      White = c("White", "white"),
      Black = c("Black", "black"),
      Unclear = c("-", "unclear")
    ),
    key = row.names(mass_shootings)
  )
# Plotly map - options ----------------------------------------------------
# 'g': geo layout options for the US map (presumably passed as plotly's
# `geo` layout by the shiny app) - Albers USA projection, custom land/lake
# colors, white state ("subunit") and country borders.
g <- list(
scope = 'usa',
projection = list(type = 'albers usa'),
showland = TRUE,
landcolor = toRGB("#8C96B3"),
subunitwidth = 1,
countrywidth = 1,
subunitcolor = toRGB("white"),
countrycolor = toRGB("white"),
showlakes = TRUE,
lakecolor = toRGB("#4C567A")
)
# 'm': plot margin settings (left/right/bottom/top plus padding)
m <- list(
l = 0,
r = 5,
b = 5,
t = 30,
pad = 2
)
# Moving average - plot processing ----------------------------------------
library(lubridate)
library(tidyquant)
# Rolling mean: right-aligned 6-observation rolling average of total_victims.
# tq_mutate() dispatches to rollapply; na.rm = TRUE is forwarded to mean().
mass_rolling_mean <- mass_shootings %>%
tq_mutate(
# tq_mutate args
select = total_victims,
mutate_fun = rollapply,
# rollapply args
width = 6,
align = "right",
FUN = mean,
# mean args
na.rm = TRUE,
# tq_mutate args
col_rename = "roll_mean"
)
# Number boxes ------------------------------------------------------------
## Preprocessing for value boxes
# Re-bind summary to base::summary so later calls are not shadowed by a
# conflicting definition from the packages loaded above.
summary <- base::summary
# Percent gender: share of incidents by shooter gender
percent_gender <- mass_shootings %>%
  group_by(gender) %>%
  summarise(count = n()) %>%
  mutate(percent = (count / sum(count)) * 100)
# Percent race: share of incidents by shooter race.
# Fix: the original computed this table but never assigned it, so the result
# was silently discarded; store it alongside percent_gender for downstream use.
# NOTE(review): unlike percent_gender this proportion is not scaled by 100 -
# formula kept as written; confirm the intended scale.
percent_race <- mass_shootings %>%
  group_by(race) %>%
  summarise(count = n()) %>%
  mutate(percent = count / sum(count))
# Text processing ---------------------------------------------------------
## Regex pattern to detect names and potential middle names (M. or full name) - accounts for del, -, III, Mc, etc
name_pattern <- "(?<!the )[A-Z][a-z]+ [A-Z]?[']?[A-Z][a-z]+ [A-Z][a-z]+\\, |^[A-Z|a-z]+ ([A-Z]\\.? )?[A-Z][a-z]+\\, |^[A-Z][a-z]+ [A-Z][a-z]+ (del)? [A-Z][a-z]+\\,? |[A-Z|a-z]+ ([A-Z]\\.? )?[A-Z][a-z]+\\, |[A-Z|a-z]+ [A-Z][a-z]+ III\\, |[A-Z|a-z]+ Mc[A-Z][a-z]+\\, |[A-Z][a-z]+\\-[A-Z][a-z]+ [A-Z][a-z]+\\, |[A-Z][a-z]+ [A-Z][a-z]+ [A-Z][a-z]+\\,? " # perl=TRUE
# NOTE(review): `names` shadows base::names for the remainder of this script.
names <- str_extract_all(mass_shootings$summary, pattern = name_pattern)
## Entry 61 has a weird character in it - manual entry Sulejman Talović grepl(mass_shootings$summary, pattern = "\U{0107}")
#names[61] <- "Sulejman Talović"
#
## New way of dealing with empty name slot that keeps moving
# NOTE(review): str_extract_all() yields character(0), not NULL, for a
# non-match, so is.null() is never TRUE here; ifelse() still ends up
# producing NA via out-of-bounds indexing, but length(x) == 0 would state
# the intent directly - verify before relying on this.
mass_shootings$name <- unlist(lapply(names, function(x) ifelse(is.null(x), NA, x)))
#mass_shootings$name <- names %>% map(1) %>% unlist(use.names = TRUE)# sapply(test, function(x) x[1])
#
# Delete all white spaces and commas in names (trailing ", " left by the pattern)
mass_shootings %<>% mutate(name = str_remove_all(name, pattern = ", $"))
# Dealing with shooter age ------------------------------------------------
# Non-numeric entries become NA (with a coercion warning)
mass_shootings$age <- as.numeric(mass_shootings$age_of_shooter)
# Text analysis -----------------------------------------------------------
## Normalize and clean text
# Remove white space at the beginning of the summary text
mass_shootings %<>% mutate(summary_clean = str_replace(summary, pattern = "(^[:space:]?)", replacement = ""))
# Trim trailing whitespace from the *already trimmed* column.
# Fix: the original read from `summary` again here, silently discarding the
# leading-whitespace trim done on the previous line (compare the correctly
# chained mental_health_clean pipeline below).
mass_shootings %<>% mutate(summary_clean = str_replace(summary_clean, pattern = "([:space:]?$)", replacement = ""))
mass_shootings %<>% mutate(summary_clean = gsub("\r?\n|\r", " ", summary_clean)) # get rid of line breaks
# Collapse any run of whitespace into a single space
mass_shootings %<>% mutate(summary_clean = str_replace_all(summary_clean, pattern = "([:space:]+)", replacement = " "))
## Split into sentences (tidytext: one row per sentence)
sentences <- mass_shootings %>% unnest_tokens(output = sentence, input = summary_clean, token = "sentences")
# (leftover debug print `str(sentences)` removed - it only wrote to the log)
# Number sentences within each year so passages can be located later
mass_sentences <- sentences %>% group_by(year) %>% mutate(linenumber = row_number()) %>% ungroup()
## Creating tidy text one row per word data frame
tidy_mass <- mass_sentences %>% unnest_tokens(output = word, input = sentence)
data("stop_words")
library(tm)
# Also get stopwords from tm package and combine them with tidytext's list
new_stops <- bind_rows(data.frame(word = stopwords("en"), lexicon = c("custom")), stop_words)
# Drop stop words; join key made explicit to avoid the implicit-join message
tidy_mass <- tidy_mass %>% anti_join(new_stops, by = "word")
# Psychological descriptions ----------------------------------------------
## Normalize and clean text (same cleaning pipeline as summary_clean above,
## applied to the mental_health_details column)
# Remove white space in the beginning and end
# NOTE(review): "[:space:]?" trims at most ONE whitespace character per end;
# str_trim() would remove all of it - verify that single-character trimming
# is sufficient for this data.
mass_shootings %<>% mutate(mental_health_clean = str_replace(mental_health_details, pattern = "(^[:space:]?)", replacement = ""))
mass_shootings %<>% mutate(mental_health_clean = str_replace(mental_health_clean, pattern = "([:space:]?$)", replacement = ""))
mass_shootings %<>% mutate(mental_health_clean = gsub("\r?\n|\r", " ", mental_health_clean)) # get rid of line breaks
mass_shootings %<>% mutate(mental_health_clean = str_replace_all(mental_health_clean, pattern = "([:space:]+)", replacement = " "))
## Split into sentences
sentences_health <- mass_shootings %>% unnest_tokens(output = sentence, input = mental_health_clean, token = "sentences")
# Number sentences within each year so passages can be located later
mass_sentences_health <- sentences_health %>% group_by(year) %>% mutate(linenumber = row_number()) %>% ungroup
## Creating tidy text one row per word data frame
tidy_mass_health <- mass_sentences_health %>% unnest_tokens(output = word, input = sentence)
## Removing stop words (new_stops is built in the summary text-analysis section)
tidy_mass_health <- tidy_mass_health %>% anti_join(new_stops)
|
# NOTE(review): @format stops short of itemizing the 3 variables; add a
# \describe{} block documenting each column once their meanings are confirmed.
#' transliterations_all
#'
#' A dataset containing transliterations
#'
#' @format A data frame with 38883 rows and 3 variables:
#' @source \url{https://github.com/rich-iannone/UnidecodeR/blob/master/data/transliterations_all.rda}
"transliterations_all"
| /R/transliterations_all.R | permissive | emlab-ucsb/startR | R | false | false | 253 | r | #' transliterations_all
#'
#' A dataset containing transliterations
#'
#' @format A data frame with 38883 rows and 3 variables:
#' @source \url{https://github.com/rich-iannone/UnidecodeR/blob/master/data/transliterations_all.rda}
"transliterations_all"
|
# try a custom filter function
# Simulated DESeq2 dataset: 200 genes x 4 samples; the last 50 genes carry
# true signal (betaSD = 2), the first 150 are null (betaSD = 0).
set.seed(1)
dds <- makeExampleDESeqDataSet(n=200, m=4, betaSD=rep(c(0,2),c(150,50)))
dds <- DESeq(dds)
res <- results(dds)
# Multiple-testing settings shared by the custom filter below
method <- "BH"
alpha <- 0.1
# Independent filtering with a custom filter statistic.
#
# res:    results-like object with $pvalue, $baseMean and nrow()
# filter: numeric per-row filter statistic (defaults to res$baseMean)
# alpha:  target FDR level used to count rejections at each candidate cutoff
# method: p.adjust() method, used BOTH for threshold selection and for the
#         final adjusted p-values
#
# Returns `res` with $padj replaced: rows removed by the filter get NA, the
# surviving rows get p-values adjusted among themselves only.
customFilt <- function(res, filter, alpha, method) {
  if (missing(filter)) {
    filter <- res$baseMean
  }
  # Candidate filter quantiles: 0%, 10%, ..., 100%
  theta <- 0:10/10
  cutoff <- quantile(filter, theta)
  # Rejections at each cutoff. Fix: pass `method` through to p.adjust();
  # the original omitted it, so the threshold was chosen with the default
  # ("holm") while the final padj used `method` - an inconsistent criterion.
  numRej <- vapply(
    cutoff,
    function(x) sum(p.adjust(res$pvalue[filter > x], method = method) < alpha, na.rm = TRUE),
    integer(1)
  )
  # Smallest quantile achieving the maximal number of rejections
  threshold <- theta[which.max(numRej)]
  res$padj <- numeric(nrow(res))
  idx <- filter > quantile(filter, threshold)
  res$padj[!idx] <- NA
  res$padj[idx] <- p.adjust(res$pvalue[idx], method=method)
  res
}
# Plug the custom filter into results() and compare its padj values against
# the default independent filtering; points should hug the identity line if
# the two procedures agree.
resCustom <- results(dds, filterFun=customFilt)
plot(res$padj, resCustom$padj);abline(0,1)
| /tests/testthat/test_custom_filt.R | no_license | 12379Monty/DESeq2 | R | false | false | 761 | r | # try a custom filter function
# Simulated DESeq2 dataset: 200 genes x 4 samples; the last 50 genes carry
# true signal (betaSD = 2), the first 150 are null (betaSD = 0).
set.seed(1)
dds <- makeExampleDESeqDataSet(n=200, m=4, betaSD=rep(c(0,2),c(150,50)))
dds <- DESeq(dds)
res <- results(dds)
# Multiple-testing settings shared by the custom filter below
method <- "BH"
alpha <- 0.1
# Independent filtering with a custom filter statistic.
#
# res:    results-like object with $pvalue, $baseMean and nrow()
# filter: numeric per-row filter statistic (defaults to res$baseMean)
# alpha:  target FDR level used to count rejections at each candidate cutoff
# method: p.adjust() method, used BOTH for threshold selection and for the
#         final adjusted p-values
#
# Returns `res` with $padj replaced: rows removed by the filter get NA, the
# surviving rows get p-values adjusted among themselves only.
customFilt <- function(res, filter, alpha, method) {
  if (missing(filter)) {
    filter <- res$baseMean
  }
  # Candidate filter quantiles: 0%, 10%, ..., 100%
  theta <- 0:10/10
  cutoff <- quantile(filter, theta)
  # Rejections at each cutoff. Fix: pass `method` through to p.adjust();
  # the original omitted it, so the threshold was chosen with the default
  # ("holm") while the final padj used `method` - an inconsistent criterion.
  numRej <- vapply(
    cutoff,
    function(x) sum(p.adjust(res$pvalue[filter > x], method = method) < alpha, na.rm = TRUE),
    integer(1)
  )
  # Smallest quantile achieving the maximal number of rejections
  threshold <- theta[which.max(numRej)]
  res$padj <- numeric(nrow(res))
  idx <- filter > quantile(filter, threshold)
  res$padj[!idx] <- NA
  res$padj[idx] <- p.adjust(res$pvalue[idx], method=method)
  res
}
# Plug the custom filter into results() and compare its padj values against
# the default independent filtering; points should hug the identity line if
# the two procedures agree.
resCustom <- results(dds, filterFun=customFilt)
plot(res$padj, resCustom$padj);abline(0,1)
|
# NOTE(review): setwd() with an absolute personal path and install.packages()
# calls inside an analysis script make it non-portable; prefer a
# project-relative working directory and installing packages outside scripts.
setwd("~/Desktop/Data Portfolio/Cyclistic Case Study")
install.packages("tidyverse")
install.packages("lubridate")
install.packages("dplyr")
library(tidyverse)
library(lubridate)
library(dplyr)
library(ggplot2)
#Uploading my data sets
# Twelve monthly trip extracts (June 2020 - May 2021), one CSV per month,
# read from the working directory set above.
jun_2020 <- read_csv("202006-divvy-tripdata.csv")
jul_2020 <- read_csv("202007-divvy-tripdata.csv")
aug_2020 <- read_csv("202008-divvy-tripdata.csv")
sep_2020 <- read_csv("202009-divvy-tripdata.csv")
oct_2020 <- read_csv("202010-divvy-tripdata.csv")
nov_2020 <- read_csv("202011-divvy-tripdata.csv")
dec_2020 <- read_csv("202012-divvy-tripdata.csv")
jan_2021 <- read_csv("202101-divvy-tripdata.csv")
feb_2021 <- read_csv("202102-divvy-tripdata.csv")
mar_2021 <- read_csv("202103-divvy-tripdata.csv")
apr_2021 <- read_csv("202104-divvy-tripdata.csv")
may_2021 <- read_csv("202105-divvy-tripdata.csv")
#Making sure we have consistent column names
# (interactive sanity checks - output is printed, nothing is stored)
colnames(jun_2020)
colnames(jul_2020)
colnames(aug_2020)
colnames(sep_2020)
colnames(oct_2020)
colnames(nov_2020)
colnames(dec_2020)
colnames(jan_2021)
colnames(feb_2021)
colnames(mar_2021)
colnames(apr_2021)
colnames(may_2021)
#Looks like we do
#Now checking to make sure we have consistent data types for each column
# (the str() output motivates the character conversions performed below)
str(jun_2020)
str(jul_2020)
str(aug_2020)
str(sep_2020)
str(oct_2020)
str(nov_2020)
str(dec_2020)
str(jan_2021)
str(feb_2021)
str(mar_2021)
str(apr_2021)
str(may_2021)
#converting start_station_id, end_station_id to chr
# Only the Jun-Nov 2020 files are converted - presumably the later extracts
# already ship these columns as character (verify via the str() output
# above) - so that bind_rows() below doesn't fail on type mismatches.
jun_2020 <- mutate(jun_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
jul_2020 <- mutate(jul_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
aug_2020 <- mutate(aug_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
sep_2020 <- mutate(sep_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
oct_2020 <- mutate(oct_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
nov_2020 <- mutate(nov_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
#double checking I changed these before I stack them
glimpse(jun_2020)
glimpse(jul_2020)
glimpse(aug_2020)
glimpse(sep_2020)
glimpse(oct_2020)
glimpse(nov_2020)
#Stacking all 12 months into one data frame
all_trips12 <- bind_rows(jun_2020,jul_2020,aug_2020,sep_2020,oct_2020,nov_2020,
dec_2020,jan_2021,feb_2021,mar_2021,apr_2021,may_2021)
#Inspecting our new data
# (more printed sanity checks on the combined frame)
glimpse(all_trips12)
colnames(all_trips12)
nrow(all_trips12)
dim(all_trips12)
head(all_trips12)
str(all_trips12)
summary(all_trips12)
#Observations for each
# Ride counts by rider type (member vs casual)
table(all_trips12$member_casual)
# Derive calendar columns from the ride start timestamp.
# `date` is a Date (prints as yyyy-mm-dd); month/day/year/day_of_week are
# character renderings of that same date (format() accepts a Date directly,
# so no extra as.Date() round-trip is needed).
all_trips12$date <- as.Date(all_trips12$started_at)
all_trips12$month <- format(all_trips12$date, "%m")
all_trips12$day <- format(all_trips12$date, "%d")
all_trips12$year <- format(all_trips12$date, "%Y")
all_trips12$day_of_week <- format(all_trips12$date, "%A")
# Adding a ride length column, explicitly in seconds.
# Fix: without units = "secs", difftime() picks its units automatically from
# the data (secs/mins/hours/...), so the scale of every downstream duration
# statistic would depend on the data itself. The descriptive-analysis
# comments below assume seconds.
all_trips12$ride_length <- difftime(all_trips12$ended_at, all_trips12$started_at, units = "secs")
# Double checking columns
str(all_trips12)
# check
is.factor(all_trips12$ride_length)
# Changing ride_length to numeric so we can better use it.
# as.numeric() on a difftime drops the class directly; the original
# as.numeric(as.character(...)) round-trip through text is unnecessary.
all_trips12$ride_length <- as.numeric(all_trips12$ride_length)
# check
is.numeric(all_trips12$ride_length)
# checking for wonky data (na.rm so a stray NA doesn't mask the minimum)
min(all_trips12$ride_length, na.rm = TRUE)
# Removing data that doesn't make sense (negative durations, i.e. rides whose
# end precedes their start) plus any NA durations: an NA inside a logical
# row-subset would otherwise inject all-NA rows into the result.
# new version of data with underscore g for 'good'
all_trips12_g <- all_trips12[!is.na(all_trips12$ride_length) & all_trips12$ride_length >= 0, ]
#Some descriptive analysis (all in seconds)
# NOTE(review): the units actually depend on difftime()'s automatic unit
# choice in the ride_length computation above - verify they are seconds.
#Gives shortest ride, longest ride, mean, median, and 1st & 3rd Quartiles
summary(all_trips12_g$ride_length)
#Comparing members and casual users
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = mean)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = median)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = max)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = min)
#Average ride time each day for members vs casual users
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
#Putting days of the week in order (ordered factor so tables/plots sort Sun-Sat)
all_trips12_g$day_of_week <- ordered(all_trips12_g$day_of_week,
levels=c("Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday",
"Saturday"))
#Average ride time each day for members vs casual users (now in order)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
#Analyzing ridership data by type and weekday
# wday() comes from lubridate (loaded at the top of the script)
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>% #creates weekday field using wday()
group_by(member_casual, weekday) %>% #groups by user type and weekday
summarise(number_of_rides = n() #calculates the number of rides
,average_duration = mean(ride_length)) %>% #calculates the average duration
arrange(member_casual, weekday) #sorts
#Creating a visual for number of rides by rider type
# Side-by-side (dodged) bars: rides per weekday, split by member vs casual
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>%
group_by(member_casual, weekday) %>%
summarise(number_of_rides = n()
,average_duration = mean(ride_length)) %>%
arrange(member_casual, weekday) %>%
ggplot(aes(x = weekday, y = number_of_rides, fill = member_casual)) +
geom_col(position = "dodge") + labs(title = "Number of Rides by Rider Type per Day",
caption = "Data Source: Divvy (Chicago's Bike Share Program)",
x = "Day of the Week",
y = "Number of Rides")
#Visual for average duration
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>%
group_by(member_casual, weekday) %>%
summarise(number_of_rides = n()
,average_duration = mean(ride_length)) %>%
arrange(member_casual, weekday) %>%
ggplot(aes(x = weekday, y = average_duration, fill = member_casual)) +
geom_col(position = "dodge") + scale_y_continuous("Average Duration") +
labs(title = "Average Ride Duration by Rider Type per Day", caption = "Data Source: Divvy (Chicago's Bike Share Program)",
x = "Day of The Week")
#Creating a csv file I can use for further visualization
ride_counts <- aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
write.csv(ride_counts, file = '~/Desktop/Data Portfolio/Cyclistic Case Study/cyclistic_avg_ride_length.csv')
| /cyclistic_analysis.R | no_license | iwill434/data-portfolio | R | false | false | 7,159 | r | setwd("~/Desktop/Data Portfolio/Cyclistic Case Study")
# NOTE(review): install.packages() calls inside an analysis script make it
# slow and non-portable; install dependencies outside the script instead.
install.packages("tidyverse")
install.packages("lubridate")
install.packages("dplyr")
library(tidyverse)
library(lubridate)
library(dplyr)
library(ggplot2)
#Uploading my data sets
# Twelve monthly trip extracts (June 2020 - May 2021), one CSV per month,
# read from the working directory set above.
jun_2020 <- read_csv("202006-divvy-tripdata.csv")
jul_2020 <- read_csv("202007-divvy-tripdata.csv")
aug_2020 <- read_csv("202008-divvy-tripdata.csv")
sep_2020 <- read_csv("202009-divvy-tripdata.csv")
oct_2020 <- read_csv("202010-divvy-tripdata.csv")
nov_2020 <- read_csv("202011-divvy-tripdata.csv")
dec_2020 <- read_csv("202012-divvy-tripdata.csv")
jan_2021 <- read_csv("202101-divvy-tripdata.csv")
feb_2021 <- read_csv("202102-divvy-tripdata.csv")
mar_2021 <- read_csv("202103-divvy-tripdata.csv")
apr_2021 <- read_csv("202104-divvy-tripdata.csv")
may_2021 <- read_csv("202105-divvy-tripdata.csv")
#Making sure we have consistent column names
# (interactive sanity checks - output is printed, nothing is stored)
colnames(jun_2020)
colnames(jul_2020)
colnames(aug_2020)
colnames(sep_2020)
colnames(oct_2020)
colnames(nov_2020)
colnames(dec_2020)
colnames(jan_2021)
colnames(feb_2021)
colnames(mar_2021)
colnames(apr_2021)
colnames(may_2021)
#Looks like we do
#Now checking to make sure we have consistent data types for each column
# (the str() output motivates the character conversions performed below)
str(jun_2020)
str(jul_2020)
str(aug_2020)
str(sep_2020)
str(oct_2020)
str(nov_2020)
str(dec_2020)
str(jan_2021)
str(feb_2021)
str(mar_2021)
str(apr_2021)
str(may_2021)
#converting start_station_id, end_station_id to chr
# Only the Jun-Nov 2020 files are converted - presumably the later extracts
# already ship these columns as character (verify via the str() output
# above) - so that bind_rows() below doesn't fail on type mismatches.
jun_2020 <- mutate(jun_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
jul_2020 <- mutate(jul_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
aug_2020 <- mutate(aug_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
sep_2020 <- mutate(sep_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
oct_2020 <- mutate(oct_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
nov_2020 <- mutate(nov_2020, start_station_id = as.character(start_station_id),
end_station_id = as.character(end_station_id))
#double checking I changed these before I stack them
glimpse(jun_2020)
glimpse(jul_2020)
glimpse(aug_2020)
glimpse(sep_2020)
glimpse(oct_2020)
glimpse(nov_2020)
#Stacking all 12 months into one data frame
all_trips12 <- bind_rows(jun_2020,jul_2020,aug_2020,sep_2020,oct_2020,nov_2020,
dec_2020,jan_2021,feb_2021,mar_2021,apr_2021,may_2021)
#Inspecting our new data
# (more printed sanity checks on the combined frame)
glimpse(all_trips12)
colnames(all_trips12)
nrow(all_trips12)
dim(all_trips12)
head(all_trips12)
str(all_trips12)
summary(all_trips12)
#Observations for each
# Ride counts by rider type (member vs casual)
table(all_trips12$member_casual)
# Derive calendar columns from the ride start timestamp.
# `date` is a Date (prints as yyyy-mm-dd); month/day/year/day_of_week are
# character renderings of that same date (format() accepts a Date directly,
# so no extra as.Date() round-trip is needed).
all_trips12$date <- as.Date(all_trips12$started_at)
all_trips12$month <- format(all_trips12$date, "%m")
all_trips12$day <- format(all_trips12$date, "%d")
all_trips12$year <- format(all_trips12$date, "%Y")
all_trips12$day_of_week <- format(all_trips12$date, "%A")
# Adding a ride length column, explicitly in seconds.
# Fix: without units = "secs", difftime() picks its units automatically from
# the data (secs/mins/hours/...), so the scale of every downstream duration
# statistic would depend on the data itself. The descriptive-analysis
# comments below assume seconds.
all_trips12$ride_length <- difftime(all_trips12$ended_at, all_trips12$started_at, units = "secs")
# Double checking columns
str(all_trips12)
# check
is.factor(all_trips12$ride_length)
# Changing ride_length to numeric so we can better use it.
# as.numeric() on a difftime drops the class directly; the original
# as.numeric(as.character(...)) round-trip through text is unnecessary.
all_trips12$ride_length <- as.numeric(all_trips12$ride_length)
# check
is.numeric(all_trips12$ride_length)
# checking for wonky data (na.rm so a stray NA doesn't mask the minimum)
min(all_trips12$ride_length, na.rm = TRUE)
# Removing data that doesn't make sense (negative durations, i.e. rides whose
# end precedes their start) plus any NA durations: an NA inside a logical
# row-subset would otherwise inject all-NA rows into the result.
# new version of data with underscore g for 'good'
all_trips12_g <- all_trips12[!is.na(all_trips12$ride_length) & all_trips12$ride_length >= 0, ]
#Some descriptive analysis (all in seconds)
# NOTE(review): the units actually depend on difftime()'s automatic unit
# choice in the ride_length computation above - verify they are seconds.
#Gives shortest ride, longest ride, mean, median, and 1st & 3rd Quartiles
summary(all_trips12_g$ride_length)
#Comparing members and casual users
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = mean)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = median)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = max)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual, FUN = min)
#Average ride time each day for members vs casual users
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
#Putting days of the week in order (ordered factor so tables/plots sort Sun-Sat)
all_trips12_g$day_of_week <- ordered(all_trips12_g$day_of_week,
levels=c("Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday",
"Saturday"))
#Average ride time each day for members vs casual users (now in order)
aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
#Analyzing ridership data by type and weekday
# wday() comes from lubridate (loaded at the top of the script)
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>% #creates weekday field using wday()
group_by(member_casual, weekday) %>% #groups by user type and weekday
summarise(number_of_rides = n() #calculates the number of rides
,average_duration = mean(ride_length)) %>% #calculates the average duration
arrange(member_casual, weekday) #sorts
#Creating a visual for number of rides by rider type
# Side-by-side (dodged) bars: rides per weekday, split by member vs casual
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>%
group_by(member_casual, weekday) %>%
summarise(number_of_rides = n()
,average_duration = mean(ride_length)) %>%
arrange(member_casual, weekday) %>%
ggplot(aes(x = weekday, y = number_of_rides, fill = member_casual)) +
geom_col(position = "dodge") + labs(title = "Number of Rides by Rider Type per Day",
caption = "Data Source: Divvy (Chicago's Bike Share Program)",
x = "Day of the Week",
y = "Number of Rides")
#Visual for average duration
all_trips12_g %>%
mutate(weekday = wday(started_at, label = TRUE)) %>%
group_by(member_casual, weekday) %>%
summarise(number_of_rides = n()
,average_duration = mean(ride_length)) %>%
arrange(member_casual, weekday) %>%
ggplot(aes(x = weekday, y = average_duration, fill = member_casual)) +
geom_col(position = "dodge") + scale_y_continuous("Average Duration") +
labs(title = "Average Ride Duration by Rider Type per Day", caption = "Data Source: Divvy (Chicago's Bike Share Program)",
x = "Day of The Week")
#Creating a csv file I can use for further visualization
ride_counts <- aggregate(all_trips12_g$ride_length ~ all_trips12_g$member_casual +
all_trips12_g$day_of_week, FUN = mean)
write.csv(ride_counts, file = '~/Desktop/Data Portfolio/Cyclistic Case Study/cyclistic_avg_ride_length.csv')
|
\alias{GtkAccessible}
\name{GtkAccessible}
\title{GtkAccessible}
\description{Accessibility support for widgets}
\section{Methods and Functions}{
\code{\link{gtkAccessibleConnectWidgetDestroyed}(object)}\cr
}
\section{Hierarchy}{\preformatted{GObject
+----AtkObject
+----GtkAccessible}}
\section{Structures}{\describe{\item{\verb{GtkAccessible}}{
\emph{undocumented
}
}}}
\references{\url{http://library.gnome.org/devel//gtk/GtkAccessible.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/GtkAccessible.Rd | no_license | hjy1210/RGtk2 | R | false | false | 531 | rd | \alias{GtkAccessible}
\name{GtkAccessible}
\title{GtkAccessible}
\description{Accessibility support for widgets}
\section{Methods and Functions}{
\code{\link{gtkAccessibleConnectWidgetDestroyed}(object)}\cr
}
\section{Hierarchy}{\preformatted{GObject
+----AtkObject
+----GtkAccessible}}
\section{Structures}{\describe{\item{\verb{GtkAccessible}}{
\emph{undocumented
}
}}}
\references{\url{http://library.gnome.org/devel//gtk/GtkAccessible.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Module UI: single-station fecal coliform visualization panel.
#
# @param id Module namespace id (shiny module convention).
# @return A tagList holding the station picker slot, the raw-data review
#   button, explanatory help text, and the plotly output slot.
fecalPlotlySingleStationUI <- function(id) {
  ns <- NS(id)
  stationPicker <- column(3, uiOutput(ns('oneStationSelectionUI')))
  spacer <- column(1)
  reviewButton <- column(3, actionButton(ns('reviewData'), "Review Raw Parameter Data", class = 'btn-block', width = '250px'))
  # The line break inside this string is part of the original literal and is
  # kept verbatim (whitespace collapses when rendered as HTML).
  rawDataNote <- helpText('All data presented in the interactive plot is raw data. Rounding rules are appropriately applied to the
assessment functions utilized by the application.')
  tagList(
    wellPanel(
      h4(strong('Single Station Data Visualization')),
      fluidRow(stationPicker, spacer, reviewButton),
      rawDataNote,
      plotlyOutput(ns('plotly'))
    )
  )
}
# Module server: single-station fecal coliform review.
#
# Shiny module server signature (called via callModule):
#   AUdata               - reactive: conventionals data for the assessment unit.
#   stationSelectedAbove - reactive: station chosen in the parent (AU-level)
#                          selector; used to pre-populate the drop down below.
fecalPlotlySingleStation <- function(input, output, session, AUdata, stationSelectedAbove){
  ns <- session$ns
  # Station drop down for individual review, pre-selected from the AU view.
  output$oneStationSelectionUI <- renderUI({
    req(AUdata)
    selectInput(ns('oneStationSelection'), strong('Select Station to Review'),
                choices = sort(unique(c(stationSelectedAbove(), AUdata()$FDT_STA_ID))), # Change this based on stationSelectedAbove
                width = '200px', selected = stationSelectedAbove())})
  # Rows for the selected station that actually carry a fecal coliform value.
  # BUG FIX: was req(ns(input$oneStationSelection)); ns() always returns a
  # non-empty string, so the guard could never suspend. Guard the input itself.
  oneStation <- reactive({
    req(input$oneStationSelection)
    filter(AUdata(), FDT_STA_ID %in% input$oneStationSelection) %>%
      filter(!is.na(FECAL_COLI))})
  # Modal with the raw parameter data for the selected station.
  observeEvent(input$reviewData, {
    showModal(modalDialog(
      title = "Review Raw Data for Selected Station and Parameter",
      # Typo fix in user-facing text: "highlighed" -> "highlighted".
      helpText('This table subsets the conventionals raw data by station selected in Single Station Visualization Section drop down and
        parameter currently reviewing. Scroll right to see the raw parameter values and any data collection comments. Data analyzed
        by app is highlighted in gray (all DEQ data and non agency/citizen monitoring Level III), data counted by app and noted in
        comment fields is highlighted in yellow (non agency/citizen monitoring Level II), and data NOT CONSIDERED in app is noted in
        orange (non agency/citizen monitoring Level I).'),
      DT::dataTableOutput(ns('parameterData')),
      easyClose = TRUE)) })
  # Raw data table shown in the modal, color coded by monitoring level.
  output$parameterData <- DT::renderDataTable({
    req(oneStation())
    parameterFilter <- dplyr::select(oneStation(), FDT_STA_ID:FDT_COMMENT, FECAL_COLI, RMK_FECAL_COLI, LEVEL_FECAL_COLI)
    # BUG FIX: `dom` was supplied twice in the options list; keep one entry.
    DT::datatable(parameterFilter, rownames = FALSE,
                  options = list(dom = 't', pageLength = nrow(parameterFilter), scrollX = TRUE, scrollY = "400px"),
                  selection = 'none') %>%
      formatStyle(c('FECAL_COLI','RMK_FECAL_COLI', 'LEVEL_FECAL_COLI'), 'LEVEL_FECAL_COLI', backgroundColor = styleEqual(c('Level II', 'Level I'), c('yellow','orange'), default = 'lightgray'))
  })
  # Interactive time series of raw fecal coliform values.
  output$plotly <- renderPlotly({
    req(input$oneStationSelection, oneStation())
    dat <- oneStation()
    # NOTE(review): "%m/%d/%y" assumes two-digit years in FDT_DATE_TIME --
    # confirm against the conventionals source format.
    dat$SampleDate <- as.POSIXct(dat$FDT_DATE_TIME, format="%m/%d/%y")
    plot_ly(data = dat) %>%
      add_markers(data = dat, x = ~SampleDate, y = ~FECAL_COLI, mode = 'scatter', name = "Fecal Coliform (CFU / 100 mL)", marker = list(color = '#535559'),
                  hoverinfo = "text", text = ~paste(sep = "<br>",
                                                    paste("Date: ", SampleDate),
                                                    paste("Depth: ", FDT_DEPTH, "m"),
                                                    paste("Fecal Coliform: ", FECAL_COLI, "CFU / 100 mL"))) %>%
      layout(showlegend = FALSE,
             yaxis = list(title = "Fecal Coliform (CFU / 100 mL)"),
             xaxis = list(title = "Sample Date", tickfont = list(size = 10)))
  })
}
# Module UI: single-station fecal coliform visualization panel.
# (Duplicate copy of the definition above -- extraction artifact.)
#
# @param id Module namespace id (shiny module convention).
# @return A tagList holding the station picker slot, the raw-data review
#   button, explanatory help text, and the plotly output slot.
fecalPlotlySingleStationUI <- function(id) {
  ns <- NS(id)
  stationPicker <- column(3, uiOutput(ns('oneStationSelectionUI')))
  spacer <- column(1)
  reviewButton <- column(3, actionButton(ns('reviewData'), "Review Raw Parameter Data", class = 'btn-block', width = '250px'))
  # The line break inside this string is part of the original literal and is
  # kept verbatim (whitespace collapses when rendered as HTML).
  rawDataNote <- helpText('All data presented in the interactive plot is raw data. Rounding rules are appropriately applied to the
assessment functions utilized by the application.')
  tagList(
    wellPanel(
      h4(strong('Single Station Data Visualization')),
      fluidRow(stationPicker, spacer, reviewButton),
      rawDataNote,
      plotlyOutput(ns('plotly'))
    )
  )
}
# Module server: single-station fecal coliform review.
# (Duplicate copy of the definition above -- extraction artifact.)
#
# Shiny module server signature (called via callModule):
#   AUdata               - reactive: conventionals data for the assessment unit.
#   stationSelectedAbove - reactive: station chosen in the parent (AU-level)
#                          selector; used to pre-populate the drop down below.
fecalPlotlySingleStation <- function(input, output, session, AUdata, stationSelectedAbove){
  ns <- session$ns
  # Station drop down for individual review, pre-selected from the AU view.
  output$oneStationSelectionUI <- renderUI({
    req(AUdata)
    selectInput(ns('oneStationSelection'), strong('Select Station to Review'),
                choices = sort(unique(c(stationSelectedAbove(), AUdata()$FDT_STA_ID))), # Change this based on stationSelectedAbove
                width = '200px', selected = stationSelectedAbove())})
  # Rows for the selected station that actually carry a fecal coliform value.
  # BUG FIX: was req(ns(input$oneStationSelection)); ns() always returns a
  # non-empty string, so the guard could never suspend. Guard the input itself.
  oneStation <- reactive({
    req(input$oneStationSelection)
    filter(AUdata(), FDT_STA_ID %in% input$oneStationSelection) %>%
      filter(!is.na(FECAL_COLI))})
  # Modal with the raw parameter data for the selected station.
  observeEvent(input$reviewData, {
    showModal(modalDialog(
      title = "Review Raw Data for Selected Station and Parameter",
      # Typo fix in user-facing text: "highlighed" -> "highlighted".
      helpText('This table subsets the conventionals raw data by station selected in Single Station Visualization Section drop down and
        parameter currently reviewing. Scroll right to see the raw parameter values and any data collection comments. Data analyzed
        by app is highlighted in gray (all DEQ data and non agency/citizen monitoring Level III), data counted by app and noted in
        comment fields is highlighted in yellow (non agency/citizen monitoring Level II), and data NOT CONSIDERED in app is noted in
        orange (non agency/citizen monitoring Level I).'),
      DT::dataTableOutput(ns('parameterData')),
      easyClose = TRUE)) })
  # Raw data table shown in the modal, color coded by monitoring level.
  output$parameterData <- DT::renderDataTable({
    req(oneStation())
    parameterFilter <- dplyr::select(oneStation(), FDT_STA_ID:FDT_COMMENT, FECAL_COLI, RMK_FECAL_COLI, LEVEL_FECAL_COLI)
    # BUG FIX: `dom` was supplied twice in the options list; keep one entry.
    DT::datatable(parameterFilter, rownames = FALSE,
                  options = list(dom = 't', pageLength = nrow(parameterFilter), scrollX = TRUE, scrollY = "400px"),
                  selection = 'none') %>%
      formatStyle(c('FECAL_COLI','RMK_FECAL_COLI', 'LEVEL_FECAL_COLI'), 'LEVEL_FECAL_COLI', backgroundColor = styleEqual(c('Level II', 'Level I'), c('yellow','orange'), default = 'lightgray'))
  })
  # Interactive time series of raw fecal coliform values.
  output$plotly <- renderPlotly({
    req(input$oneStationSelection, oneStation())
    dat <- oneStation()
    # NOTE(review): "%m/%d/%y" assumes two-digit years in FDT_DATE_TIME --
    # confirm against the conventionals source format.
    dat$SampleDate <- as.POSIXct(dat$FDT_DATE_TIME, format="%m/%d/%y")
    plot_ly(data = dat) %>%
      add_markers(data = dat, x = ~SampleDate, y = ~FECAL_COLI, mode = 'scatter', name = "Fecal Coliform (CFU / 100 mL)", marker = list(color = '#535559'),
                  hoverinfo = "text", text = ~paste(sep = "<br>",
                                                    paste("Date: ", SampleDate),
                                                    paste("Depth: ", FDT_DEPTH, "m"),
                                                    paste("Fecal Coliform: ", FECAL_COLI, "CFU / 100 mL"))) %>%
      layout(showlegend = FALSE,
             yaxis = list(title = "Fecal Coliform (CFU / 100 mL)"),
             xaxis = list(title = "Sample Date", tickfont = list(size = 10)))
  })
}
## As prompted in the homeworkd, makeCacheMatrix creates a special
## matrix object that can cache its inverse
## Below is an example of how to define a matrix and use these
## functions to get the matrix inverse:
## > source('cachematrix.R')
## > z <- matrix( c(1, 2, 3, 4), nrow=2, ncol=2)
## > y <- makeCacheMatrix(z)
## > a <- cacheSolve(y)
## Build a caching wrapper around a matrix.
## Returns a list of four accessors: set/get for the matrix itself and
## set_inverse/get_inverse for a cached inverse. Assigning a new matrix
## via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  get <- function() x
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate cache on data change
  }
  get_inverse <- function() cached_inverse
  set_inverse <- function(solve) cached_inverse <<- solve
  list(set = set,
       get = get,
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}
## Also as prompted in the homework, cacheSolve computed the
## inverse of the special "matrix" returned by makeCachematrix
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it with solve() only on the first call and serving the
## cached copy (with a message) on subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$set_inverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | AthenaStacy/ProgrammingAssignment2 | R | false | false | 1,110 | r | ## As prompted in the homeworkd, makeCacheMatrix creates a special
## matrix object that can cache its inverse
## Below is an example of how to define a matrix and use these
## functions to get the matrix inverse:
## > source('cachematrix.R')
## > z <- matrix( c(1, 2, 3, 4), nrow=2, ncol=2)
## > y <- makeCacheMatrix(z)
## > a <- cacheSolve(y)
## Build a caching wrapper around a matrix.
## (Duplicate copy of the definition above -- extraction artifact.)
## Returns a list of four accessors: set/get for the matrix itself and
## set_inverse/get_inverse for a cached inverse. Assigning a new matrix
## via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  get <- function() x
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate cache on data change
  }
  get_inverse <- function() cached_inverse
  set_inverse <- function(solve) cached_inverse <<- solve
  list(set = set,
       get = get,
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}
## Also as prompted in the homework, cacheSolve computed the
## inverse of the special "matrix" returned by makeCachematrix
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it with solve() only on the first call and serving the
## cached copy (with a message) on subsequent calls.
## (Duplicate copy of the definition above -- extraction artifact.)
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$set_inverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{cloudhsm}
\alias{cloudhsm}
\title{Amazon CloudHSM}
\usage{
cloudhsm(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS CloudHSM Service
This is documentation for \strong{AWS CloudHSM Classic}. For more
information, see \href{https://aws.amazon.com/cloudhsm/faqs/}{AWS CloudHSM Classic FAQs}, the \href{https://docs.aws.amazon.com/cloudhsm/classic/userguide/}{AWS CloudHSM Classic User Guide},
and the \href{https://docs.aws.amazon.com/cloudhsm/classic/APIReference/}{AWS CloudHSM Classic API Reference}.
\strong{For information about the current version of AWS CloudHSM}, see \href{https://aws.amazon.com/cloudhsm/}{AWS CloudHSM}, the \href{https://docs.aws.amazon.com/cloudhsm/latest/userguide/}{AWS CloudHSM User Guide}, and the
\href{https://docs.aws.amazon.com/cloudhsm/latest/APIReference/}{AWS CloudHSM API Reference}.
}
\section{Service syntax}{
\preformatted{svc <- cloudhsm(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=cloudhsm_add_tags_to_resource]{add_tags_to_resource} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_hapg]{create_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_hsm]{create_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_luna_client]{create_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_hapg]{delete_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_hsm]{delete_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_luna_client]{delete_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_hapg]{describe_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_hsm]{describe_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_luna_client]{describe_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_get_config]{get_config} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_available_zones]{list_available_zones} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_hapgs]{list_hapgs} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_hsms]{list_hsms} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_luna_clients]{list_luna_clients} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_tags_for_resource]{list_tags_for_resource} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_hapg]{modify_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_hsm]{modify_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_luna_client]{modify_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_remove_tags_from_resource]{remove_tags_from_resource} \tab This is documentation for AWS CloudHSM Classic
}
}
\examples{
\dontrun{
svc <- cloudhsm()
svc$add_tags_to_resource(
Foo = 123
)
}
}
| /cran/paws/man/cloudhsm.Rd | permissive | TWarczak/paws | R | false | true | 3,534 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{cloudhsm}
\alias{cloudhsm}
\title{Amazon CloudHSM}
\usage{
cloudhsm(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS CloudHSM Service
This is documentation for \strong{AWS CloudHSM Classic}. For more
information, see \href{https://aws.amazon.com/cloudhsm/faqs/}{AWS CloudHSM Classic FAQs}, the \href{https://docs.aws.amazon.com/cloudhsm/classic/userguide/}{AWS CloudHSM Classic User Guide},
and the \href{https://docs.aws.amazon.com/cloudhsm/classic/APIReference/}{AWS CloudHSM Classic API Reference}.
\strong{For information about the current version of AWS CloudHSM}, see \href{https://aws.amazon.com/cloudhsm/}{AWS CloudHSM}, the \href{https://docs.aws.amazon.com/cloudhsm/latest/userguide/}{AWS CloudHSM User Guide}, and the
\href{https://docs.aws.amazon.com/cloudhsm/latest/APIReference/}{AWS CloudHSM API Reference}.
}
\section{Service syntax}{
\preformatted{svc <- cloudhsm(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=cloudhsm_add_tags_to_resource]{add_tags_to_resource} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_hapg]{create_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_hsm]{create_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_create_luna_client]{create_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_hapg]{delete_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_hsm]{delete_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_delete_luna_client]{delete_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_hapg]{describe_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_hsm]{describe_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_describe_luna_client]{describe_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_get_config]{get_config} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_available_zones]{list_available_zones} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_hapgs]{list_hapgs} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_hsms]{list_hsms} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_luna_clients]{list_luna_clients} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_list_tags_for_resource]{list_tags_for_resource} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_hapg]{modify_hapg} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_hsm]{modify_hsm} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_modify_luna_client]{modify_luna_client} \tab This is documentation for AWS CloudHSM Classic\cr
\link[=cloudhsm_remove_tags_from_resource]{remove_tags_from_resource} \tab This is documentation for AWS CloudHSM Classic
}
}
\examples{
\dontrun{
svc <- cloudhsm()
svc$add_tags_to_resource(
Foo = 123
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature.R
\name{FEATURESELECTION}
\alias{FEATURESELECTION}
\title{Classification with Feature selection}
\usage{
FEATURESELECTION(
train,
labels,
algorithm = c("ranking", "forward", "backward", "exhaustive"),
unieval = if (algorithm[1] == "ranking") c("fisher", "fstat", "relief", "inertiaratio")
else NULL,
uninb = NULL,
unithreshold = NULL,
multieval = if (algorithm[1] == "ranking") NULL else c("cfs", "fstat", "inertiaratio",
"wrapper"),
wrapmethod = NULL,
mainmethod = wrapmethod,
tune = FALSE,
...
)
}
\arguments{
\item{train}{The training set (description), as a \code{data.frame}.}
\item{labels}{Class labels of the training set (\code{vector} or \code{factor}).}
\item{algorithm}{The feature selection algorithm.}
\item{unieval}{The (univariate) evaluation criterion. \code{uninb}, \code{unithreshold} or \code{multieval} must be specified.}
\item{uninb}{The number of selected feature (univariate evaluation).}
\item{unithreshold}{The threshold for selecting feature (univariate evaluation).}
\item{multieval}{The (multivariate) evaluation criterion.}
\item{wrapmethod}{The classification method used for the wrapper evaluation.}
\item{mainmethod}{The final method used for data classification. If a wrapper evaluation is used, the same classification method should be used.}
\item{tune}{If true, the function returns parameters instead of a classification model.}
\item{...}{Other parameters.}
}
\description{
Apply a classification method after a subset of features has been selected.
}
\examples{
\dontrun{
require (datasets)
data (iris)
FEATURESELECTION (iris [, -5], iris [, 5], uninb = 2, mainmethod = LDA)
}
}
\seealso{
\code{\link{selectfeatures}}, \code{\link{predict.selection}}, \code{\link{selection-class}}
}
| /man/FEATURESELECTION.Rd | no_license | cran/fdm2id | R | false | true | 1,849 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature.R
\name{FEATURESELECTION}
\alias{FEATURESELECTION}
\title{Classification with Feature selection}
\usage{
FEATURESELECTION(
train,
labels,
algorithm = c("ranking", "forward", "backward", "exhaustive"),
unieval = if (algorithm[1] == "ranking") c("fisher", "fstat", "relief", "inertiaratio")
else NULL,
uninb = NULL,
unithreshold = NULL,
multieval = if (algorithm[1] == "ranking") NULL else c("cfs", "fstat", "inertiaratio",
"wrapper"),
wrapmethod = NULL,
mainmethod = wrapmethod,
tune = FALSE,
...
)
}
\arguments{
\item{train}{The training set (description), as a \code{data.frame}.}
\item{labels}{Class labels of the training set (\code{vector} or \code{factor}).}
\item{algorithm}{The feature selection algorithm.}
\item{unieval}{The (univariate) evaluation criterion. \code{uninb}, \code{unithreshold} or \code{multieval} must be specified.}
\item{uninb}{The number of selected feature (univariate evaluation).}
\item{unithreshold}{The threshold for selecting feature (univariate evaluation).}
\item{multieval}{The (multivariate) evaluation criterion.}
\item{wrapmethod}{The classification method used for the wrapper evaluation.}
\item{mainmethod}{The final method used for data classification. If a wrapper evaluation is used, the same classification method should be used.}
\item{tune}{If true, the function returns parameters instead of a classification model.}
\item{...}{Other parameters.}
}
\description{
Apply a classification method after a subset of features has been selected.
}
\examples{
\dontrun{
require (datasets)
data (iris)
FEATURESELECTION (iris [, -5], iris [, 5], uninb = 2, mainmethod = LDA)
}
}
\seealso{
\code{\link{selectfeatures}}, \code{\link{predict.selection}}, \code{\link{selection-class}}
}
|
\name{setAsymptoticCovMat}
\alias{setAsymptoticCovMat}
\title{Set the asymptotic covariance matrix of a fitted HMM}
\description{This function sets the empirical asymptotic covariance matrix of the fitted HMM}
\usage{
setAsymptoticCovMat(HMMFit, asymptCovMat)
}
\arguments{
\item{HMMFit}{a HMMFitClass object}
\item{asymptCovMat}{The covariance matrix of the fitted model}
}
\value{The HMMFit object}
\seealso{asymptoticCovMat}
| /man/setAsymptoticCovMat.rd | no_license | Mthrun/RHmm | R | false | false | 444 | rd | \name{setAsymptoticCovMat}
\alias{setAsymptoticCovMat}
\title{Set the asymptotic covariance matrix of a fitted HMM}
\description{This function sets the empirical asymptotic covariance matrix of the fitted HMM}
\usage{
setAsymptoticCovMat(HMMFit, asymptCovMat)
}
\arguments{
\item{HMMFit}{a HMMFitClass object}
\item{asymptCovMat}{The covariance matrix of the fitted model}
}
\value{The HMMFit object}
\seealso{asymptoticCovMat}
|
# BIO375 Exam 3 script (2019-11-19).
# NOTE(review): rm(list = ls()) wipes the user's entire workspace and
# install.packages() inside a script forces a download on every run --
# both are discouraged in shared scripts.
rm(list = ls())
getwd()
library("ggfortify")
install.packages("broom")
library("broom")
library("tidyverse")
tidyverse_update()
library("nlme")
# Question 10: repeatability of aphid thorax length across galls.
library(readr)
A <- read_csv("datasets/exams/aphids.csv")
View(A)
# Random-intercept model: gall_number is the grouping (random) factor.
model02 <- lme(fixed = thorax_length ~ 1,
random = ~1|gall_number, data = A)
model02_varcomp <- VarCorr(model02)
model02_varcomp
# NOTE(review): stray top-level assignment; it creates a global named
# gall_number and has no effect on the fitted model -- looks like a
# leftover paste. Confirm before removing.
gall_number = pdLogChol(1)
varAmong <- as.numeric( model02_varcomp[1,1] )
varWithin <- as.numeric( model02_varcomp[2,1] )
# Repeatability = among-gall variance / total variance.
repeatability <- varAmong / (varAmong + varWithin)
summary(model02)
# Question 11: glucose data (loaded only; no analysis in this chunk).
library(readr)
glucose <- read_csv("datasets/exams/glucose.csv")
View(glucose)
# Question 12: driver age vs sign-reading distance, simple linear model.
library(readr)
DV <- read_csv("datasets/exams/DriverVision.csv")
View(DV)
# NOTE(review): Age ~ Distance models age as a function of distance; the
# question likely intends Distance ~ Age -- confirm before grading use.
model03 <- lm(Age ~ Distance, data = DV)
autoplot(model03, smooth.colour = NA)
# Residuals vs Age, to eyeball structure left in the residuals.
ggplot(data = DV)+
geom_point(aes(x = Age, y = resid(model03)))
summary(model03)
#### Code runs perfectly 5/5 #### | /Assignments/Assignments/2019-11-19_Exam3.R | no_license | BIO375/troutman_michael | R | false | false | 953 | r | rm(list = ls())
# NOTE(review): duplicate of the exam script above (dataset-extraction
# artifact); this copy's opening rm(list = ls()) line was absorbed into
# the preceding metadata row. Prefer deduplicating.
getwd()
library("ggfortify")
# NOTE(review): install.packages() inside a script forces a download on
# every run; install interactively instead.
install.packages("broom")
library("broom")
library("tidyverse")
tidyverse_update()
library("nlme")
# Question 10: repeatability of aphid thorax length across galls.
library(readr)
A <- read_csv("datasets/exams/aphids.csv")
View(A)
# Random-intercept model: gall_number is the grouping (random) factor.
model02 <- lme(fixed = thorax_length ~ 1,
random = ~1|gall_number, data = A)
model02_varcomp <- VarCorr(model02)
model02_varcomp
# NOTE(review): stray assignment; has no effect on the fitted model.
gall_number = pdLogChol(1)
varAmong <- as.numeric( model02_varcomp[1,1] )
varWithin <- as.numeric( model02_varcomp[2,1] )
# Repeatability = among-gall variance / total variance.
repeatability <- varAmong / (varAmong + varWithin)
summary(model02)
# Question 11: glucose data (loaded only; no analysis in this chunk).
library(readr)
glucose <- read_csv("datasets/exams/glucose.csv")
View(glucose)
# Question 12: driver age vs sign-reading distance, simple linear model.
library(readr)
DV <- read_csv("datasets/exams/DriverVision.csv")
View(DV)
# NOTE(review): Age ~ Distance models age as a function of distance; the
# question likely intends Distance ~ Age -- confirm.
model03 <- lm(Age ~ Distance, data = DV)
autoplot(model03, smooth.colour = NA)
ggplot(data = DV)+
geom_point(aes(x = Age, y = resid(model03)))
summary(model03)
#### Code runs perfectly 5/5 ####
### lecture on 02.03.2015 -- visualizing qualitative (categorical) data
# load packages ------------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggvis)
library(tidyr)
library(XLConnect)
library(scales)
# load data ----------------------------------------------------------
# Household survey workbook: the data sheet plus two metadata sheets
# ('opis cech' = variable descriptions, 'opis wariantów cech' = value
# labels). Sheet names are runtime strings and stay in Polish.
wb <- loadWorkbook('WIRDS/datasets/gospodarstwa.xls')
gosp <- readWorksheet(wb,'gospodarstwa')
vars <- readWorksheet(wb,'opis cech')
vars_labels <- readWorksheet(wb,'opis wariantów cech')
gosp <- tbl_df(gosp)
# summaries ----------------------------------------------------------
gosp %>% count(woj)             # counts by voivodeship (woj)
gosp %>% count(woj,sort=T)      # same, sorted descending
gosp %>% count(woj,klm,sort=T)  # by voivodeship and locality class (klm)
# basic plots in base R ----------------------------------------------
gosp$woj %>%
table() %>%
barplot()
# basic plots with ggplot2 -------------------------------------------
ggplot(data=gosp,
aes(x = woj)) +
geom_bar()
# Bars reordered by category frequency.
ggplot(data=gosp,
aes(x = reorder(woj,woj,length))) +
geom_bar()
# Locality-class distribution, one panel per voivodeship.
ggplot(data=gosp,
aes(x = as.factor(klm))) +
geom_bar() +
facet_wrap(~woj)
ggplot(data=gosp,
aes(x = as.factor(klm))) +
geom_bar() +
facet_grid(~woj)
ggplot(data=gosp,
aes(x = as.factor(d63))) +
geom_bar() +
facet_wrap(~woj)
# material situation (self-assessed, variable d61) -------------------
# Factor labels are runtime strings and stay in Polish:
# Bardzo dobra = very good, Raczej dobra = rather good,
# Przeciętna = average, Raczej zła = rather bad, Zła = bad.
# Stacked bars of the share of each answer, per voivodeship.
gosp %>%
mutate(d61 = factor(x = d61,
levels = 1:5,
labels = c('Bardzo dobra',
'Raczej dobra',
'Przeciętna',
'Raczej zła',
'Zła'),
ordered = T)) %>%
count(woj,d61) %>%
mutate(p = n/sum(n)) %>%
ggplot(data = .,
aes(x = woj,
y = p,
fill = d61)) +
geom_bar(stat = 'identity',
colour = 'black') +
scale_y_continuous(labels = percent) +
scale_fill_brewer(palette = 'Greens') +
theme_bw() +
coord_flip()
| /WIRDS/codes/02.03.2015 - wizualizacja danych jakosciowych.R | no_license | tomasznierychly/Dydaktyka | R | false | false | 1,991 | r | ### lecture on 02.03.2015
# wczytanie pakietów ------------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggvis)
library(tidyr)
library(XLConnect)
library(scales)
# wczytanie danych --------------------------------------------------------
wb <- loadWorkbook('WIRDS/datasets/gospodarstwa.xls')
gosp <- readWorksheet(wb,'gospodarstwa')
vars <- readWorksheet(wb,'opis cech')
vars_labels <- readWorksheet(wb,'opis wariantów cech')
gosp <- tbl_df(gosp)
# podsumowania ------------------------------------------------------------
gosp %>% count(woj)
gosp %>% count(woj,sort=T)
gosp %>% count(woj,klm,sort=T)
# podstawowe wykresy w R --------------------------------------------------
gosp$woj %>%
table() %>%
barplot()
# podstawowe wykresy w ggplot2 --------------------------------------------
ggplot(data=gosp,
aes(x = woj)) +
geom_bar()
ggplot(data=gosp,
aes(x = reorder(woj,woj,length))) +
geom_bar()
ggplot(data=gosp,
aes(x = as.factor(klm))) +
geom_bar() +
facet_wrap(~woj)
ggplot(data=gosp,
aes(x = as.factor(klm))) +
geom_bar() +
facet_grid(~woj)
ggplot(data=gosp,
aes(x = as.factor(d63))) +
geom_bar() +
facet_wrap(~woj)
# sytuacja materialna -----------------------------------------------------
gosp %>%
mutate(d61 = factor(x = d61,
levels = 1:5,
labels = c('Bardzo dobra',
'Raczej dobra',
'Przeciętna',
'Raczej zła',
'Zła'),
ordered = T)) %>%
count(woj,d61) %>%
mutate(p = n/sum(n)) %>%
ggplot(data = .,
aes(x = woj,
y = p,
fill = d61)) +
geom_bar(stat = 'identity',
colour = 'black') +
scale_y_continuous(labels = percent) +
scale_fill_brewer(palette = 'Greens') +
theme_bw() +
coord_flip()
|
## Plot 1: histogram of Global Active Power for 1-2 Feb 2007, copied to
## plot1.png. Relies on the global `dat1` populated by loaddata().
plot1 <- function() {
    ## BUG FIX: length(dat1) errored when `dat1` had never been created;
    ## test for existence before inspecting the object.
    if (!exists("dat1") || length(dat1) == 0) {
        loaddata()
    }
    hist(dat1$Global_active_power, col = "Red",
         xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
         main = "Global Active Power")
    ## Copy the screen device to a PNG file, then close the PNG device.
    dev.copy(png, file = "plot1.png")
    dev.off()
}
## Download (only when needed) and load the household power consumption
## data, subset to 1-2 Feb 2007, into the global variable `dat1`.
loaddata <- function() {
    ## IMPROVEMENT: skip the large download/unzip when the extracted file
    ## is already present in the working directory.
    if (!file.exists("./household_power_consumption.txt")) {
        download.file(url= "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./data.zip")
        unzip("data.zip")
    }
    ## Read file in; "?" marks missing values in this dataset.
    dat <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE, na.strings = c("?"))
    ## Convert the string date column into a Date.
    dat[, 1] <- as.Date(dat[, 1], format = "%d/%m/%Y")
    ## `<<-` keeps the original contract: callers (e.g. plot1) read the
    ## global `dat1` rather than a return value.
    dat1 <<- subset(dat, Date == as.Date("1/2/2007", format="%d/%m/%Y") | Date == as.Date("2/2/2007", format="%d/%m/%Y"))
}
| /plot1.R | no_license | branimal/ExData_Plotting1 | R | false | false | 909 | r | plot1 <- function() {
if(length(dat1) == 0) {
loaddata()
}
hist(dat1$Global_active_power, col="Red", xlab="Global Active Power (kilowatts)", ylab = "Frequency", main="Global Active Power")
dev.copy(png, file="plot1.png")
dev.off()
}
## Download (only when needed) and load the household power consumption
## data, subset to 1-2 Feb 2007, into the global variable `dat1`.
## (Duplicate copy of the definition above -- extraction artifact.)
loaddata <- function() {
    ## IMPROVEMENT: skip the large download/unzip when the extracted file
    ## is already present in the working directory.
    if (!file.exists("./household_power_consumption.txt")) {
        download.file(url= "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./data.zip")
        unzip("data.zip")
    }
    ## Read file in; "?" marks missing values in this dataset.
    dat <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE, na.strings = c("?"))
    ## Convert the string date column into a Date.
    dat[, 1] <- as.Date(dat[, 1], format = "%d/%m/%Y")
    ## `<<-` keeps the original contract: callers (e.g. plot1) read the
    ## global `dat1` rather than a return value.
    dat1 <<- subset(dat, Date == as.Date("1/2/2007", format="%d/%m/%Y") | Date == as.Date("2/2/2007", format="%d/%m/%Y"))
}
|
# Two-pool ICBM-style soil organic carbon model for the Ultuna long-term
# experiment (JAGS/BUGS syntax).
# Indices: j = treatment (1..J_Ultuna), i = year (1..N_Ultuna).
# Pools:  Y_*_Ultuna = "young" C pools, one per input type
#         (R = roots, S = shoots/stubble, FYM = farmyard manure,
#          GM = green manure, PEA = peat, SAW = sawdust, SLU = sludge,
#          STR = straw); O_Ultuna = "old" (humified) C pool.
# k1_ult/k2_ult = decay rates of young/old pools, h_* = humification
# coefficients, re_Ultuna = annual climate scaling factor.
model{
  #### Loop over Ultuna treatments
  for(j in 1:J_Ultuna)
  {
    # Initial state: measured SOC split between old pool (Init_ratio_Ultuna)
    # and young pool (split evenly between R and S); amendment pools start at 0.
    Y_R_Ultuna[j,1]<-(SOC_init_Ultuna[j]*(1-Init_ratio_Ultuna))*0.5
    Y_S_Ultuna[j,1]<-(SOC_init_Ultuna[j]*(1-Init_ratio_Ultuna))*0.5
    Y_FYM_Ultuna[j,1]<-0
    Y_GM_Ultuna[j,1]<-0
    Y_PEA_Ultuna[j,1]<-0
    Y_SAW_Ultuna[j,1]<-0
    Y_SLU_Ultuna[j,1]<-0
    Y_STR_Ultuna[j,1]<-0
    O_Ultuna[j,1] <-SOC_init_Ultuna[j]*Init_ratio_Ultuna
    for (i in 1:(N_Ultuna)){
      # Inputs R (roots), with different allometric functions for crops;
      # 0.7 = depth allocation, SR_* = shoot:root ratios, exudates add
      # proportionally to root C.
      I_R_cereals_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_cereals_Ultuna[j,i])*0.7*C_percent*(1/SR_cereals_ult))
      I_R_root_crops_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_root_crops_Ultuna[j,i])*0.7*0.32*C_percent*(1/SR_root_crops_ult))
      I_R_oilseeds_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_oilseeds_Ultuna[j,i])*0.7*C_percent*(1/SR_oilseeds_ult))
      I_R_maize_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_maize_Ultuna[j,i])*0.7*C_percent*(1/SR_maize_ult))
      I_R_Ultuna[j,i] <- I_R_cereals_Ultuna[j,i]+I_R_root_crops_Ultuna[j,i]+I_R_oilseeds_Ultuna[j,i]+I_R_maize_Ultuna[j,i]
      # Inputs S (above-ground stubble left on the field)
      I_S_Ultuna[j,i] <- ((Yields_cereals_Ultuna[j,i]+Yields_root_crops_Ultuna[j,i]+Yields_oilseeds_Ultuna[j,i])*stubbles_ratio_Ultuna+Yields_maize_Ultuna[j,i]*stubbles_ratio_Ultuna_maize)*C_percent
      # Young pools: first-order decay of (previous stock + this year's input),
      # rate k1 scaled by the climate factor re.
      Y_R_Ultuna[j,i+1] <- (I_R_Ultuna[j,i]+Y_R_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_S_Ultuna[j,i+1] <- (I_S_Ultuna[j,i]+Y_S_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_FYM_Ultuna[j,i+1] <- (I_FYM_Ultuna[j,i]+Y_FYM_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_GM_Ultuna[j,i+1] <- (I_GM_Ultuna[j,i]+Y_GM_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_PEA_Ultuna[j,i+1] <- (I_PEA_Ultuna[j,i]+Y_PEA_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_SAW_Ultuna[j,i+1] <- (I_SAW_Ultuna[j,i]+Y_SAW_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_SLU_Ultuna[j,i+1] <- (I_SLU_Ultuna[j,i]+Y_SLU_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      Y_STR_Ultuna[j,i+1] <- (I_STR_Ultuna[j,i]+Y_STR_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
      # Humification fluxes from each young pool into the old pool
      # (ICBM analytical solution term).
      fluxR_Ultuna[j,i] <- h_R_ult*((k1_ult*(Y_R_Ultuna[j,i]+I_R_Ultuna[j,i]))/(k2_ult-k1_ult))
      fluxS_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_S_Ultuna[j,i]+I_S_Ultuna[j,i]))/(k2_ult-k1_ult))
      # Humification fluxes for the amendments (GM and STR share h_S).
      flux_FYM_Ultuna[j,i] <- h_FYM_ult*((k1_ult*(Y_FYM_Ultuna[j,i]+I_FYM_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_GM_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_GM_Ultuna[j,i]+I_GM_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_PEA_Ultuna[j,i] <- h_PEA_ult*((k1_ult*(Y_PEA_Ultuna[j,i]+I_PEA_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_SAW_Ultuna[j,i] <- h_SAW_ult*((k1_ult*(Y_SAW_Ultuna[j,i]+I_SAW_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_SLU_Ultuna[j,i] <- h_SLU_ult*((k1_ult*(Y_SLU_Ultuna[j,i]+I_SLU_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_STR_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_STR_Ultuna[j,i]+I_STR_Ultuna[j,i]))/(k2_ult-k1_ult))
      flux_sum_Ultuna[j,i]<-(fluxR_Ultuna[j,i]+
                             fluxS_Ultuna[j,i]+
                             flux_FYM_Ultuna[j,i]+
                             flux_GM_Ultuna[j,i]+
                             flux_PEA_Ultuna[j,i]+
                             flux_SAW_Ultuna[j,i]+
                             flux_SLU_Ultuna[j,i]+
                             flux_STR_Ultuna[j,i])
      # Old pool: decays at k2, receives the summed humification flux.
      O_Ultuna[j,i+1] <- (O_Ultuna[j,i]-flux_sum_Ultuna[j,i])*exp(-k2_ult*re_Ultuna[j,i]) +
                         flux_sum_Ultuna[j,i]*exp(-k1_ult*re_Ultuna[j,i])
      # Total C = sum of all young pools plus the old pool.
      Y_tot_Ultuna[j,i] <- Y_R_Ultuna[j,i] +
                           Y_S_Ultuna[j,i] +
                           Y_FYM_Ultuna[j,i] +
                           Y_GM_Ultuna[j,i] +
                           Y_PEA_Ultuna[j,i] +
                           Y_SAW_Ultuna[j,i] +
                           Y_SLU_Ultuna[j,i] +
                           Y_STR_Ultuna[j,i]
      Tot_Ultuna[j,i] <- Y_tot_Ultuna[j,i] + O_Ultuna[j,i]
      # Measurement likelihood (error assumed proportional to the measurement).
      SOC_Ultuna[j,i] ~ dnorm(Tot_Ultuna[j,i],1/(error_SOC_Ultuna[j]*error_SOC_multiplier_Ultuna[j]))
    }
  }
  ## Priors, Ultuna
  # Xie, Yajun. 2020. "A Meta-Analysis of Critique of Litterbag Method Used in Examining Decomposition of Leaf Litters." Journal of Soils and Sediments 20 (4): 1881-86. https://doi.org/10.1007/s11368-020-02572-9.
  #k1_ult ~ dunif(0.78, 1)
  k1_ult ~ dnorm(0.6906054, 1/0.2225509)
  k2_ult ~ dnorm(0.00605, 1/(0.00605*error_h))# T(0.00605-0.00605*limits_k2,0.00605+0.00605*limits_k2)
  k2_ult_or ~ dnorm(0.00605, 1/(0.00605*error_h))# T(0.00605-0.00605*limits_k2,0.00605+0.00605*limits_k2)
  # NOTE(review): precision uses 0.15 while the mean/bounds use 0.125 --
  # possibly a typo for 0.125; left unchanged pending confirmation.
  h_S_ult ~ dnorm(0.125,1/(0.15*error_h)) T(0.125-0.125*limits_h,0.125+0.125*limits_h)
  h_R_ult ~ dnorm(0.35,1/(0.35*error_h)) T(0.35-0.35*limits_h, 0.35+0.35*limits_h)
  # BUG FIX: upper truncation bound read 0.27+0.27+limits_h (= 0.84);
  # every sibling prior uses mean+mean*limits_h, so this is 0.27+0.27*limits_h.
  h_FYM_ult ~ dnorm(0.27,1/(0.27*error_h)) T(0.27-0.27*limits_h,0.27+0.27*limits_h)
  h_PEA_ult ~ dnorm(0.59,1/(0.59*error_h)) T(0.59-0.59*limits_h, 0.59+0.59*limits_h)
  h_SAW_ult ~ dnorm(0.25,1/(0.25*error_h)) T(0.25-0.25*limits_h,0.25+0.25*limits_h)
  h_SLU_ult ~ dnorm(0.41,1/(0.41*error_h)) T(0.41-0.41*limits_h,0.41+0.41*limits_h)
  # Shoot:root ratio priors.
  SR_cereals_ult ~ dnorm(11, 1/(11*error_SR)) T(11-11*limit_SR,11+11*limit_SR)
  # NOTE(review): this line swaps error_SR/limit_SR relative to the other
  # SR_* priors (limit_SR in the precision, error_SR in the lower bound);
  # confirm whether that is intentional.
  SR_root_crops_ult ~ dnorm(29.49853, 1/(29.49853*limit_SR)) T(29.49853-29.49853*error_SR,29.49853+29.49853*limit_SR)
  SR_oilseeds_ult ~ dnorm(8, 1/(8*error_SR)) T(8-8*limit_SR,8+8*limit_SR)
  SR_maize_ult ~ dnorm(6.25, 1/(6.25*error_SR)) T(6.25-6.25*limit_SR,6.25+6.25*limit_SR)
  exudates_coeff ~ dnorm(1.65,1/(1.65*0.1)) T(1.65*0.95,1.65*1.05)
  Init_ratio_Ultuna ~ dnorm(0.9291667,1/(0.9291667*0.2)) T(0.8,0.98)
  stubbles_ratio_Ultuna_maize ~ dnorm(0.04,1/0.01) T(0.01,0.08)
  stubbles_ratio_Ultuna ~ dnorm(0.04,1/0.01) T(0.01,0.08)
  C_percent ~ dunif(0.40, 0.51)
  # Fixed hyper-constants controlling prior spread and truncation width.
  error_h<-0.1
  limits_h<-0.3
  limits_k2<-0.5
  error_SR<-0.25
  limit_SR<-0.5
}
| /JAGS_ICBM_3.1_Ultuna.R | no_license | ilmenichetti/ICBM_recalibration | R | false | false | 5,989 | r | model{
####Loop for Ultuna
## ICBM-style carbon model: several material-specific "young" pools (Y_*) feed
## one "old" pool (O) via humification. j indexes rows of the input matrices
## (presumably treatments/plots — confirm against the data file); i indexes
## yearly time steps; re_Ultuna is the climate re-scaling factor.
for(j in 1:J_Ultuna)
{
# Initial state: non-old C split 50/50 between root and shoot young pools;
# all amendment pools start empty; old pool holds Init_ratio of initial SOC.
Y_R_Ultuna[j,1]<-(SOC_init_Ultuna[j]*(1-Init_ratio_Ultuna))*0.5
Y_S_Ultuna[j,1]<-(SOC_init_Ultuna[j]*(1-Init_ratio_Ultuna))*0.5
Y_FYM_Ultuna[j,1]<-0
Y_GM_Ultuna[j,1]<-0
Y_PEA_Ultuna[j,1]<-0
Y_SAW_Ultuna[j,1]<-0
Y_SLU_Ultuna[j,1]<-0
Y_STR_Ultuna[j,1]<-0
O_Ultuna[j,1] <-SOC_init_Ultuna[j]*Init_ratio_Ultuna
for (i in 1:(N_Ultuna)){
#Inputs R (roots), with different allometric functions for crops
# Root C input = yield * 0.7 * C fraction / shoot:root ratio, inflated by exudates.
# (root crops get an extra 0.32 factor.)
I_R_cereals_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_cereals_Ultuna[j,i])*0.7*C_percent*(1/SR_cereals_ult))
I_R_root_crops_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_root_crops_Ultuna[j,i])*0.7*0.32*C_percent*(1/SR_root_crops_ult))
I_R_oilseeds_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_oilseeds_Ultuna[j,i])*0.7*C_percent*(1/SR_oilseeds_ult))
I_R_maize_Ultuna[j,i] <- (1+exudates_coeff)*((Yields_maize_Ultuna[j,i])*0.7*C_percent*(1/SR_maize_ult))
I_R_Ultuna[j,i] <- I_R_cereals_Ultuna[j,i]+I_R_root_crops_Ultuna[j,i]+I_R_oilseeds_Ultuna[j,i]+I_R_maize_Ultuna[j,i]
#Inputs S
# Shoot/stubble C input; maize has its own stubble ratio.
I_S_Ultuna[j,i] <- ((Yields_cereals_Ultuna[j,i]+Yields_root_crops_Ultuna[j,i]+Yields_oilseeds_Ultuna[j,i])*stubbles_ratio_Ultuna+Yields_maize_Ultuna[j,i]*stubbles_ratio_Ultuna_maize)*C_percent
#Young R
# First-order decay of each young pool at rate k1, climate-scaled by re.
Y_R_Ultuna[j,i+1] <- (I_R_Ultuna[j,i]+Y_R_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_S_Ultuna[j,i+1] <- (I_S_Ultuna[j,i]+Y_S_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_FYM_Ultuna[j,i+1] <- (I_FYM_Ultuna[j,i]+Y_FYM_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_GM_Ultuna[j,i+1] <- (I_GM_Ultuna[j,i]+Y_GM_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_PEA_Ultuna[j,i+1] <- (I_PEA_Ultuna[j,i]+Y_PEA_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_SAW_Ultuna[j,i+1] <- (I_SAW_Ultuna[j,i]+Y_SAW_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_SLU_Ultuna[j,i+1] <- (I_SLU_Ultuna[j,i]+Y_SLU_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
Y_STR_Ultuna[j,i+1] <- (I_STR_Ultuna[j,i]+Y_STR_Ultuna[j,i])*exp(-k1_ult*re_Ultuna[j,i])
#Old
# Humified flux from each young pool into the old pool (ICBM analytical form).
# NOTE(review): GM and STR reuse h_S_ult (no dedicated h coefficient) — confirm intended.
fluxR_Ultuna[j,i] <- h_R_ult*((k1_ult*(Y_R_Ultuna[j,i]+I_R_Ultuna[j,i]))/(k2_ult-k1_ult))
fluxS_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_S_Ultuna[j,i]+I_S_Ultuna[j,i]))/(k2_ult-k1_ult))
#old flux manure
flux_FYM_Ultuna[j,i] <- h_FYM_ult*((k1_ult*(Y_FYM_Ultuna[j,i]+I_FYM_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_GM_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_GM_Ultuna[j,i]+I_GM_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_PEA_Ultuna[j,i] <- h_PEA_ult*((k1_ult*(Y_PEA_Ultuna[j,i]+I_PEA_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_SAW_Ultuna[j,i] <- h_SAW_ult*((k1_ult*(Y_SAW_Ultuna[j,i]+I_SAW_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_SLU_Ultuna[j,i] <- h_SLU_ult*((k1_ult*(Y_SLU_Ultuna[j,i]+I_SLU_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_STR_Ultuna[j,i] <- h_S_ult*((k1_ult*(Y_STR_Ultuna[j,i]+I_STR_Ultuna[j,i]))/(k2_ult-k1_ult))
flux_sum_Ultuna[j,i]<-(fluxR_Ultuna[j,i]+
fluxS_Ultuna[j,i]+
flux_FYM_Ultuna[j,i]+
flux_GM_Ultuna[j,i]+
flux_PEA_Ultuna[j,i]+
flux_SAW_Ultuna[j,i]+
flux_SLU_Ultuna[j,i]+
flux_STR_Ultuna[j,i])
# Old-pool update: decays at k2 plus the lagged humified input decaying at k1.
O_Ultuna[j,i+1] <- (O_Ultuna[j,i]-flux_sum_Ultuna[j,i])*exp(-k2_ult*re_Ultuna[j,i]) +
flux_sum_Ultuna[j,i]*exp(-k1_ult*re_Ultuna[j,i])
#Total C
Y_tot_Ultuna[j,i] <- Y_R_Ultuna[j,i] +
Y_S_Ultuna[j,i] +
Y_FYM_Ultuna[j,i] +
Y_GM_Ultuna[j,i] +
Y_PEA_Ultuna[j,i] +
Y_SAW_Ultuna[j,i] +
Y_SLU_Ultuna[j,i] +
Y_STR_Ultuna[j,i]
Tot_Ultuna[j,i] <- Y_tot_Ultuna[j,i] + O_Ultuna[j,i]
#Error of the measurement (assumed proportional to the measurement)
# Likelihood: observed SOC ~ Normal(modelled total, precision from per-row error).
SOC_Ultuna[j,i] ~ dnorm(Tot_Ultuna[j,i],1/(error_SOC_Ultuna[j]*error_SOC_multiplier_Ultuna[j]))
}
}
##Parameters Ultuna
## Priors for the ICBM decomposition/humification parameters (Ultuna site).
# Xie, Yajun. 2020. “A Meta-Analysis of Critique of Litterbag Method Used in Examining Decomposition of Leaf Litters.” Journal of Soils and Sediments 20 (4): 1881–86. https://doi.org/10.1007/s11368-020-02572-9.
#k1_ult ~ dunif(0.78, 1)
# Decomposition rate of the young pools (informative prior from the meta-analysis above).
k1_ult ~ dnorm(0.6906054, 1/0.2225509)
# Decomposition rate of the old pool; k2_ult_or is an untruncated reference copy.
k2_ult ~ dnorm(0.00605, 1/(0.00605*error_h))# T(0.00605-0.00605*limits_k2,0.00605+0.00605*limits_k2)
k2_ult_or ~ dnorm(0.00605, 1/(0.00605*error_h))# T(0.00605-0.00605*limits_k2,0.00605+0.00605*limits_k2)
# Humification coefficients, truncated to +/- limits_h (30%) around the prior mean.
# NOTE(review): h_S_ult precision uses 0.15 while its mean is 0.125 — confirm intended.
h_S_ult ~ dnorm(0.125,1/(0.15*error_h)) T(0.125-0.125*limits_h,0.125+0.125*limits_h)
h_R_ult ~ dnorm(0.35,1/(0.35*error_h)) T(0.35-0.35*limits_h, 0.35+0.35*limits_h)
# FIX: upper truncation bound read "0.27+0.27+limits_h" ("+" instead of "*"),
# unlike every sibling h prior; corrected to the multiplicative form.
h_FYM_ult ~ dnorm(0.27,1/(0.27*error_h)) T(0.27-0.27*limits_h,0.27+0.27*limits_h)
h_PEA_ult ~ dnorm(0.59,1/(0.59*error_h)) T(0.59-0.59*limits_h, 0.59+0.59*limits_h)
h_SAW_ult ~ dnorm(0.25,1/(0.25*error_h)) T(0.25-0.25*limits_h,0.25+0.25*limits_h)
h_SLU_ult ~ dnorm(0.41,1/(0.41*error_h)) T(0.41-0.41*limits_h,0.41+0.41*limits_h)
#root/shoot ratios priors
SR_cereals_ult ~ dnorm(11, 1/(11*error_SR)) T(11-11*limit_SR,11+11*limit_SR)
# FIX: error_SR and limit_SR were swapped relative to the other SR priors
# (precision used limit_SR and the truncation used error_SR); made consistent.
SR_root_crops_ult ~ dnorm(29.49853, 1/(29.49853*error_SR)) T(29.49853-29.49853*limit_SR,29.49853+29.49853*limit_SR)
SR_oilseeds_ult ~ dnorm(8, 1/(8*error_SR)) T(8-8*limit_SR,8+8*limit_SR)
SR_maize_ult ~ dnorm(6.25, 1/(6.25*error_SR)) T(6.25-6.25*limit_SR,6.25+6.25*limit_SR)
# Root exudation multiplier, initial old/total SOC ratio, and stubble fractions.
exudates_coeff ~ dnorm(1.65,1/(1.65*0.1)) T(1.65*0.95,1.65*1.05)
Init_ratio_Ultuna ~ dnorm(0.9291667,1/(0.9291667*0.2)) T(0.8,0.98)
stubbles_ratio_Ultuna_maize ~ dnorm(0.04,1/0.01) T(0.01,0.08)
stubbles_ratio_Ultuna ~ dnorm(0.04,1/0.01) T(0.01,0.08)
C_percent ~ dunif(0.40, 0.51)
# Fixed hyperparameters: relative errors and truncation half-widths.
error_h<-0.1
limits_h<-0.3
limits_k2<-0.5
error_SR<-0.25
limit_SR<-0.5
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stonehaven_ts.R
\docType{data}
\name{stonehaven_ts}
\alias{stonehaven_ts}
\title{Wave and tidal data for a site near Stonehaven, Scotland}
\format{A data frame with 35040 rows and 8 variables
\describe{
\item{tidal_velocity}{depth averaged tidal velocity, in m/s}
\item{tidal_direction}{tidal direction}
\item{wave_height}{significant wave height, in m}
\item{wave_period}{wave period}
\item{wave_direction}{wave direction}
}}
\source{
\url{https://www.sciencedirect.com/science/article/pii/S0964569116302587}
}
\usage{
stonehaven_ts
}
\description{
A time series of wave and tidal data for a station near Stonehaven, Scotland.
Depth of this site is 38 metres and the D50 is 0.2 mm.
Reference: \url{https://www.sciencedirect.com/science/article/pii/S0964569116302587}
}
\keyword{datasets}
| /man/stonehaven_ts.Rd | permissive | robertjwilson/bedshear | R | false | true | 870 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stonehaven_ts.R
\docType{data}
\name{stonehaven_ts}
\alias{stonehaven_ts}
\title{Wave and tidal data for a site near Stonehaven, Scotland}
\format{A data frame with 35040 rows and 8 variables
\describe{
\item{tidal_velocity}{depth averaged tidal velocity, in m/s}
\item{tidal_direction}{tidal direction}
\item{wave_height}{significant wave height, in m}
\item{wave_period}{wave period}
\item{wave_direction}{wave direction}
}}
\source{
\url{https://www.sciencedirect.com/science/article/pii/S0964569116302587}
}
\usage{
stonehaven_ts
}
\description{
A time series of wave and tidal data for a station near Stonehaven, Scotland.
Depth of this site is 38 metres and the D50 is 0.2 mm
Reference: https://www.sciencedirect.com/science/article/pii/S0964569116302587
}
\keyword{datasets}
|
# Auto-generated fuzzing/valgrind test case for meteor:::ET0_Makkink.
# The argument list reproduces one randomized input: Rs is empty and the other
# fields contain extreme/denormal doubles, to probe edge-case handling.
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005601379e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
# Invoke the (unexported) function with the fuzzed arguments and dump the result.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Example code extracted from the driftR package Rd file for dr_drop()
# (dropping observations from a water-quality monitoring period).
library(driftR)
### Name: dr_drop
### Title: Dropping observations from the monitoring period
### Aliases: dr_drop
### ** Examples
# Small fixture: six timestamped sensor readings across three days.
testData <- data.frame(
Date = c("9/18/2015", "9/18/2015", "9/18/2015", "9/18/2015", "9/19/2015", "9/21/2015"),
Time = c("12:10:49", "12:15:50", "12:20:51", "12:25:51", "12:30:51", "12:35:51"),
Temp = c(14.76, 14.64, 14.57, 14.51, 14.50, 14.63),
SpCond = c(0.754, 0.750, 0.750, 0.749, 0.749, 0.749),
stringsAsFactors = FALSE
)
# Drop by position: first n rows (head) and/or last n rows (tail).
dr_drop(testData, head = 2)
dr_drop(testData, tail = 1)
dr_drop(testData, head = 2, tail = 1)
# Drop by date/time window: 'from' and/or 'to' accept dates or date-times.
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/19/2015")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:25:51")
dr_drop(testData, dateVar = Date, timeVar = Time, to = "9/19/2015")
dr_drop(testData, dateVar = Date, timeVar = Time, to = "9/18/2015 12:25:51")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:25:51",
to = "9/19/2015 12:30:51")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:00", to = "9/19/2015 13:00")
# Drop by logical expression on a variable.
dr_drop(testData, exp = Temp > 14.7)
| /data/genthat_extracted_code/driftR/examples/dr_drop.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,129 | r | library(driftR)
### Name: dr_drop
### Title: Dropping observations from the monitoring period
### Aliases: dr_drop
### ** Examples
testData <- data.frame(
Date = c("9/18/2015", "9/18/2015", "9/18/2015", "9/18/2015", "9/19/2015", "9/21/2015"),
Time = c("12:10:49", "12:15:50", "12:20:51", "12:25:51", "12:30:51", "12:35:51"),
Temp = c(14.76, 14.64, 14.57, 14.51, 14.50, 14.63),
SpCond = c(0.754, 0.750, 0.750, 0.749, 0.749, 0.749),
stringsAsFactors = FALSE
)
dr_drop(testData, head = 2)
dr_drop(testData, tail = 1)
dr_drop(testData, head = 2, tail = 1)
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/19/2015")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:25:51")
dr_drop(testData, dateVar = Date, timeVar = Time, to = "9/19/2015")
dr_drop(testData, dateVar = Date, timeVar = Time, to = "9/18/2015 12:25:51")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:25:51",
to = "9/19/2015 12:30:51")
dr_drop(testData, dateVar = Date, timeVar = Time, from = "9/18/2015 12:00", to = "9/19/2015 13:00")
dr_drop(testData, exp = Temp > 14.7)
|
## Shiny module server for entering a three-column data set (time / logS /
## temperature by default) from one of four sources: a delimited text file,
## an Excel sheet, a manual matrix input, or an editable handsontable.
## Returns a reactive data frame, refreshed when "update_table" is pressed.
tableFile3col <- function(input, output, session,
                          label_1 = "time", label_2 = "logS", label_3 = "temperature",
                          default_data = data.frame(x = 1, y = 1, z = 1)
                          ) {

  ## --- Text-file source -------------------------------------------------

  userFile <- reactive({
    validate(need(input$file, label = "Text"))
    input$file
  })

  file_frame <- reactive({
    read.table(userFile()$datapath,
               header = TRUE,
               sep = input$sep,
               dec = input$dec,
               stringsAsFactors = FALSE)
  })

  ## --- Excel source -----------------------------------------------------

  excelFile <- reactive({
    validate(need(input$excel_file, label = "Excel"))
    input$excel_file
  })

  excel_frame <- reactive({
    read_excel(excelFile()$datapath,
               sheet = input$excel_sheet,
               skip = input$excel_skip,
               col_types = "numeric")
  })

  ## --- Manual matrix source ---------------------------------------------

  input_manual <- reactive({
    manual_df <- input$manual_table
    colnames(manual_df) <- c(label_1, label_2, label_3)
    as.data.frame(manual_df)
  })

  ## --- Pick the active source each time the update button fires ---------

  out_table <- eventReactive(input$update_table, {
    switch(input$my_tabBox,
           Old   = input_manual(),
           Text  = file_frame(),
           Excel = excel_frame(),
           hot_to_r(input$hot))  # default: the editable handsontable
  }, ignoreInit = FALSE, ignoreNULL = FALSE)

  ## --- Preview plot of the selected data --------------------------------

  output$my_table <- renderPlot({
    out_table() %>%
      mutate(temperature = factor(temperature)) %>%
      ggplot() +
      geom_point(aes(x = time, y = logS, colour = temperature))
  })

  ## --- Tab-separated CSV download of the selected data ------------------

  output$export_table <- downloadHandler(
    filename = "mytable.csv",
    content = function(file) {
      write.table(out_table(),
                  file = file, row.names = FALSE, sep = "\t")
    }
  )

  ## --- Editable handsontable (seeded with default_data on first render) --

  output$hot <- renderRHandsontable({
    current_df <- if (is.null(input$hot)) default_data else hot_to_r(input$hot)
    current_df %>%
      set_names(c(label_1, label_2, label_3)) %>%
      rhandsontable() %>%
      hot_table(highlightCol = TRUE, highlightRow = TRUE)
  })

  ## Return the reactive that yields the selected data frame.
  out_table
}
| /tableFile3col.R | no_license | albgarre/bioinactivation_FE | R | false | false | 2,622 | r |
tableFile3col <- function(input, output, session,
label_1 = "time", label_2 = "logS", label_3 = "temperature",
default_data = data.frame(x = 1, y = 1, z = 1)
) {
## File part
userFile <- reactive({
validate(need(input$file, label = "Text"))
input$file
})
file_frame <- reactive({
read.table(userFile()$datapath,
header = TRUE,
sep = input$sep,
dec = input$dec,
stringsAsFactors = FALSE)
})
excelFile <- reactive({
validate(need(input$excel_file, label = "Excel"))
input$excel_file
})
excel_frame <- reactive({
read_excel(excelFile()$datapath,
sheet = input$excel_sheet,
skip = input$excel_skip,
col_types = "numeric")
})
## Matrix part
input_manual <- reactive({
out <- input$manual_table
colnames(out) <- c(label_1, label_2, label_3)
as.data.frame(out)
})
## Select the right frame
out_table <- eventReactive(input$update_table, {
if (input$my_tabBox == "Old") {
input_manual()
} else if (input$my_tabBox == "Text") {
file_frame()
} else if (input$my_tabBox == "Excel"){
excel_frame()
} else {
hot_to_r(input$hot)
}
}, ignoreInit = FALSE, ignoreNULL = FALSE)
## Show the table
# output$my_table <- renderTable(out_table())
output$my_table <- renderPlot({
out_table() %>%
mutate(temperature = factor(temperature)) %>%
ggplot() +
geom_point(aes(x = time, y = logS, colour = temperature))
})
## Export the table
output$export_table <- downloadHandler(
filename = "mytable.csv",
content = function(file) {
write.table(out_table(),
file = file, row.names = FALSE, sep = "\t")
}
)
## Handsontable
output$hot = renderRHandsontable({
if (!is.null(input$hot)) {
DF = hot_to_r(input$hot)
} else {
DF = default_data
}
DF %>%
set_names(c(label_1, label_2, label_3)) %>%
rhandsontable() %>%
hot_table(highlightCol = TRUE, highlightRow = TRUE)
})
# Return the reactive that yields the data frame
return(out_table)
}
|
#' sva: a package for removing artifacts from microarray and sequencing data
#'
#' sva has functionality to estimate and remove artifacts from high dimensional data.
#' The \code{\link{sva}} function can be used to estimate artifacts from microarray data,
#' and the \code{\link{svaseq}} function can be used to estimate artifacts from count-based
#' RNA-sequencing (and other sequencing) data. The \code{\link{ComBat}} function can be
#' used to remove known batch effects from microarray data. The \code{\link{fsva}} function
#' can be used to remove batch effects for prediction problems.
#'
#'
#' A vignette is available by typing \code{browseVignettes("sva")} in the R prompt.
#'
#' @references For the package: Leek JT, Johnson WE, Parker HS, Jaffe AE, and Storey JD. (2012) The sva package for removing batch effects and other unwanted variation in high-throughput experiments. Bioinformatics DOI:10.1093/bioinformatics/bts034
#' @references For sva: Leek JT and Storey JD. (2008) A general framework for multiple testing dependence. Proceedings of the National Academy of Sciences , 105: 18718-18723.
#' @references For sva: Leek JT and Storey JD. (2007) Capturing heterogeneity in gene expression studies by `Surrogate Variable Analysis'. PLoS Genetics, 3: e161.
#' @references For Combat: Johnson WE, Li C, Rabinovic A (2007) Adjusting batch effects in microarray expression data using empirical Bayes methods. Biostatistics, 8 (1), 118-127
#' @references For svaseq: Leek JT (2014) svaseq: removing batch and other artifacts from count-based sequencing data. bioRxiv doi: TBD
#' @references For fsva: Parker HS, Bravo HC, Leek JT (2013) Removing batch effects for prediction problems with frozen surrogate variable analysis arXiv:1301.3947
#' @references For psva: Parker HS, Leek JT, Favorov AV, Considine M, Xia X, Chavan S, Chung CH, Fertig EJ (2014) Preserving biological heterogeneity with a permuted surrogate variable analysis for genomics batch correction Bioinformatics doi: 10.1093/bioinformatics/btu375
#'
#' @docType package
#' @author Jeffrey T. Leek, W. Evan Johnson, Hilary S. Parker, Andrew E. Jaffe, John D. Storey, Yuqing Zhang
#' @name sva
#'
#' @import genefilter
#' @import mgcv
#' @rawNamespace import(matrixStats, except = c(rowSds, rowVars))
#'
#' @useDynLib sva, .registration = TRUE
NULL
| /R/sva-package.R | no_license | wevanjohnson/sva-devel | R | false | false | 2,328 | r | #' sva: a package for removing artifacts from microarray and sequencing data
#'
#' sva has functionality to estimate and remove artifacts from high dimensional data
#' the \code{\link{sva}} function can be used to estimate artifacts from microarray data
#' the \code{\link{svaseq}} function can be used to estimate artifacts from count-based
#' RNA-sequencing (and other sequencing) data. The \code{\link{ComBat}} function can be
#' used to remove known batch effecs from microarray data. The \code{\link{fsva}} function
#' can be used to remove batch effects for prediction problems.
#'
#'
#' A vignette is available by typing \code{browseVignettes("sva")} in the R prompt.
#'
#' @references For the package: Leek JT, Johnson WE, Parker HS, Jaffe AE, and Storey JD. (2012) The sva package for removing batch effects and other unwanted variation in high-throughput experiments. Bioinformatics DOI:10.1093/bioinformatics/bts034
#' @references For sva: Leek JT and Storey JD. (2008) A general framework for multiple testing dependence. Proceedings of the National Academy of Sciences , 105: 18718-18723.
#' @references For sva: Leek JT and Storey JD. (2007) Capturing heterogeneity in gene expression studies by `Surrogate Variable Analysis'. PLoS Genetics, 3: e161.
#' @references For Combat: Johnson WE, Li C, Rabinovic A (2007) Adjusting batch effects in microarray expression data using empirical Bayes methods. Biostatistics, 8 (1), 118-127
#' @references For svaseq: Leek JT (2014) svaseq: removing batch and other artifacts from count-based sequencing data. bioRxiv doi: TBD
#' @references For fsva: Parker HS, Bravo HC, Leek JT (2013) Removing batch effects for prediction problems with frozen surrogate variable analysis arXiv:1301.3947
#' @references For psva: Parker HS, Leek JT, Favorov AV, Considine M, Xia X, Chavan S, Chung CH, Fertig EJ (2014) Preserving biological heterogeneity with a permuted surrogate variable analysis for genomics batch correction Bioinformatics doi: 10.1093/bioinformatics/btu375
#'
#' @docType package
#' @author Jeffrey T. Leek, W. Evan Johnson, Hilary S. Parker, Andrew E. Jaffe, John D. Storey, Yuqing Zhang
#' @name sva
#'
#' @import genefilter
#' @import mgcv
#' @rawNamespace import(matrixStats, except = c(rowSds, rowVars))
#'
#' @useDynLib sva, .registration = TRUE
NULL
|
# Kaggle "Santander Customer Satisfaction" starter script:
# load train/test, inspect class balance, drop zero-variance features,
# look at pairwise correlations, and fit a baseline random forest.
# FIX: use library() instead of require() (fails loudly if a package is
# missing) and drop the duplicated corrplot load.
library(corrplot)
library(mlbench)
library(reshape2)
library(lattice)
library(e1071)
library(AppliedPredictiveModeling)
library(caret)
library(randomForest)
# NOTE(review): setwd() assumes the script is launched from the directory
# containing ./kaggle/; consider project-relative paths instead.
setwd("./kaggle/Santander Customer Satisfaction/")
train <- read.csv("./data/train.csv")
test <- read.csv("./data/test.csv")
sampsub <- read.csv("./data/sample_submission.csv")
# Class balance: TARGET is a heavily imbalanced 0/1 outcome.
hist(train$TARGET)
table(train$TARGET)
prop.table(table(train$TARGET))
# Stratified sample required.
# Stack train (minus TARGET, column 371) and test, with a flag telling
# the two apart so they can be split again after joint preprocessing.
target <- train$TARGET
train$training <- 1
test$training <- 0
data <- rbind(train[-371], test)
str(data)
# Inspect the feature columns in chunks of 80.
x <- seq(1, 371, by = 80)
str(data[x[1]:x[2]])
str(data[x[2]:x[3]])
str(data[x[3]:x[4]])
str(data[x[4]:x[5]])
# Removing highly correlated variables.
vars <- apply(data, 2, FUN = var)
# FIX: which() is NA-safe, unlike subscripting with the logical vars == 0.
zero_var <- names(vars)[which(vars == 0)]
str(data[, names(data) %in% zero_var])
data2 <- data[, !names(data) %in% zero_var]
corr_matrix <- cor(data2, use = "pairwise.complete.obs")
highCorr <- findCorrelation(corr_matrix, cutoff = 0.9)
length(highCorr)
corrplot(corr_matrix[1:150, 1:150])
# Baseline random forest on the training rows (ID column dropped).
training <- data[data$training == 1, ]
training$target <- target
system.time(
  model_rf <- randomForest(as.factor(target) ~ ., data = training[, -1],
                           ntree = 500, mtry = 50)
)
model_rf$importance
| /starter.R | no_license | fredrikskatland/Kaggle-Santander-Customer-Satisfaction | R | false | false | 1,204 | r | require(corrplot)
require(mlbench)
require(reshape2)
require(lattice)
require(e1071)
require(AppliedPredictiveModeling)
require(caret)
require(corrplot)
require(randomForest)
setwd("./kaggle/Santander Customer Satisfaction/")
train <- read.csv("./data/train.csv")
test <- read.csv("./data/test.csv")
sampsub <- read.csv("./data/sample_submission.csv")
hist(train$TARGET)
table(train$TARGET)
prop.table(table(train$TARGET))
# Stratified sample required.
target <- train$TARGET
train$training <- 1
test$training <- 0
data <- rbind(train[-371],test)
str(data)
x<-seq(1,371, by=80)
str(data[x[1]:x[2]])
str(data[x[2]:x[3]])
str(data[x[3]:x[4]])
str(data[x[4]:x[5]])
# Removing highly correlated variables.
vars <- apply(data, 2, FUN=var)
zero_var <- names(vars[vars==0])
str(data[,names(data) %in% zero_var])
data2 <- data[,!names(data) %in% zero_var]
corr_matrix <- cor(data2,use="pairwise.complete.obs")
highCorr <- findCorrelation(corr_matrix, cutoff = 0.9)
length(highCorr)
corrplot(corr_matrix[1:150,1:150])
training <- data[data$training==1,]
training$target <- target
system.time(
model_rf <- randomForest(as.factor(target)~., data = training[,-1], ntree=500, mtry=50)
)
model_rf$importance
|
# Demo: interactive force-directed network graphs with networkD3.
# FIX: the original called install.packages() unconditionally on every run;
# guard it so the package is only installed when missing.
if (!requireNamespace("networkD3", quietly = TRUE)) {
  install.packages("networkD3")
}
library(networkD3)
# Les Miserables character co-appearance data bundled with networkD3.
data(MisLinks)
data(MisNodes)
# Basic force-directed network from the edge (Links) and node (Nodes) tables.
forceNetwork(Links= MisLinks, Nodes = MisNodes,
             Source = 'source', Target = "target",
             Value = "value", NodeID = "name",
             Group = "group", opacity = 0.8)
# Inspect the underlying data frames.
head(MisLinks)
head(MisNodes)
# Same network, additionally mapping node size to the "size" column.
forceNetwork(Links= MisLinks, Nodes = MisNodes,
             Source = 'source', Target = "target",
             Value = "value", NodeID = "name",
             Nodesize = "size",
             Group = "group", opacity = 0.8) # opacity = transparency
?forceNetwork
library(networkD3)
data(MisLinks)
data(MisNodes)
forceNetwork(Links= MisLinks, Nodes = MisNodes,
Source = 'source', Target = "target",
Value = "value", NodeID = "name",
Group = "group", opacity = 0.8)
head(MisLinks)
head(MisNodes)
forceNetwork(Links= MisLinks, Nodes = MisNodes,
Source = 'source', Target = "target",
Value = "value", NodeID = "name",
Nodesize = "size",
Group = "group", opacity = 0.8) # opacity = 투명도
?forceNetwork |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{shinyTable}
\alias{shinyTable}
\title{Shiny app to visualize dataframe as simple interactive datatable}
\usage{
shinyTable(df)
}
\arguments{
\item{df}{dataframe to be visualized}
}
\value{
Shiny App
}
\description{
Launches a basic Shiny App that renders the given dataframe into an interactive datatable using \code{renderDataTable}
}
\examples{
\dontrun{
shinyTable(mtcars)
}
}
| /man/shinyTable.Rd | permissive | Sunil-Pai-G/Rsenal | R | false | false | 440 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{shinyTable}
\alias{shinyTable}
\title{Shiny app to visualize dataframe as simple interactive datatable}
\usage{
shinyTable(df)
}
\arguments{
\item{df}{dataframe to be visualized}
}
\value{
Shiny App
}
\description{
Launches a basic Shiny App that renders the given dataframe into an interactive datatable using \code{renderDataTable}
}
\examples{
\dontrun{
shinyTable(mtcars)
}
}
|
/ID transform.R | no_license | HerRedwine/mylibrary | R | false | false | 710 | r | ||
# Example code extracted from the proportion package Rd file for PloterrSC().
library(proportion)
### Name: PloterrSC
### Title: Plots error, long term power and pass/fail criteria using Score
### method
### Aliases: PloterrSC
### ** Examples
# n = sample size, alp = alpha level, phi/f = method-specific parameters
# as documented in ?PloterrSC.
n=20; alp=0.05; phi=0.05; f=-2
PloterrSC(n,alp,phi,f)
|
# Exploratory Data Analysis course project — plot 2:
# global active power over time for 2007-02-01 and 2007-02-02.
library(data.table)
library(plyr)  # for mutate(); loaded after data.table, so plyr wins name clashes
# NOTE(review): assumes ./downloads (relative to the current directory)
# contains household_power_consumption.txt.
setwd("./downloads")
data <- fread("household_power_consumption.txt")
# Keep only the two target dates (raw file stores dates as d/m/yyyy).
data2 <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# Combine Date + Time into a single POSIXct timestamp in the Time column.
data3 <- mutate(data2, Time = as.POSIXct(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S")))
data4 <- data.frame(data3)  # change the data.table to a data frame format
data4[, 3:9] <- sapply(data4[, 3:9], as.numeric)
# FIX: the plotting code was duplicated for the screen device and the PNG;
# share it via a helper so the two outputs cannot drift apart. type = "l"
# replaces the redundant plot(type = "n") + lines() pair.
draw_plot2 <- function(df) {
  plot(df[, 2], df[, 3], type = "l", xlab = "",
       ylab = "Global Active Power (kilowatts)")
}
# Draw on the default (screen) device.
draw_plot2(data4)
# Save the image.
# NOTE(review): hard-coded absolute output path; adjust for your machine.
# FIX: the argument is spelled "units" (the original relied on partial
# matching of "unit").
png(filename = "/Users/daniel/desktop/project/plot2.png",
    width = 480, height = 480, units = "px")
draw_plot2(data4)
dev.off()
##########################################
# Coliphage analysis - 6 beaches
# v1 by Jade 7/13/15
# This file conducts maximum likelihood regression
# to estimate prevalence ratios
# Results stratified by beach
# 10 day gi illness
##########################################
# NOTE(review): rm(list=ls()) and a hard-coded setwd() make this script
# depend on an interactive session and one author's directory layout.
rm(list=ls())
library(foreign)
setwd("~/Dropbox/Coliphage/")
# --------------------------------------
# load the and pre-preprocess the
# analysis dataset
# (refer to the base functions script
# for details on the pre-processing)
# --------------------------------------
beaches13=read.csv("~/Dropbox/13beaches/data/final/13beaches-analysis.csv")
# load base functions
# (defines preprocess.6beaches, mpreg, and the other helpers used below)
source("Programs/Analysis/0-base-functions.R")
data=preprocess.6beaches(beaches13)
# restrict to 6 beaches with coliphage data
beach.list=c("Avalon","Doheny","Malibu","Mission Bay",
"Fairhope","Goddard")
all=data[data$beach %in% beach.list,]
avalon=data[data$beach %in% "Avalon",]
doheny=data[data$beach %in% "Doheny",]
malibu=data[data$beach %in% "Malibu",]
mission=data[data$beach %in% "Mission Bay",]
fairhope=data[data$beach %in% "Fairhope",]
goddard=data[data$beach %in% "Goddard",]
data.list=list(all=all,avalon=avalon,doheny=doheny,mission=mission,
malibu=malibu,goddard=goddard,fairhope=fairhope)
# Apply the same two filters to every beach subset; the final assignment's
# value is what lapply collects for each element.
data.list=lapply(data.list,function(df){
# drop individuals with no water quality information
df=subset(df,nowq==0)
# subset to non-missing exposure categories
# to make the robust CI calcs work
df=subset(df,df$bodycontact=="Yes")
})
# convert from list back to data frames
# (overwrites all/avalon/doheny/... in the global environment with the
# filtered versions)
list2env(data.list ,.GlobalEnv)
# --------------------------------------
# Calculate the actual Ns for each cell
# and store them for plotting and tables
# --------------------------------------
# Count the observations with non-missing values for both the outcome and
# the exposure category (the sample size behind each regression cell).
regN <- function(outcome, exposurecat) {
  # table() silently drops pairs where either value is NA, so summing the
  # cross-tabulation yields the number of complete outcome/exposure pairs.
  cross_tab <- table(outcome, exposurecat)
  sum(cross_tab)
}
# Overall Ns per beach/indicator (10-day GI illness vs. coliphage presence).
#avalon
av.n10.fmc1602 = regN(avalon$gici10,avalon$fmc1602.pres)
av.n10.fpc1601 = regN(avalon$gici10,avalon$fpc1601.pres)
av.n10.fpc1602 = regN(avalon$gici10,avalon$fpc1602.pres)
#doheny
do.n10.fmc1601 = regN(doheny$gici10,doheny$fmc1601.pres)
do.n10.fmc1602 = regN(doheny$gici10,doheny$fmc1602.pres)
do.n10.fpc1601 = regN(doheny$gici10,doheny$fpc1601.pres)
do.n10.fpc1602 = regN(doheny$gici10,doheny$fpc1602.pres)
# fairhope
fa.n10.fpc1601 = regN(fairhope$gici10,fairhope$fpc1601.pres)
# goddard
# fpc 1601 always present at goddard
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601 = regN(malibu$gici10,malibu$fpc1601.pres)
# mission bay
mb.n10.fmc1601 = regN(mission$gici10,mission$fmc1601.pres)
mb.n10.fpc1601 = regN(mission$gici10,mission$fpc1601.pres)
# n if low risk conditions ---------------------------
# NOTE(review): the stratified Ns below use the gici3 outcome while the
# variable names say n10 (the unstratified Ns above use gici10) — this looks
# like a copy-paste slip from the 3-day script; confirm which outcome is
# intended before reporting these Ns.
#avalon
av.n10.fmc1602.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fmc1602.pres[avalon$groundwater=="Below median flow"])
av.n10.fpc1601.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fpc1601.pres[avalon$groundwater=="Below median flow"])
av.n10.fpc1602.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fpc1602.pres[avalon$groundwater=="Below median flow"])
#doheny
do.n10.fmc1602.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fmc1602.pres[doheny$berm=="Closed"])
do.n10.fpc1601.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fpc1601.pres[doheny$berm=="Closed"])
do.n10.fpc1602.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fpc1602.pres[doheny$berm=="Closed"])
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601.int0 = regN(malibu$gici3[malibu$berm=="Closed"],malibu$fpc1601.pres[malibu$berm=="Closed"])
# n if high risk conditions ---------------------------
#avalon
av.n10.fmc1602.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fmc1602.pres[avalon$groundwater=="Above median flow"])
av.n10.fpc1601.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fpc1601.pres[avalon$groundwater=="Above median flow"])
av.n10.fpc1602.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fpc1602.pres[avalon$groundwater=="Above median flow"])
#doheny
do.n10.fmc1602.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fmc1602.pres[doheny$berm=="Open"])
do.n10.fpc1601.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fpc1601.pres[doheny$berm=="Open"])
do.n10.fpc1602.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fpc1602.pres[doheny$berm=="Open"])
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601.int1 = regN(malibu$gici3[malibu$berm=="Open"],malibu$fpc1601.pres[malibu$berm=="Open"])
#-----------------------------------------
# 10-day illness and coliphage concentration
# pooled across berm and groundwater flow conditions
# by beach
#-----------------------------------------
# mpreg() fits the adjusted regression; presumably a modified Poisson model
# with robust variance (vcv=T requests the variance-covariance matrix) --
# it is defined in the sourced base-functions script; confirm there.
# Beaches where an indicator was always present are skipped (no contrast).
# avalon
# fmc1601 -- always present at avalon
avfit10.fmc1602 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
avfit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
avfit10.fpc1602 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
# doheny
# rows with a missing indicator are excluded explicitly where needed
dofit10.fmc1601 = mpreg(gici10~fmc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny[!is.na(doheny$fmc1601.pres),],vcv=T)
dofit10.fmc1602 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
dofit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
dofit10.fpc1602 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
# fairhope
fafit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=fairhope[!is.na(fairhope$fpc1601.pres),],vcv=T)
# goddard
# fpc 1601 always present at goddard
# malibu
# fmc 1602 and fpc 1602 always present at malibu
mafit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=malibu[!is.na(malibu$fpc1601.pres),],vcv=T)
# mission bay
mbfit10.fmc1601 = mpreg(gici10~fmc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=mission[!is.na(mission$fmc1601.pres),],vcv=T)
mbfit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=mission[!is.na(mission$fpc1601.pres),],vcv=T)
#-----------------------------------------
# 10-day illness and coliphage concentration
# by berm and groundwater flow conditions
# by beach
# interaction tests
#-----------------------------------------
# Avalon: effect modification of the coliphage-illness association by
# groundwater flow.  For each indicator, fit a reduced (main effects only)
# model and an interaction model, then compare them with a likelihood
# ratio test (lrtest comes from the sourced base functions or the lmtest
# package -- confirm).
# All glm calls now spell out `data=` explicitly; the originals relied on
# partial matching of `dat=` to glm's `data` argument.
# reduced model for LR test
avfit10.fmc1602.glm <- glm(gici10~fmc1602.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fmc1602.gw <- glm(gici10~fmc1602.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fmc1602.gw)
lrtest(avfit10.fmc1602.glm,avfit10.fmc1602.gw)
# reduced model for LR test
avfit10.fpc1601.glm <- glm(gici10~fpc1601.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fpc1601.gw <- glm(gici10~fpc1601.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fpc1601.gw)
lrtest(avfit10.fpc1601.glm,avfit10.fpc1601.gw)
# reduced model for LR test
avfit10.fpc1602.glm <- glm(gici10~fpc1602.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fpc1602.gw <- glm(gici10~fpc1602.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fpc1602.gw)
lrtest(avfit10.fpc1602.glm,avfit10.fpc1602.gw)
# Doheny and Malibu: effect modification by berm status, same structure as
# the avalon block above.  `data=` is spelled out explicitly (the originals
# relied on partial matching of `dat=`).
# berm always closed when fmc1601 measured so no interaction assessment
# reduced model for LR test
dofit10.fmc1602.glm <- glm(gici10~fmc1602.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fmc1602.berm <- glm(gici10~fmc1602.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fmc1602.berm)
lrtest(dofit10.fmc1602.glm,dofit10.fmc1602.berm)
# reduced model for LR test
dofit10.fpc1601.glm <- glm(gici10~fpc1601.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fpc1601.berm <- glm(gici10~fpc1601.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fpc1601.berm)
lrtest(dofit10.fpc1601.glm,dofit10.fpc1601.berm)
# reduced model for LR test
dofit10.fpc1602.glm <- glm(gici10~fpc1602.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fpc1602.berm <- glm(gici10~fpc1602.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fpc1602.berm)
lrtest(dofit10.fpc1602.glm,dofit10.fpc1602.berm)
# NOTE(review): unlike the other beaches, the reduced malibu model omits
# the berm main effect, so this LR test evaluates the berm main effect and
# the interaction jointly -- confirm that is intended.
# reduced model for LR test
mafit10.fpc1601.glm <- glm(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=malibu[!is.na(malibu$fpc1601.pres),],family=poisson(link="log"))
# interaction model
mafit10.fpc1601.berm <- glm(gici10~fpc1601.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=malibu[!is.na(malibu$fpc1601.pres),],family=poisson(link="log"))
summary(mafit10.fpc1601.berm)
lrtest(mafit10.fpc1601.glm,mafit10.fpc1601.berm)
#-----------------------------------------
# 10-day illness and coliphage concentration
# by berm and groundwater flow conditions
# by beach
# stratified estimates
#-----------------------------------------
# Stratified prevalence-ratio estimates: refit the adjusted mpreg model
# separately within each groundwater stratum (avalon) or berm stratum
# (doheny, malibu).  Suffix .1 = high-risk stratum (above-median flow /
# berm open); suffix .0 = low-risk stratum (below-median flow / berm closed).
avfit10.fmc1602.gw.1 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fmc1602.gw.0 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
avfit10.fpc1601.gw.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fpc1601.gw.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
avfit10.fpc1602.gw.1 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fpc1602.gw.0 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
dofit10.fmc1602.berm.1 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fmc1602.berm.0 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
dofit10.fpc1601.berm.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fpc1601.berm.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
dofit10.fpc1602.berm.1 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fpc1602.berm.0 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
# NOTE(review): the "Open" stratum below does not exclude rows with missing
# fpc1601.pres, while the "Closed" stratum does -- confirm whether the
# !is.na() filter should apply to both strata.
mafit10.fpc1601.berm.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=malibu[malibu$berm=="Open",])
mafit10.fpc1601.berm.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=malibu[!is.na(malibu$fpc1601.pres) & malibu$berm=="Closed",])
# --------------------------------------
# save the results
# exclude glm objects and data frames
# (they are really large)
# --------------------------------------
# NOTE(review): the interaction fits listed at the end (*.gw, *.berm) ARE
# glm objects, despite the "exclude glm objects" note above -- confirm
# whether they should be saved.
# NOTE(review): the output path uses "~/dropbox/coliphage" (lower case)
# while paths elsewhere in this script use "~/Dropbox/Coliphage"; this
# only resolves on case-insensitive file systems.
save(
av.n10.fmc1602, av.n10.fpc1601, av.n10.fpc1602, do.n10.fmc1601,
do.n10.fmc1602, do.n10.fpc1601, do.n10.fpc1602, fa.n10.fpc1601,
ma.n10.fpc1601, mb.n10.fmc1601, mb.n10.fpc1601,
av.n10.fmc1602.int0, av.n10.fpc1601.int0, av.n10.fpc1602.int0,
do.n10.fmc1602.int0, do.n10.fpc1601.int0, do.n10.fpc1602.int0,
ma.n10.fpc1601.int0,
av.n10.fmc1602.int1, av.n10.fpc1601.int1, av.n10.fpc1602.int1,
do.n10.fmc1602.int1, do.n10.fpc1601.int1, do.n10.fpc1602.int1,
ma.n10.fpc1601.int1,
avfit10.fmc1602, avfit10.fpc1601, avfit10.fpc1602,
dofit10.fmc1601, dofit10.fmc1602, dofit10.fpc1601, dofit10.fpc1602,
fafit10.fpc1601, mafit10.fpc1601,mbfit10.fmc1601,mbfit10.fpc1601,
avfit10.fmc1602.gw.1, avfit10.fmc1602.gw.0, avfit10.fpc1601.gw.1, avfit10.fpc1601.gw.0,
avfit10.fpc1602.gw.1, avfit10.fpc1602.gw.0,
dofit10.fmc1602.berm.1, dofit10.fmc1602.berm.0,
dofit10.fpc1601.berm.1, dofit10.fpc1601.berm.0, dofit10.fpc1602.berm.1, dofit10.fpc1602.berm.0,
mafit10.fpc1601.berm.1, mafit10.fpc1601.berm.0,
avfit10.fmc1602.gw, avfit10.fpc1601.gw, avfit10.fpc1602.gw,
dofit10.fmc1602.berm, dofit10.fpc1601.berm, dofit10.fpc1602.berm,
mafit10.fpc1601.berm,
file="~/dropbox/coliphage/results/rawoutput/regress-10day-body-beach.Rdata"
)
| /src/Analysis/0-archive/2d-regress-10day-body-beach.R | no_license | jadebc/13beaches-coliphage | R | false | false | 14,108 | r | ##########################################
# Coliphage analysis - 6 beaches
# v1 by Jade 7/13/15
# This file conducts maximum likelihood regression
# to estimate prevalence ratios
# Results stratified by beach
# 10 day gi illness
##########################################
# ---- Environment setup ----
# NOTE(review): rm(list=ls()) and setwd() in a script are fragile -- they
# clear the caller's workspace and hard-code one author's directory layout.
rm(list=ls())
library(foreign)  # NOTE(review): only base read.csv is used below -- confirm foreign is needed
setwd("~/Dropbox/Coliphage/")
# --------------------------------------
# load and pre-process the
# analysis dataset
# (refer to the base functions script
# for details on the pre-processing)
# --------------------------------------
beaches13=read.csv("~/Dropbox/13beaches/data/final/13beaches-analysis.csv")
# load base functions (defines preprocess.6beaches and, presumably, the
# mpreg/lrtest helpers used later -- confirm in that script)
source("Programs/Analysis/0-base-functions.R")
data=preprocess.6beaches(beaches13)
# restrict to 6 beaches with coliphage data
beach.list=c("Avalon","Doheny","Malibu","Mission Bay",
"Fairhope","Goddard")
all=data[data$beach %in% beach.list,]
avalon=data[data$beach %in% "Avalon",]
doheny=data[data$beach %in% "Doheny",]
malibu=data[data$beach %in% "Malibu",]
mission=data[data$beach %in% "Mission Bay",]
fairhope=data[data$beach %in% "Fairhope",]
goddard=data[data$beach %in% "Goddard",]
data.list=list(all=all,avalon=avalon,doheny=doheny,mission=mission,
malibu=malibu,goddard=goddard,fairhope=fairhope)
# Filter each beach data frame; the value of the last assignment is what
# lapply() collects for each element.
data.list=lapply(data.list,function(df){
# drop individuals with no water quality information
df=subset(df,nowq==0)
# subset to non-missing exposure categories
# to make the robust CI calcs work
df=subset(df,df$bodycontact=="Yes")
})
# convert from list back to data frames
# (overwrites the unfiltered all/avalon/... objects in the global env)
list2env(data.list ,.GlobalEnv)
# --------------------------------------
# Calculate the actual Ns for each cell
# and store them for plotting and tables
# --------------------------------------
# Sample-size helper: count the observations with non-missing values for
# both the outcome and the exposure category.  table() drops NAs by
# default, so summing every cell of the cross-tabulation gives the number
# of complete pairs.
regN <- function(outcome,exposurecat) {
  crosstab <- table(outcome, exposurecat)
  sum(crosstab)
}
#avalon
av.n10.fmc1602 = regN(avalon$gici10,avalon$fmc1602.pres)
av.n10.fpc1601 = regN(avalon$gici10,avalon$fpc1601.pres)
av.n10.fpc1602 = regN(avalon$gici10,avalon$fpc1602.pres)
#doheny
do.n10.fmc1601 = regN(doheny$gici10,doheny$fmc1601.pres)
do.n10.fmc1602 = regN(doheny$gici10,doheny$fmc1602.pres)
do.n10.fpc1601 = regN(doheny$gici10,doheny$fpc1601.pres)
do.n10.fpc1602 = regN(doheny$gici10,doheny$fpc1602.pres)
# fairhope
fa.n10.fpc1601 = regN(fairhope$gici10,fairhope$fpc1601.pres)
# goddard
# fpc 1601 always present at goddard
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601 = regN(malibu$gici10,malibu$fpc1601.pres)
# mission bay
mb.n10.fmc1601 = regN(mission$gici10,mission$fmc1601.pres)
mb.n10.fpc1601 = regN(mission$gici10,mission$fpc1601.pres)
# n if low risk conditions ---------------------------
#avalon
av.n10.fmc1602.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fmc1602.pres[avalon$groundwater=="Below median flow"])
av.n10.fpc1601.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fpc1601.pres[avalon$groundwater=="Below median flow"])
av.n10.fpc1602.int0 = regN(avalon$gici3[avalon$groundwater=="Below median flow"],
avalon$fpc1602.pres[avalon$groundwater=="Below median flow"])
#doheny
do.n10.fmc1602.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fmc1602.pres[doheny$berm=="Closed"])
do.n10.fpc1601.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fpc1601.pres[doheny$berm=="Closed"])
do.n10.fpc1602.int0 = regN(doheny$gici3[doheny$berm=="Closed"],
doheny$fpc1602.pres[doheny$berm=="Closed"])
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601.int0 = regN(malibu$gici3[malibu$berm=="Closed"],malibu$fpc1601.pres[malibu$berm=="Closed"])
# n if high risk conditions ---------------------------
#avalon
av.n10.fmc1602.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fmc1602.pres[avalon$groundwater=="Above median flow"])
av.n10.fpc1601.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fpc1601.pres[avalon$groundwater=="Above median flow"])
av.n10.fpc1602.int1 = regN(avalon$gici3[avalon$groundwater=="Above median flow"],
avalon$fpc1602.pres[avalon$groundwater=="Above median flow"])
#doheny
do.n10.fmc1602.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fmc1602.pres[doheny$berm=="Open"])
do.n10.fpc1601.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fpc1601.pres[doheny$berm=="Open"])
do.n10.fpc1602.int1 = regN(doheny$gici3[doheny$berm=="Open"],
doheny$fpc1602.pres[doheny$berm=="Open"])
# malibu
# fmc 1602 and fpc 1602 always present at malibu
ma.n10.fpc1601.int1 = regN(malibu$gici3[malibu$berm=="Open"],malibu$fpc1601.pres[malibu$berm=="Open"])
#-----------------------------------------
# 10-day illness and coliphage concentration
# pooled across berm and groundwater flow conditions
# by beach
#-----------------------------------------
# avalon
# fmc1601 -- always present at avalon
avfit10.fmc1602 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
avfit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
avfit10.fpc1602 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=avalon,vcv=T)
# doheny
dofit10.fmc1601 = mpreg(gici10~fmc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny[!is.na(doheny$fmc1601.pres),],vcv=T)
dofit10.fmc1602 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
dofit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
dofit10.fpc1602 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=doheny,vcv=T)
# fairhope
fafit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=fairhope[!is.na(fairhope$fpc1601.pres),],vcv=T)
# goddard
# fpc 1601 always present at goddard
# malibu
# fmc 1602 and fpc 1602 always present at malibu
mafit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=malibu[!is.na(malibu$fpc1601.pres),],vcv=T)
# mission bay
mbfit10.fmc1601 = mpreg(gici10~fmc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=mission[!is.na(mission$fmc1601.pres),],vcv=T)
mbfit10.fpc1601 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
dat=mission[!is.na(mission$fpc1601.pres),],vcv=T)
#-----------------------------------------
# 10-day illness and coliphage concentration
# by berm and groundwater flow conditions
# by beach
# interaction tests
#-----------------------------------------
# Avalon: effect modification of the coliphage-illness association by
# groundwater flow.  For each indicator, fit a reduced (main effects only)
# model and an interaction model, then compare them with a likelihood
# ratio test (lrtest comes from the sourced base functions or the lmtest
# package -- confirm).
# All glm calls now spell out `data=` explicitly; the originals relied on
# partial matching of `dat=` to glm's `data` argument.
# reduced model for LR test
avfit10.fmc1602.glm <- glm(gici10~fmc1602.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fmc1602.gw <- glm(gici10~fmc1602.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fmc1602.gw)
lrtest(avfit10.fmc1602.glm,avfit10.fmc1602.gw)
# reduced model for LR test
avfit10.fpc1601.glm <- glm(gici10~fpc1601.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fpc1601.gw <- glm(gici10~fpc1601.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fpc1601.gw)
lrtest(avfit10.fpc1601.glm,avfit10.fpc1601.gw)
# reduced model for LR test
avfit10.fpc1602.glm <- glm(gici10~fpc1602.pres+groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
# interaction model
avfit10.fpc1602.gw <- glm(gici10~fpc1602.pres*groundwater+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=avalon,family=poisson(link="log"))
summary(avfit10.fpc1602.gw)
lrtest(avfit10.fpc1602.glm,avfit10.fpc1602.gw)
# Doheny and Malibu: effect modification by berm status, same structure as
# the avalon block above.  `data=` is spelled out explicitly (the originals
# relied on partial matching of `dat=`).
# berm always closed when fmc1601 measured so no interaction assessment
# reduced model for LR test
dofit10.fmc1602.glm <- glm(gici10~fmc1602.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fmc1602.berm <- glm(gici10~fmc1602.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fmc1602.berm)
lrtest(dofit10.fmc1602.glm,dofit10.fmc1602.berm)
# reduced model for LR test
dofit10.fpc1601.glm <- glm(gici10~fpc1601.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fpc1601.berm <- glm(gici10~fpc1601.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fpc1601.berm)
lrtest(dofit10.fpc1601.glm,dofit10.fpc1601.berm)
# reduced model for LR test
dofit10.fpc1602.glm <- glm(gici10~fpc1602.pres+berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
# interaction model
dofit10.fpc1602.berm <- glm(gici10~fpc1602.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=doheny,family=poisson(link="log"))
summary(dofit10.fpc1602.berm)
lrtest(dofit10.fpc1602.glm,dofit10.fpc1602.berm)
# NOTE(review): unlike the other beaches, the reduced malibu model omits
# the berm main effect, so this LR test evaluates the berm main effect and
# the interaction jointly -- confirm that is intended.
# reduced model for LR test
mafit10.fpc1601.glm <- glm(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=malibu[!is.na(malibu$fpc1601.pres),],family=poisson(link="log"))
# interaction model
mafit10.fpc1601.berm <- glm(gici10~fpc1601.pres*berm+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
  data=malibu[!is.na(malibu$fpc1601.pres),],family=poisson(link="log"))
summary(mafit10.fpc1601.berm)
lrtest(mafit10.fpc1601.glm,mafit10.fpc1601.berm)
#-----------------------------------------
# 10-day illness and coliphage concentration
# by berm and groundwater flow conditions
# by beach
# stratified estimates
#-----------------------------------------
avfit10.fmc1602.gw.1 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fmc1602.gw.0 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
avfit10.fpc1601.gw.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fpc1601.gw.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
avfit10.fpc1602.gw.1 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Above median flow",])
avfit10.fpc1602.gw.0 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=avalon[avalon$groundwater=="Below median flow",])
dofit10.fmc1602.berm.1 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fmc1602.berm.0 = mpreg(gici10~fmc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
dofit10.fpc1601.berm.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fpc1601.berm.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
dofit10.fpc1602.berm.1 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Open",])
dofit10.fpc1602.berm.0 = mpreg(gici10~fpc1602.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=doheny[doheny$berm=="Closed",])
mafit10.fpc1601.berm.1 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=malibu[malibu$berm=="Open",])
mafit10.fpc1601.berm.0 = mpreg(gici10~fpc1601.pres+agecat+female+racewhite+gichron+anim_any+gicontactbase+rawfood,
vcv=T,dat=malibu[!is.na(malibu$fpc1601.pres) & malibu$berm=="Closed",])
# --------------------------------------
# save the results
# exclude glm objects and data frames
# (they are really large)
# --------------------------------------
save(
av.n10.fmc1602, av.n10.fpc1601, av.n10.fpc1602, do.n10.fmc1601,
do.n10.fmc1602, do.n10.fpc1601, do.n10.fpc1602, fa.n10.fpc1601,
ma.n10.fpc1601, mb.n10.fmc1601, mb.n10.fpc1601,
av.n10.fmc1602.int0, av.n10.fpc1601.int0, av.n10.fpc1602.int0,
do.n10.fmc1602.int0, do.n10.fpc1601.int0, do.n10.fpc1602.int0,
ma.n10.fpc1601.int0,
av.n10.fmc1602.int1, av.n10.fpc1601.int1, av.n10.fpc1602.int1,
do.n10.fmc1602.int1, do.n10.fpc1601.int1, do.n10.fpc1602.int1,
ma.n10.fpc1601.int1,
avfit10.fmc1602, avfit10.fpc1601, avfit10.fpc1602,
dofit10.fmc1601, dofit10.fmc1602, dofit10.fpc1601, dofit10.fpc1602,
fafit10.fpc1601, mafit10.fpc1601,mbfit10.fmc1601,mbfit10.fpc1601,
avfit10.fmc1602.gw.1, avfit10.fmc1602.gw.0, avfit10.fpc1601.gw.1, avfit10.fpc1601.gw.0,
avfit10.fpc1602.gw.1, avfit10.fpc1602.gw.0,
dofit10.fmc1602.berm.1, dofit10.fmc1602.berm.0,
dofit10.fpc1601.berm.1, dofit10.fpc1601.berm.0, dofit10.fpc1602.berm.1, dofit10.fpc1602.berm.0,
mafit10.fpc1601.berm.1, mafit10.fpc1601.berm.0,
avfit10.fmc1602.gw, avfit10.fpc1601.gw, avfit10.fpc1602.gw,
dofit10.fmc1602.berm, dofit10.fpc1601.berm, dofit10.fpc1602.berm,
mafit10.fpc1601.berm,
file="~/dropbox/coliphage/results/rawoutput/regress-10day-body-beach.Rdata"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-dropout.R
\name{nn_dropout3d}
\alias{nn_dropout3d}
\title{Dropout3D module}
\usage{
nn_dropout3d(p = 0.5, inplace = FALSE)
}
\arguments{
\item{p}{(float, optional): probability of an element to be zeroed.}
\item{inplace}{(bool, optional): If set to \code{TRUE}, will do this operation
in-place}
}
\description{
Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the \eqn{j}-th channel of the \eqn{i}-th sample in the
batched input is a 3D tensor \eqn{\mbox{input}[i, j]}).
}
\details{
Each channel will be zeroed out independently on every forward call with
probability \code{p} using samples from a Bernoulli distribution.
Usually the input comes from \link{nn_conv2d} modules.
As described in the paper
\href{https://arxiv.org/abs/1411.4280}{Efficient Object Localization Using Convolutional Networks} ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, \link{nn_dropout3d} will help promote independence between
feature maps and should be used instead.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, C, D, H, W)}
\item Output: \eqn{(N, C, D, H, W)} (same shape as input)
}
}
\examples{
if (torch_is_installed()) {
m <- nn_dropout3d(p = 0.2)
input <- torch_randn(20, 16, 4, 32, 32)
output <- m(input)
}
}
| /man/nn_dropout3d.Rd | permissive | mlverse/torch | R | false | true | 1,526 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-dropout.R
\name{nn_dropout3d}
\alias{nn_dropout3d}
\title{Dropout3D module}
\usage{
nn_dropout3d(p = 0.5, inplace = FALSE)
}
\arguments{
\item{p}{(float, optional): probability of an element to be zeroed.}
\item{inplace}{(bool, optional): If set to \code{TRUE}, will do this operation
in-place}
}
\description{
Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the \eqn{j}-th channel of the \eqn{i}-th sample in the
batched input is a 3D tensor \eqn{\mbox{input}[i, j]}).
}
\details{
Each channel will be zeroed out independently on every forward call with
probability \code{p} using samples from a Bernoulli distribution.
Usually the input comes from \link{nn_conv2d} modules.
As described in the paper
\href{https://arxiv.org/abs/1411.4280}{Efficient Object Localization Using Convolutional Networks} ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, \link{nn_dropout3d} will help promote independence between
feature maps and should be used instead.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, C, D, H, W)}
\item Output: \eqn{(N, C, D, H, W)} (same shape as input)
}
}
\examples{
if (torch_is_installed()) {
m <- nn_dropout3d(p = 0.2)
input <- torch_randn(20, 16, 4, 32, 32)
output <- m(input)
}
}
|
# Download and launch the Shiny app stored in the "shinyapps" GitHub
# repository of user "wennaxi", under the "ahdc_comm_detect_lda_18" folder.
library(shiny)
runGitHub("shinyapps", "wennaxi", subdir = "ahdc_comm_detect_lda_18")
| /ahdc_comm_detect_stan_candidacy/run.R | no_license | wennaxi/shinyapps | R | false | false | 88 | r | library(shiny)
runGitHub( "shinyapps", "wennaxi", subdir = "ahdc_comm_detect_lda_18")
|
# Package-level documentation stub; the NULL below is the standard
# roxygen2 target object for package docs.
# NOTE(review): @docType package is deprecated in current roxygen2
# (the "_PACKAGE" sentinel is preferred) -- left unchanged here.
#' trait_match
#'
#' @name trait_match
#' @docType package
#' @description Run models to analyze trait matching in networks.
NULL
#'
#' @name trait_match
#' @docType package
#' @description Run models to analyze trait matching in networks.
NULL |
/88 squid/analysis.R | no_license | iMissile/R.projects | R | false | false | 5,187 | r | ||
# Draw vertical error bars at (x, y) on the current plot.
#
# Args:
#   x, y:   point coordinates (equal-length numeric vectors)
#   upper:  distance from y to the top of each bar
#   lower:  distance from y to the bottom of each bar (defaults to upper,
#           i.e. symmetric bars)
#   color:  bar color (passed to arrows() as col)
#   length: cap length in inches (arrows() `length`); this parameter
#           shadows base::length, which still resolves correctly because R
#           skips non-function bindings when a symbol is called
#   ...:    further arguments passed to arrows()
#
# Stops with an error if the vectors are not all the same length.
error.bar <- function(x, y, upper, lower=upper, color,length=0.06,...){
  # scalar comparisons: use short-circuiting || (original used vectorized |)
  if (length(x) != length(y) || length(y) != length(lower) || length(lower) != length(upper)) {
    stop("vectors must be same length")
  }
  # code=3 draws heads at both ends; angle=90 turns the heads into flat caps
  arrows(x, y + upper, x, y - lower, col=color, angle=90, code=3, length=length, ...)
}
# Plot the distribution of mPing insertion distances to TSS (5' side),
# comparing observed ("Somatic") counts against simulation, with error bars.
pdf("mping_intergenic_5distance_withsim.pdf")
par(mar=c(6,4,4,2), cex=1.2)
som5 <- read.table("random.mRNA.5primer.distance.distr")
#str5 <- read.table("../mPing_distr/Strains.mRNA.5primer.distance.distr")
#ril5 <- read.table("../mPing_distr/RIL.mRNA.5primer.distance.distr")
sim5 <- read.table("simulation_samplesize1000_numofrun10_results.mRNA.5primer.distance.distr")
# trim the first and last distance bins from each table
som5 <- som5[-1,]
#str5 <- str5[-1,]
#ril5 <- ril5[-1,]
sim5 <- sim5[-1,]
som5 <- som5[-length(som5[,1]),]
#str5 <- str5[-length(str5[,1]),]
#ril5 <- ril5[-length(ril5[,1]),]
sim5 <- sim5[-length(sim5[,1]),]
# column 4 holds the proportion per bin; rev() plots bins nearest the TSS last
plot(rev(som5[,4]), type='b', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(0,0.2), ylab="Proportion", xlab="")
#lines(rev(ril5[,4]), type='b',pch= 2,lwd = 2 , col="steelblue2")
#lines(rev(str5[,4]), type='b',pch= 3,lwd = 2 , col="sandybrown")
lines(rev(sim5[,4]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dim gray")
# NOTE(review): both the upper and lower bar use sim5[,7]-sim5[,4]
# (symmetric around the mean); if column 7 is an upper CI bound and another
# column holds the lower bound, the lower offset may need its own column.
error.bar(1:length(sim5[,4]), rev(sim5[,4]), rev(sim5[,7]-sim5[,4]), rev(sim5[,7]-sim5[,4]), 'dim gray')
#yaxis <- seq(1:length(som5[,1])+0.5
axis(1,seq(1:length(som5[,1])),line=0, labels=rep("",length(som5[,1])))
text(seq(1:length(som5[,1][-1]))+0.5,rep(-0.02,7), cex=1, offset=2,labels=rev(som5[,1]*500/-1000)[-1],srt=55,xpd=TRUE)
# NOTE(review): the legend still lists "RIL" and "Strains" although those
# series are commented out above; consider trimming it to the two plotted
# series.  Also the axis label string "(kp)" below likely means "(kb)".
legend('topright', bty='n', border='NA', lty= c(1,2,3,4), pch = c(1,2,3,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "steelblue2", "sandybrown", "dim gray"), c("Somatic", "RIL", "Strains", "Simulation"))
mtext("Distance to TSS (kp)", side=1,cex=1.2, at=9,line=3)
dev.off()
# Draw vertical error bars at (x, y) on the current plot.
#
# Args:
#   x, y:   point coordinates (equal-length numeric vectors)
#   upper:  distance from y to the top of each bar
#   lower:  distance from y to the bottom of each bar (defaults to upper,
#           i.e. symmetric bars)
#   color:  bar color (passed to arrows() as col)
#   length: cap length in inches (arrows() `length`); this parameter
#           shadows base::length, which still resolves correctly because R
#           skips non-function bindings when a symbol is called
#   ...:    further arguments passed to arrows()
#
# Stops with an error if the vectors are not all the same length.
error.bar <- function(x, y, upper, lower=upper, color,length=0.06,...){
  # scalar comparisons: use short-circuiting || (original used vectorized |)
  if (length(x) != length(y) || length(y) != length(lower) || length(lower) != length(upper)) {
    stop("vectors must be same length")
  }
  # code=3 draws heads at both ends; angle=90 turns the heads into flat caps
  arrows(x, y + upper, x, y - lower, col=color, angle=90, code=3, length=length, ...)
}
# Plot the distribution of mPing insertion distances to the nearest
# transcription start site (5' side), comparing somatic observations with a
# simulation of excisions.  Inputs are read from the working directory; the
# figure is written to a PDF there.
pdf("mping_intergenic_5distance_withsim.pdf")
par(mar=c(6,4,4,2), cex=1.2)
som5 <- read.table("random.mRNA.5primer.distance.distr")
#str5 <- read.table("../mPing_distr/Strains.mRNA.5primer.distance.distr")
#ril5 <- read.table("../mPing_distr/RIL.mRNA.5primer.distance.distr")
sim5 <- read.table("simulation_samplesize1000_numofrun10_results.mRNA.5primer.distance.distr")
# Drop the first and last rows of each table (open-ended distance bins).
som5 <- som5[-1,]
#str5 <- str5[-1,]
#ril5 <- ril5[-1,]
sim5 <- sim5[-1,]
som5 <- som5[-length(som5[,1]),]
#str5 <- str5[-length(str5[,1]),]
#ril5 <- ril5[-length(ril5[,1]),]
sim5 <- sim5[-length(sim5[,1]),]
# Column 4 holds the per-bin proportion; bins are reversed so the x axis runs
# towards the TSS.
plot(rev(som5[,4]), type='b', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(0,0.2), ylab="Proportion", xlab="")
#lines(rev(ril5[,4]), type='b',pch= 2,lwd = 2 , col="steelblue2")
#lines(rev(str5[,4]), type='b',pch= 3,lwd = 2 , col="sandybrown")
# FIX: "dim gray" (with a space) is not a valid R colour name -- the graphics
# engine stops with "invalid color name"; the valid name is "dimgray".
lines(rev(sim5[,4]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dimgray")
# Column 7 presumably holds the simulation upper bound, so symmetric error
# bars (upper == lower) are drawn -- confirm against the input file format.
error.bar(1:length(sim5[,4]), rev(sim5[,4]), rev(sim5[,7]-sim5[,4]), rev(sim5[,7]-sim5[,4]), 'dimgray')
#yaxis <- seq(1:length(som5[,1])+0.5
axis(1,seq(1:length(som5[,1])),line=0, labels=rep("",length(som5[,1])))
# Rotated tick labels: 500-bp bin indices converted to kilobases, negated so
# the axis reads as distance upstream of the TSS.
text(seq(1:length(som5[,1][-1]))+0.5,rep(-0.02,7), cex=1, offset=2,labels=rev(som5[,1]*500/-1000)[-1],srt=55,xpd=TRUE)
legend('topright', bty='n', border='NA', lty= c(1,2,3,4), pch = c(1,2,3,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "steelblue2", "sandybrown", "dimgray"), c("Somatic", "RIL", "Strains", "Simulation"))
# FIX: axis-label typo "kp" -> "kb" (the labels above are in kilobases).
mtext("Distance to TSS (kb)", side=1,cex=1.2, at=9,line=3)
dev.off()
|
# S4 class holding the inputs and outputs of one "SimSam" closed-loop
# simulation/estimation experiment.  Slots fall into five groups (section
# comments below); objects are populated by the setMethod("initialize",
# "SimSam", ...) defined after this class.
setClass("SimSam",representation(
# Description: free-text metadata identifying the run
Name="character",Date="character",Author="character",
Notes="character",PrimarySource="character",
# Dimensions of the management strategy evaluation (MSE)
nsim="integer",npop="integer",nages="integer", # MSE dimensions
nyears="integer",nsubyears="integer",nareas="integer", # MSE dimensions
proyears="integer", targpop="integer", nfleets="integer", # Proyears, number of management procedures
interval="integer",nma="integer",ma="array", # Number of movement age classes, age class definitions
nlen="integer",lenbins="numeric", # length bins (edges) -- NOTE(review): original comment said "Proyears"; confirm
mulen="numeric", # mean length of each length bin
# Observation model.  Convention in the initialize method: *imp slots are
# imprecision draws (from the Obs object's CV bounds), *b slots are bias
# factors, *err slots are per-simulation/per-year lognormal error arrays.
Cimp="numeric",Cb="numeric",Cerr="array",
Iimp="numeric",Ibeta="numeric",Ierr="array",
nCAAobs="numeric",nCALobs="numeric",Lcv="numeric",
Mb="numeric",Kb="numeric",t0b="numeric",Linfb="numeric",
LFCb="numeric",LFSb="numeric",
FMSYb="numeric",FMSY_Mb="numeric",BMSY_B0b="numeric",
ageMb="numeric",
Dimp="numeric", Db="numeric",Derr="array",
Btimp="numeric", Btb="numeric",Bterr="array",
Ftimp="numeric", Ftb="numeric",Fterr="array",
hb="numeric",
Recbcv="numeric",
IMSYb="numeric", MSYb="numeric", BMSYb="numeric",
# Management quantities (arrays filled during projection)
C="array",
D="array",
B_BMSY="array",
F_FMSY="array",
B="array",
SSB="array",
TAC="array",
simlist="list",
# Performance metrics
Perf="data.frame",
POF="array",
Y="array",
AAVY="array",
PB10="array",
PB50="array",
PB100="array"
))
setMethod("initialize", "SimSam", function(.Object,OM,Obs,movtype=2,OMDir="G:/M3",verbose=0,
complexF=0,complexRD=0,M3temp="C:/M3temp/"){
#.Object})
#.Object<-new('SimSam')
# Bias in fraction in spawning area (unfished)
# Auto-correlation in recrutiment deviations is currently disabled
set.seed(OM@seed)
if(class(OM)!='OM'){
print(paste('Could not run SimSam:',deparse(substitute(OMd)),'not of class OM'))
stop()
}
if(class(Obs)!='Obs'){
print(paste('Could not run SimSam:',deparse(substitute(Obs)),'not of class Obs'))
stop()
}
# copy over dimensions ------
dimslots<-slotNames(OM)[1:17]
for(i in 1:17)slot(.Object,dimslots[i])<-slot(OM,dimslots[i])
cat("Constructing arrays")
cat("\n")
flush.console()
# Dimensions S P A Y M R
nsim<-OM@nsim
npop<-OM@npop
nyears<-OM@nyears
proyears<-OM@proyears
nages<-OM@nages
nsubyears<-OM@nsubyears
nareas<-OM@nareas
nfleets<-OM@nfleets
.Object@nfleets<-nfleets
targpop<-as.integer(OM@targpop)
.Object@targpop<-targpop
allyears<-nyears+proyears
nlen<-OM@nlen
lenbins<-OM@lenbins
mulen<-OM@mulen
Wt_age<-OM@Wt_age
nZeq<-OM@nZeq
nydist<-OM@nydist
nyeq<-OM@nyeq
# Define arrays -----------------------------------------------------------
# Management variables
# !!!! This a temporary fix for simulation testing- keep maturity constant
ind2<-ind<-TEG(dim(OM@mat))
ind2[,4]<-1
OM@mat[ind]<-OM@mat[ind2]
OM@Wt_age[ind]<-OM@Wt_age[ind]
OM@Mmov<-OM@mov
OM@Recdevs[,,1]<-1
# Run historical simulation ----------------------------------------------
M<-OM@M
Mtemp<-array(0,dim(OM@M))
Mtemp[,,2:nages,]<-OM@M[,,1:(nages-1),]
surv=tomt(exp(-apply(Mtemp[,,,1],2:1,cumsum)))
surv[,,nages]<-surv[,,nages]*exp(-M[,,nages,1])/(1-exp(-M[,,nages,1]))
N<-SSN<-NSN<-SSB<-VBA<-Z<-array(NA,c(nsim,npop,nages,allyears,nsubyears,nareas)) # only need aggregated catch for these purposes
SSBA<-array(NA,c(nsim,npop,allyears))
FD<-array(NA,c(nsim,nfleets,allyears,nsubyears,nareas)) # Fishing distribution
Fdist<-array(NA,c(nsim,npop,nfleets,nareas))
FM<-VB<-C<-array(NA,c(nsim,npop,nages,allyears,nsubyears,nareas,nfleets))
CA<-array(NA,c(nsim,npop,allyears,nsubyears,nareas))
mref<-c(2:nsubyears,1) # movement reference
y<-1
m<-1
# need to remake all these for OM renewal
RFL<-array(NA,c(nsim,nfleets,nlen,nyears,nsubyears,nareas))
indL<-TEG(dim(RFL))
indE<-indL[,c(1,2,4,5,6)]
RFL[indL]<-OM@q[indL[,c(1,2)]]*OM@sel[indL[,1:3]]*OM@E[indE]
#got to here! translate RFL (fishing mort by length to fishing mort by age)
Ftrans<-array(0,c(nsim,nfleets,nyears,nsubyears,nages,nlen,nareas,npop))
Find<-TEG(dim(Ftrans))
Lind<-Find[,c(1,2,6,3,4,7)] # s f l y m r
Ftrans[Find]<-OM@iALK[Find[,c(1,8,3,5,6)]]*RFL[Lind]
RF<-apply(Ftrans,c(1,8,5,3,4,7,2),sum) # s p a y m r f
maxRF<-apply(RF,c(1,2,4,5,6,7),max) # s p y r f
Rind<-TEG(c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets))#as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,1:nareas,1:nfleets))
sel<-RF
sel[Rind]<-sel[Rind]/maxRF[Rind[,c(1,2,4,5,6,7)]]
sel<-sel[,,,nyears,nsubyears,,] # s p a r f # Take this from last year, in future simulations this may be by year so leave this code!
SFAYMR<-as.matrix(expand.grid(1:nsim, 1:nfleets,1:nages,y,m,1:nareas)) # Set up some array indexes
SFAY<-SFAYMR[,1:4]
SPAYMR<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,y,m,1:nareas)) # Set up some array indexes
SARP<-SPAYMR[,c(1,3,6,2)]
SPA<-SPAYMR[,1:3]
SPR<-SPAYMR[,c(1,2,6)]
SPMR<-SPAYMR[,c(1,2,5,6)]
SP<-SPAYMR[,1:2]
SA<-SPAYMR[,c(1,3)]
SAR<-SPAYMR[,c(1,3,6)]
SPAR<-SPAYMR[,c(1:3,6)]
SPAY<-SPAYMR[,1:4]
SPAM<-SPAYMR[,c(1:3,5)]
# New model initialization ------------ pay paymrf
R0<- OM@R0
h<-OM@h
mat<-OM@mat
mov<-OM@mov
Zeq<-array(apply(M[,,,1:nZeq],1:3,mean),c(nsim,npop,nages,nsubyears,nareas))/nsubyears+apply(apply(RF[,,,1:nZeq,,,],1:6,sum),c(1,2,3,5,6),mean)
SSB0<-apply(surv*array(R0,dim(surv))*Wt_age[,,,1]*mat[,,,1],1:2,sum)
SSBpR<-SSB0/R0
stemp<-array(1/nareas,dim=c(nsim,npop,nsubyears,nareas))
movi<-mov[,,nages,,,]
for(y in 1:nydist){
for(m in 1:nsubyears){
if(m==1){
stemp[,,m,]<-apply(array(rep(stemp[,,nsubyears,],nareas)*movi[,,m,,],c(nsim,npop,nareas,nareas)),c(1,2,4),sum)
}else{
stemp[,,m,]<-apply(array(rep(stemp[,,m-1,],nareas)*movi[,,m,,],c(nsim,npop,nareas,nareas)),c(1,2,4),sum)
}
}
}
indN<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1,nsubyears,1:nareas))#
N[indN]=R0[indN[,1:2]]*surv[indN[,1:3]]*stemp[indN[,c(1,2,5,6)]]
SSB[,,,1,nsubyears,]<-N[,,,nyears,nsubyears,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
for(y in 1:nyeq){
for(m in 1:nsubyears){
if(m==1){ # first subyear
N[,,,1,m,]<-exp(-Zeq[,,,nsubyears,])*N[,,,1,nsubyears,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
}else if(m==2){ # spawning subyear
N[,,,1,m,]<-exp(-Zeq[,,,m-1,])*N[,,,1,m-1,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
spawnr<-apply(SSB[,,,1,m,],c(1,2,4),sum)/array(apply(SSB[,,,1,m,],1:2,sum),dim(SSB)[c(1,2,6)])
SSBt<-apply(SSB[,,,1,m,],1:2,sum)
N[,,nages,1,m,]<-N[,,nages,1,m,]+N[,,nages-1,1,m,] # plus group
N[,,2:(nages-1),1,m,]<-N[,,1:(nages-2),1,m,]
N[,,1,1,m,]<-spawnr*array(((0.8*R0*h*SSBt)/(0.2*SSBpR*R0*(1-h)+(h-0.2)*SSBt)),dim(spawnr))
#print(sum(N[1,1,1,1,m,]))
#SSBA[,,1]<-apply(N[,,,1,m,]*array(Wt_age[,,,1]*OM@mat[,,,nyears],dim=c(nsim,npop,nages,nareas)),1:2,sum)
}else{ # after spawning subyear
N[,,,1,m,]<-exp(-Zeq[,,,m-1,])*N[,,,1,m-1,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
} # End of if subyear
} # end of subyear
} # end of equlibrium calculation year nyeq
bR<-log(5*h)/(0.8*SSB0) # Ricker SR params
aR<-exp(bR*SSB0)/SSBpR # Ricker SR params
y<-1
m<-1
SPAYMRF2<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,y,m,1:nareas,1:nfleets))
SF2<-SPAYMRF2[,c(1,7)]
SFA2<-SPAYMRF2[,c(1,7,3)]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
SPRFA2<-SPAYMRF2[,c(1,2,6,7,3)]
SPFR2<-SPAYMRF2[,c(1,2,7,6)]
SPAY2<-SPAYMRF2[,1:4]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
SFAYR2<-SPAYMRF2[,c(1,7,3,4,6)]
SPAYRF2<-SPAYMRF2[,c(1,2,3,4,6,7)]
SPARF2<-SPAYMRF2[,c(1,2,3,6,7)]
for(m in 1:nsubyears){
SPAYMRF2[,5]<-m
SPAYMR2<-SPAYMRF2[,1:6]
SPAYMR[,5]<-m
VB[SPAYMRF2]<-N[SPAYMR2]*Wt_age[SPAY2]*sel[SPARF2] # Calculate vunerable biomass
#FM[SPAYMRF2]<-RF[SPAYRF2]#*FD[FYMR2]
Ftot<-apply(RF[,,,y,m,,],1:4,sum)
Z[SPAYMR]<-Ftot[SPAR]+M[SPAY]/nsubyears
C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2] # need to add back in mortality rate before C calculation
#C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2]
}
SPAYMR[,5]<-1
SPAYMRF2[,5]<-1
SPAYMR2<-SPAYMRF2[,1:6]
cat("Re-running historical simulations")
cat("\n")
for(y in 2:nyears){
SPAYMR[,4]<-y
SPAY<-SPAYMR[,1:4]
SPAYMRF2[,4]<-y
SPAYRF2[,4]<-y
SPAY2<-SPAYMRF2[,1:4]
SFAY2<-SPAYMRF2[,c(1,7,3,4)]
SFAYR2<-SPAYMRF2[,c(1,7,3,4,6)]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
for(m in 1:nsubyears){
SPAYMR[,5]<-m
SPAM<-SPAYMR[,c(1:3,5)]
SPAYMRF2[,5]<-m
SFYMR2<-SPAYMRF2[,c(1,7,4:6)]
SPAYMR2<-SPAYMRF2[,1:6]
if(m==1){
N[,,,y,m,]<-N[,,,y-1,nsubyears,]*exp(-Z[,,,y-1,nsubyears,])
}else{
N[,,,y,m,]<-N[,,,y,m-1,]*exp(-Z[,,,y,m-1,])
}
# move fish
N[,,,y,m,]<-domov(N[,,,y,m,],OM@mov[,,,m,,])
VB[SPAYMRF2]<-N[SPAYMR2]*Wt_age[SPAY2]*sel[SPARF2] # Calculate prop to vunerable biomass
Ftot<-apply(RF[,,,y,m,,],1:4,sum)
Z[SPAYMR]<-Ftot[SPAR]+M[SPAY]/nsubyears
# harvest fish
#C[SPAYMRF2]<-N[SPAYMR2]*(exp(Z[SPAYMR2])-1)*RF[SPAYMRF2]/Z[SPAYMR2]
C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2]
# age individuals
for(pp in 1:npop){
if(OM@Recsubyr[pp]==m){
# age fish
SSBA[,pp,y]<-apply(N[,pp,,y-1,m,]*array(Wt_age[,pp,,nyears]*OM@mat[,pp,,nyears],dim=c(nsim,nages,nareas)),1,sum)
SSBdist<-apply(N[,pp,,y-1,m,]*array(Wt_age[,pp,,nyears]*OM@mat[,pp,,nyears],dim=c(nsim,nages,nareas)),c(1,3),sum)/SSBA[,pp,y]
N[,pp,nages,y,m,]<-N[,pp,nages,y,m,]+N[,pp,nages-1,y,m,]
N[,pp,2:(nages-1),y,m,]<-N[,pp,1:(nages-2),y,m,]
# recruit fish
if(OM@SRrel[pp]==1){ # Beverton-Holt recruitment
rec<-OM@Recdevs[,pp,y]*(0.8*OM@R0[,pp]*OM@h[,pp]*SSBA[,pp,y])/(0.2*SSBpR[,pp]*OM@R0[,pp]*(1-OM@h[,pp])+(OM@h[,pp]-0.2)*SSBA[,pp,y])
}else{ # Most transparent form of the Ricker uses alpha and beta params
rec<-OM@Recdevs[,pp,y]*aR[,pp]*SSBA[,pp,y]*exp(-bR[,pp]*SSBA[,pp,y])
}
N[,pp,1,y,m,]<-rec*SSBdist
} # if its the right subyear
} # end of pop
SSB[,,,y,m,]<-N[,,,y,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
} # end of subyear
} # end of year
Bcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears]*OM@mat[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
#Bcur<-apply(N[ss,1,,nyears,nsubyears,]*
# array(Wt_age[ss,1,,nyears]*OM@mat[ss,1,,nyears],c(nages,nareas)),1:2,sum)
#Bcur<-sum(array(N[targpop,,nyears,nsubyears,],c(length(targpop),nages,nareas))*
# array(Wt_age[targpop,,nyears]*mat[targpop,,nyears],c(length(targpop),nages,nareas)))
SSBall<-N*array(Wt_age,dim(N))*array(OM@mat,dim(N))
RAI<-apply(SSBall,c(1,4,5,6),sum)
RAI<-RAI[,1:nyears,,]
RAI<-RAI/array(apply(RAI,1,mean),dim(RAI))
D<-Bcur/SSB0 # Check against OM@D (remember only targetpop is matched)
# Generate observation errors ---------------------------------------------
.Object@Cimp<-runif(nsim,Obs@Ccv[1],Obs@Ccv[2])
.Object@Cb<-trlnorm(nsim,1,Obs@Cbcv)
.Object@Cerr<-array(trlnorm(nsim*allyears,rep(.Object@Cb,allyears),rep(.Object@Cimp,allyears)),c(nsim,allyears))
.Object@Iimp<-runif(nsim,Obs@Icv[1],Obs@Icv[2])
.Object@Ierr<-array(trlnorm(nsim*allyears,1,rep(.Object@Iimp,allyears)),c(nsim,allyears))
.Object@Ibeta<-exp(runif(nsim,log(Obs@Ibeta[1]),log(Obs@Ibeta[2])))
.Object@Btimp<-runif(nsim,Obs@Btcv[1],Obs@Btcv[2])
.Object@Btb<-trlnorm(nsim,1,Obs@Btbcv)
.Object@Bterr<-array(trlnorm(nsim*allyears,rep(.Object@Btb,allyears),rep(.Object@Btimp,allyears)),c(nsim,allyears))
.Object@Mb<-trlnorm(nsim,1,Obs@Mbcv)
.Object@Kb<-trlnorm(nsim,1,Obs@Kbcv)
.Object@Linfb<-trlnorm(nsim,1,Obs@Linfbcv)
.Object@t0b<-rep(1,nsim)
# Generate data ------------------------------------------------
datfile<-paste(OMDir,"/M3.dat",sep="")
cat("\n")
cat("Generating data")
cat("\n")
#sof<-apply(array(OM@E[,,nyears]*OM@q,c(nsim,nfleets,nages))*sel,c(1,3),sum)
#sof<-sof/apply(sof,1,max)
SFAY1<-SFAY2
Find<-as.matrix(expand.grid(1:nsim,1:npop,1:nareas,1:nfleets))[,c(1,2,4,3)]
FindSF<-Find[,c(1,3)]
FindSPR<-Find[,c(1,2,4)]
SPFR3<-as.matrix(expand.grid(1:nsim,1:npop,1:nfleets,1:nareas))
SPR3<-SPFR3[,c(1,2,4)]
# Age-length key --
#contour(OM@iALK[1,1,1,,])
# Spawning --
spawnr<-array(NA,c(nsim,npop,nareas))
for(pp in 1:npop){
m<-OM@Recsubyr[pp]
spawnr[,pp,]<-apply(SSN[,pp,,1,m,]*array(Wt_age[,pp,,1],dim=c(nsim,nages,nareas)),c(1,3),sum)/SSBA[,pp,y]
}
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nareas))
sums<-apply(spawnr,1:2,sum)
sind<-ind[,1:2]
spawnr[ind]<-spawnr[ind]/sums[sind]
# Fishery data -------------
# Catch
mult<-nyears*nsubyears*nareas*nfleets
#Cerr<-array(trlnorm(nsim*mult,rep(.Object@Cb,mult),rep(.Object@Cimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Cerr<-array(trlnorm(nsim*mult,rep(1,mult),rep(.Object@Cimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Cobsta<-array(NA,c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,1:nsubyears,1:nareas,1:nfleets))
Cobsta[ind]<-C[ind]*Wt_age[ind[,1:4]]
Cobst<-apply(Cobsta,c(1,4:7),sum,na.rm=T)*Cerr
Cobsta<-apply(Cobsta,c(1,3:7),sum,na.rm=T)
# CPUE
Ierr<-array(trlnorm(nsim*mult,1,rep(.Object@Iimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Ibeta<-exp(runif(nsim,log(Obs@Ibeta[1]),log(Obs@Ibeta[2])))
if(complexF==1){
# SYMRF SPAY M R F2
Iobst<-apply(VB[,,,1:nyears,,,],c(1, 4:7),sum)#^Ibeta
Isum<-apply(Iobst,c(1,5),mean)
ind<-as.matrix(expand.grid(1:nsim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
#Iobst[ind]<-Ierr*(Iobst[ind]/Isum[ind[,c(1,5)]])
Iobst[ind]<-Iobst[ind]/Isum[ind[,c(1,5)]]
}else{
#Iobst<-Ierr*apply(VB[,,,1:nyears,,,],c(1,4,5,6,7),sum)#^Ibeta
apicalFage<-apply(OM@sel,1:2,which.max)
Iobst<-array(NA,dim=c(nsim,nyears,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
VBsum<-apply(VB[,,,1:nyears,,,],c(1,3:7),sum) # sum over pops
VBind<-cbind(ind[,1],apicalFage[ind[,c(1,5)]],ind[,2:5]) # add apical age to VBindex
#Iobst[ind]<--log(1-(Cobsta[VBind]/VBsum[VBind]))
Iobst[ind]<-OM@E[ind[,c(1,5,2,3,4)]]#(OMd@nsim,OMd@nfleets,OMd@nyears,OMd@nsubyears,OMd@nareas))
#Isum<-apply(Iobst,c(1,5),mean)
#Iobst[ind]<-(Iobst[ind]/Isum[ind[,c(1,5)]])
}
debugR<-F
if(debugR){
simo<-1
p<-1
age<-13
m<-1
f<-1
r<-1
ys<-1:25
test<-as.data.frame(cbind(Cobst[simo,ys,m,r,f],apply(VB[simo,,,ys,m,r,f],3,sum),Iobst[simo,ys,m,r,f],Cobst[simo,ys,m,r,f]/Iobst[simo,ys,m,r,f],FM[ss,p,8,ys,m,r,f],OM@E[ss,f,ys]))
test<-test/rep(apply(test,2,mean),each=nrow(test))
names(test)<-c("Cobs","VulnB","vBindex","Cobs/vBindex","FM","effort")
test
}
# Length composition
CALm<-array(NA,c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets,nlen))
ind<-TEG(dim(CALm))
ALKind<-ind[,c(1,2,4,3,8)]
Cind<-ind[,1:7]
CALm[ind]<-C[Cind]*OM@iALK[ALKind]
# You were here simulating fishery independent SSB in the spawning area
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,OM@Recsubyr,1:nareas))
SSBtemp<-array(NA,c(nsim,npop,nages,nyears,nareas))
SSBtemp[ind[,c(1,2,3,4,6)]]<-N[ind]*Wt_age[ind[,1:4]]*OM@mat[ind[,1:4]]
SSBtemp<-apply(SSBtemp,c(1,2,4,5),sum)
SpawnA<-apply(SSBtemp,1:3,which.max)
FIobst<-array(NA,c(nsim,npop,nyears))
ind<-TEG(c(nsim,npop,nyears))
FIobst[ind]<-SSBtemp[cbind(ind,SpawnA[ind])]
meanFI<-apply(FIobst,1:2,mean)
FIobst[ind]<-FIobst[ind]/meanFI[ind[,1:2]]
FIerr<-array(trlnorm(nsim*mult,1,rep(.Object@Iimp,mult)),c(nsim,npop,nyears))
FIobst<-FIobst*FIerr
# Tagging data ---
nRPT<-2 # maximum number of timesteps that a tag may be recaptured in (n subyears)
temp<-rep(1:nsubyears,ceiling(nRPT/nsubyears)+nsubyears)
RPTind<-array(NA,c(nsubyears,nRPT))
for(ss in 1:nsubyears)RPTind[ss,]<-temp[ss:(ss+nRPT-1)]
for(sim in 1:nsim){ # Now loop over simulations, create data and write M3 files for parallel processing
simfolder<-paste(M3temp,sim,sep="")
if(!file.exists(simfolder))dir.create(simfolder)
file.copy(paste(OMDir,"/M3.exe",sep=""),simfolder,overwrite=T)
file.copy(paste(OMDir,"/M3.pin",sep=""),simfolder,overwrite=T)
datfile<-paste(simfolder,"/M3.dat",sep="")
#datfile<-"G:/M3/M3.dat"
# }
#sim<-1
print(sim)
print(Sys.time())
#datfile<-paste(OMDir,"/M3.dat",sep="")
cat("\n")
cat("Write data")
cat("\n")
# -- Model Dimensions --
write("# ny number of years",datfile,1,append=F)
write(nyears,datfile,1,append=T)
write("# ns number of subyears",datfile,1,append=T)
write(nsubyears,datfile,1,append=T)
write("# np number of populations/stocks",datfile,1,append=T)
write(npop,datfile,1,append=T)
write("# na number of age classes",datfile,1,append=T)
write(nages,datfile,1,append=T)
write("# nr number of regions/areas",datfile,1,append=T)
write(nareas,datfile,1,append=T)
write("# nf number of fleets",datfile,1,append=T)
write(nfleets,datfile,1,append=T)
write("# nl number of length classes",datfile,1,append=T)
write(nlen,datfile,1,append=T)
write("# nRPT maximum number of time steps that a PSAT can be recaptured",datfile,1,append=T)
write(nRPT,datfile,1,append=T)
write("# RPtind correct subyear recapture index",datfile,1,append=T)
write(t(RPTind),datfile,nRPT,append=T)
write("# sdur the duration of the various subyears (sums to 1)",datfile,1,append=T)
write(rep(1/nsubyears,nsubyears),datfile,nsubyears,append=T)
write("# nZeq: number of years at the start of the model to calculate equilibrium Z from",datfile,1,append=T)
write(nZeq,datfile,1,append=T)
write("# nydist: number of years over which initial stock distribution is calculated (prior to spool up)",datfile,1,append=T)
write(nydist,datfile,1,append=T)
write("# nyeq: number of spool-up years over which the stock is subject to nZeq, used to define equilibrium conditions",datfile,1,append=T)
write(nyeq,datfile,1,append=T)
write("# ml the mean length of the length categories",datfile,1,append=T)
write(mulen,datfile,nlen,append=T)
yblock<-5
RDblock<-rep(1:100,each=yblock)[1:nyears]
write("# RDblock the RD parameter for each year",datfile,1,append=T)
write(RDblock,datfile,nyears,append=T)
write("# nRD the number of estimated recruitment strengths",datfile,1,append=T)
if(complexRD==0)write(max(RDblock),datfile,nyears,append=T)
if(complexRD==1)write(nyears,datfile,nyears,append=T)
# -- Growth --
write("# iALK the age-length key by population and year p y a l",datfile,1,append=T)
write(tomt(OM@iALK[sim,,,,]),datfile,nlen,append=T)
write("# lwa weight-length parameter a w=al^ b",datfile,1,append=T)
write(OM@a,datfile,npop,append=T)
write("# lwa weight-length parameter b w=al^ b",datfile,1,append=T)
write(OM@b,datfile,npop,append=T)
write("# len_age (pay)",datfile,1,append=T)
write(OM@Len_age[sim,,,1:nyears],datfile,nyears,append=T)
write("# wt_age (pay)",datfile,1,append=T)
write(OM@Wt_age[sim,,,1:nyears],datfile,nyears,append=T)
# -- Maturity --
write("# Fec, fecundity at age, SSB at age",datfile,1,append=T)
write(t(Wt_age[sim,,,nyears]*OM@mat[sim,,,nyears]),datfile,nages,append=T)
write("# steep, steepness of the Bev-Holt SR relationship",datfile,1,append=T)
write(OM@h[sim,],datfile,npop,append=T)
# -- Spawning --
write("# spawns, the subyear in which the stock spawns",datfile,1,append=T)
write(OM@Recsubyr,datfile,npop,append=T)
#write("# spawnr, the fracton of recruits in each area",datfile,1,append=T)
#write(t(spawnr[sim,,]),datfile,nareas,append=T)
# -- Natural Mortality rate --
write("# Ma, instantaneous natural mortality rate at age",datfile,1,append=T)
write(t(M[sim,,,1]),datfile,nages,append=T)
# -- Fishery data --
# Catches / F init
if(complexF==1){
allobsbelow<-0.02 # level of catches at cumulative 2%
Cobs_cutoff<- min(Cobst[order(as.vector(Cobst))][cumsum(Cobst[order(as.vector(Cobst))])/sum(Cobst)>allobsbelow])
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
cond<-Cobst[ind]>Cobs_cutoff
nind<-ind[cond,]
rat<-sum(Cobst[ind])/sum(Cobst[nind])
Cobs<-cbind(nind[,2:5],Cobst[nind])
Cobs[,5]<-Cobs[,5]*rat
}else{
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
Cobs<-cbind(ind[,2:5],Cobst[ind])
}
#plot(density(Cobst[nind]))
#lines(density(Cobst[ind]),col='red')
#legend('topright',legend=c(round(rat,4),nrow(Cobs)))
write("# nCobs, the number of catch weight observations y s r f CW",datfile,1,append=T)
write(nrow(Cobs),datfile,1,append=T)
write("# Cobs, catch weight observations y s r f C(weight)",datfile,1,append=T)
write(t(Cobs),datfile,5,append=T)
# CPUE
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
CPUEobs<-cbind(ind[,c(2:5,5)],Iobst[sim,,,,]) # fleet is index number
write("# nCPUE, the number of CPUE series",datfile,1,append=T)
write(nfleets,datfile,1,append=T) # in this simulation this is the same as the number of fleets
write("# nCPUEobs, the number of CPUE observations y s r f CPUE(weight)",datfile,1,append=T)
write(nrow(CPUEobs),datfile,1,append=T)
write("# CPUEobs, CPUE observations y s r f CPUE(weight)",datfile,1,append=T)
write(t(CPUEobs),datfile,6,append=T)
# Length composition
CALt<-CALm[sim,,,,,,,] # p a y s m f l
CALsum<-ceiling(apply(CALt,3:7,sum,na.rm=T)) # y s m f l
#CALtot<-apply(CALsum)
#par(mfrow=c(1,2))
#plot(CALsum[2,1,2,1,]/max(CALsum[2,1,2,1,]))
#lines(CALsum[2,1,2,2,]/max(CALsum[2,1,2,2,]),col='red')
#plot(OM@sel[sim,1,])
#lines(OM@sel[sim,2,],col='red')
ind<-as.matrix(expand.grid(1:nyears,1:nsubyears,1:nareas,1:nfleets,1:nlen))
cond<-CALsum>0
CLobs<-cbind(ind[cond,],CALsum[cond])
#CLobs<-cbind(ind,CALsum[ind])
sum(is.na(CLobs))
write("# nCLobs, the number of catch-at-length observations y s r f l N",datfile,1,append=T)
write(nrow(CLobs),datfile,1,append=T)
write("# CLobs, catch-at-length observations y s r f l N",datfile,1,append=T)
write(t(CLobs),datfile,6,append=T)
# The real relative abundance index RAI (y, s, r) !!! need to change this to real values
write("# RAI, Relative Abundance index r x s x y",datfile,1,append=T)
write(RAI[sim,,,],datfile,nyears,append=T)
# Fishery-independent indices y s r pp i type(biomass/ssb) index
ind<-as.matrix(expand.grid(sim,1:npop,1:nyears))
Iobs<-as.matrix(cbind(ind[,3],OM@Recsubyr[ind[,2]],SpawnA[ind],ind[,2],ind[,2],rep(2,nrow(ind)),FIobst[ind])) # type SSB
write("# nI, the number of fishery independent indices series",datfile,1,append=T)
write(npop,datfile,1,append=T) # in this simulation this is the same as the number of populations
write("# nIobs, the number of fishery independent observations y s r i type(biomass/ssb) index",datfile,1,append=T)
write(nrow(Iobs),datfile,1,append=T)
write("# Iobs, fishery independent observations y s r i type(biomass/ssb) index",datfile,1,append=T)
write(t(Iobs),datfile,7,append=T)
# PSAT tagging --
nPSATs<-10000
PSATdist<-apply(C[sim,,,,,,],c(1,2,4,5),sum,na.rm=T)^0.01
PSATdist<-PSATdist/apply(PSATdist,1,sum)
PSATdist<-ceiling(PSATdist/sum(PSATdist)*nPSATs)
nPSATs<-sum(PSATdist)
track<-array(NA,c(nPSATs,nRPT))
sy<-rep(NA,nPSATs)
SOO<-array(NA,c(nPSATs,npop))
nT<-1+ceiling(runif(nPSATs)*(nRPT-1)) # nT is the number of timesteps for recapture, this is set to 2 here,nRPT is the maximum number of timesteps that a tag may be recaptured
PSAT<-c(1,1,3,1,9,9)
PSAT2<-c(1,1,1,1,1,0.05,0.95)
j<-0
mov<-OM@mov[sim,,,,,]
for(pp in 1:npop){
for(aa in 1:nages){
for(ss in 1:nsubyears){
for(rr in 1:nareas){
if(PSATdist[pp,aa,ss,rr]>0){
for(i in 1:PSATdist[pp,aa,ss,rr]){
j<-j+1
SOO[j,]<-apply(C[sim,,aa,ceiling(nyears*0.7),ss,rr,],1,sum)/sum(C[sim,,aa,ceiling(nyears*0.7),ss,rr,]) #SPAYMRF
track[j,1]<-rr
sy[j]<-ss
#for(rpt in 2:nT[j]){
rpt<-2
m<-RPTind[ss,rpt]
track[j,rpt]<-(1:nareas)[rmultinom(1,1,mov[pp,aa,mref[m],track[j,rpt-1],])==1] #
SOO[j,]<-SOO[j,]*apply(C[sim,,aa,ceiling(nyears*0.7),m,track[j,rpt],],1,sum)/sum(C[sim,,aa,ceiling(nyears*0.7),m,track[j,rpt],])
#} # track length
SOO[j,]<-SOO[j,]/sum(SOO[j,])
#if(1%in%SOO[j,]){
# for(rpt in 2:nT[j]){
#m<-RPTind[ss,rpt]
PSAT<-rbind(PSAT,c(pp,OM@ma[aa,pp],ss,2,track[j,(rpt-1):rpt]))
#}
#}else{
# for(rpt in 2:nT[j]){
# #m<-RPTind[ss,rpt]
# PSAT2<-rbind(PSAT2,c(ss,2,track[j,(rpt-1):rpt],SOO[j,]))
#}
#}
} # tags
}
} # areas pp
} # ages
} # subyears
} # pops
PSAT<-PSAT[2:nrow(PSAT),]
PSAT<-aggregate(rep(1,nrow(PSAT)),by=list(PSAT[,1],PSAT[,2],PSAT[,3],PSAT[,4],PSAT[,5],PSAT[,6]),sum)
#testPSAT<-array(0,c(npop,OM@nma,nsubyears,nareas,nareas))
#testPSAT[as.matrix(PSAT[,c(1,2,3,5,6)])]<-PSAT[,7]
#PSAT2<-PSAT2[2:nrow(PSAT2),]
write("# nPSAT, PSATs data of known stock of origin p a s t fr tr N",datfile,1,append=T)
write(nrow(PSAT),datfile,1,append=T)
write("# PSAT data of known stock of origin p a s t fr tr N",datfile,1,append=T)
write(t(PSAT),datfile,7,append=T)
write("# nPSAT2, PSATs data of unknown stock of origin a s t fr tr SOO(npop)",datfile,1,append=T)
write(1,datfile,1,append=T)
#write(nrow(PSAT2),datfile,1,append=T)
write("# PSAT2 data of unknown stock of origin a s t fr tr SOO(npop)",datfile,1,append=T)
write(t(PSAT2),datfile,5+npop,append=T)
# Placeholder for conventional tags
Tag<-array(c(2,1,1,1,2,2,1,1,1,1),c(1,10))
write("# nTag, number of conventional tag observations y s r a - y s r f a N",datfile,1,append=T)
write(nrow(Tag),datfile,1,append=T)
write("# Tag, conventional tag observations y s r a - y s r f a N",datfile,1,append=T)
write(t(Tag),datfile,10,append=T)
# Stock of origin
NSOO<-min(ceiling(nyears*nsubyears*nareas/2),500) # number of data points in time and space
muSOO<-10 # mean number of observations at those points
SOO<-apply(C[sim,,,1:nyears,,,],1:5,sum,na.rm=T)
#Csum<-apply(SOO,2:4,sum)
rat<-mean(SOO,na.rm=T)/muSOO
SOO<-SOO/rat
ind<-expand.grid(1:nages,1:nyears,1:nsubyears,1:nareas)[sample(1:(nages*nyears*nsubyears*nareas),NSOO),]
ind<-as.matrix(cbind(rep(1:npop,rep=NSOO),ind[rep(1:NSOO,each=npop),]))
SOOobs<-as.matrix(cbind(ind,SOO[ind]))
SOOobs<-SOOobs[SOOobs[,6]>0,] # remove zeros
write("# nSOOobs, number of stock of origin observations p aa y s r N",datfile,1,append=T)
write(nrow(SOOobs),datfile,1,append=T)
write("# SOOobs, stock of origin observations p aa y s r N",datfile,1,append=T)
write(t(SOOobs),datfile,6,append=T)
# -- Selectivity controls
write("# nsel, number of estimated selectivities",datfile,1,append=T)
write(nfleets,datfile,1,append=T) # same as number of fleets
write("# seltype, 2:logistic, 3:Thompson",datfile,1,append=T)
write(c(2,3),datfile,nfleets,append=T) # first fleet is logistic
write("# selind, which selectivity is assigned to each fleet",datfile,1,append=T)
write(c(1,2),datfile,nfleets,append=T) # same as fleets
write("# ratiolim, limits on logistic slope parameter relative to inflection point",datfile,1,append=T)
write(c(0.1,1),datfile,nfleets,append=T) # same as fleets
write("# infleclim, limits on model selectivity",datfile,1,append=T)
write(c(4,15),datfile,nfleets,append=T) # same as fleets
# -- Movement estimation
mov<-array(NA,c(npop,OM@nma,nsubyears,nareas,nareas))
#mov[as.matrix(PSAT[,c(1,2,4,5)])]<-1
movind<-mov1<-c(1,1,1,1,1)
maclassfind<-match(1:OM@nma,OM@ma[,1])
mov<-OM@mov[sim,,maclassfind,,,] # p ma s fr tr
mov[mov>0]<-1
notanarea<-apply(mov,c(1,2,3,5),sum) # p ma s tr
notanarea<-array(as.integer(notanarea>0),dim(notanarea))
can<-apply(mov,c(1,5),sum) # can a movement happen from this area for this stock?
can<-array(as.integer(can>0),dim(can))
ind<-TEG(dim(mov))
mov[ind]<-mov[ind]*notanarea[ind[,c(1,2,3,4)]]
for(pp in 1:npop){
for(ma in 1:OM@nma){
for(ss in 1:nsubyears){
for(rr in 1:nareas){
np<-sum(mov[pp,ma,ss,rr,],na.rm=T)
if(np>0){
fR<-match(1,mov[pp,ma,ss,rr,])
mov1<-rbind(mov1,c(pp,ma,ss,rr,fR))
if(np>1){
oR<-grep(1,mov[pp,ma,ss,rr,])
oR<-oR[oR!=fR]
for(i in 1:length(oR)){
movind<-rbind(movind,c(pp,ma,ss,rr,oR[i]))
}
}
}
}
}
}
}
movind<-movind[2:nrow(movind),]
mov1<-mov1[2:nrow(mov1),]
if(movtype==1){ # if a gravity formulation these indices are for the to area that should be estimated by season
firstr<-apply(can,1,which.max)
mov1<-TEG(c(npop,OM@nma,nsubyears))
mov1<-cbind(mov1,firstr[mov1[,1]],rep(999,nrow(mov1)))
#mov1<-cbind(rep(1:npop,each=nsubyears),rep(1:nsubyears,npop),firstr[rep(1:npop,each=nsubyears)],rep(999,nsubyears*npop))
can2<-can
can2[cbind(1:npop,firstr)]<-0
can2<-t(can2)
nrest<-apply(can2,1,sum)
indr<-array(1:nareas,c(nareas,npop))
indp<-array(rep(1:npop,each=nareas),c(nareas,npop))
rs<-indr[can2==1]
ps<-indp[can2==1]
movindo<-cbind(rep(ps,each=nsubyears),rep(1:nsubyears,length(rs)),rep(rs,each=nsubyears),rep(999,length(rs)*nsubyears))
movind<-array(rep(movindo,each=OM@nma),dim=c(nrow(movindo)*OM@nma,4))
movind<-cbind(movind[,1],rep(1:OM@nma,nrow(movindo)),movind[,2:4])
}
write("# nMP, number of estimated movement parameters",datfile,1,append=T)
if(movtype==1)write(nrow(movind)+nsubyears*OM@nma*npop,datfile,1,append=T)
if(movtype==2)write(nrow(movind),datfile,1,append=T)
write("# nma, number of estimated movement age classes",datfile,1,append=T)
write(OM@nma,datfile,1,append=T)
write("# ma, assignment of age classes to age",datfile,1,append=T)
write(OM@ma,datfile,nages,append=T)
write("# nmovind, number of estimated movement parameters minus viscosity",datfile,1,append=T)
write(nrow(movind),datfile,1,append=T)
write("# movind, the location of estimated movement parameters p s r r",datfile,1,append=T)
write(t(movind),datfile,5,append=T)
write("# nmov1, number of initial non-estimated movement parameters",datfile,1,append=T)
write(nrow(mov1),datfile,1,append=T)
write("# mov1, the location of initial non-estimated movement parameters p s r r",datfile,1,append=T)
write(t(mov1),datfile,5,append=T)
write("# movtype, the type of movement parameterization 1: gravity 2:markov matrix",datfile,1,append=T)
write(movtype,datfile,1,append=T)
# -- Observation errors
write("# CobsCV, lognormal CV of the observed catches",datfile,1,append=T)
write(rep(0.2,nfleets),datfile,nfleets,append=T)
write("# CPUEobsCV, lognormal CV of the CPUE indices",datfile,1,append=T)
write(rep(0.2,nfleets),datfile,nfleets,append=T) # CPUE index for each fleet
write("# IobsCV, lognormal CV of the fishery independent indices",datfile,1,append=T)
write(rep(0.2,npop),datfile,npop,append=T) # SSB index for each population
# -- Priors
write("# RDCV, lognormal penalty on recruitment deviations",datfile,1,append=T)
write(2,datfile,1,append=T) # SSB index for each population
write("# nLHw, number of likelihood weights",datfile,1,append=T)
write(10,datfile,1,append=T) # SSB index for each population
write("# LHw, likelihood weights (1 catch, 2 cpue, 3 FIindex, 4 Lcomp, 5 SOO, 6 PSAT, 7 PSAT2, 8 RecDev, 9 mov, 10 sel)",datfile,1,append=T)
write(c( 10, 1, 1/100000000, 1/1000, 1/10, 1/100, 1, 1, 1, 2),datfile,10,append=T) # SSB index for each population
# -- Initial values
write("# R0_ini, initial values for log R0",datfile,1,append=T)
write(OM@R0[sim,],datfile,npop,append=T) # Simulated R0 for each population
write("# sel_ini, initial values for selectivity",datfile,1,append=T)
write(t(OM@sel[sim,,]),datfile,nlen,append=T) # Actual selectivity
write("# selpar_ini, initial values for selectivity parameters",datfile,1,append=T)
write(t(OM@selpars[sim,,]),datfile,3,append=T) # Actual selectivity
#RFL<-array(NA,c(nsim,nfleets,nlen,nyears,nsubyears,nareas))
Fsub<-array(NA,c(nyears,nsubyears,nareas,nfleets))
indt<-as.matrix(expand.grid(sim,1:nfleets,1:nyears,1:nsubyears,1:nareas))
Fsub[indt[,c(3,4,5,2)]]<-OM@E[indt]*OM@q[indt[,1:2]]
# old-----
#Fsum<-RF[sim,1,,,,,]
#ind<-TEG(c(nyears,nsubyears,nareas,nfleets))
#ind<-as.matrix(cbind(maxv[ind[,4]],ind[]))
#Fsub<-array(Fsum[ind],c(nyears,nsubyears,nareas,nfleets))
#-----
if(complexF==0)lnFini<-log(as.vector(Fsub))
if(complexF==1)lnFini<-log(Fsub[nind[,2:5]])
write("# lnF_ini, initial values for log F",datfile,1,append=T)
write(lnFini,datfile,nrow(Cobs),append=T) # log apical F
write("# ilnRD_ini, initial recruitment deviations y=1 a=2:nages",datfile,1,append=T)
write(array(0,c(nages-1,npop)),datfile,nages-1,append=T) # Initial recruitment deviations
write("# lnRD_ini, initial recruitment deviations y=1:nyears",datfile,1,append=T)
write(log(t(OM@Recdevs[sim,,1:nyears])),datfile,nyears,append=T) # Recruitment deviations
write("# mov_ini, simulated movement p s a r r",datfile,1,append=T) # this is a pain: M3 is p s a r r, OM@mov is p a s r r (oh well)
movt<-OM@mov[sim,,,,,] # p a s r r
mov<-array(NA,dim(movt)[c(1,3,2,4,5)]) # p s a r r
ind<-TEG(dim(movt))
mov[ind[,c(1,3,2,4,5)]]<-movt[ind]
write(tomt(mov),datfile,nareas,append=T) # Movement probabilities
write("# qCPUE_ini, initial values for CPUE catchability nCPUE",datfile,1,append=T)
if(complexF==1)write(log(1/Isum[sim,]),datfile,nfleets,append=T) # CPUE catchabilities I=qVB
if(complexF==0){
#apicalF<-apply(FM[sim,,,1:nyears,,,],c(1,3,4,5,6),max,na.rm=T)
write(log(OM@q[sim,]),datfile,nfleets,append=T) # CPUE catchabilities I=qVB
}
write("# qI_ini, initial values for fishery independent catchability nI",datfile,1,append=T)
write(log(rep(1,nfleets)),datfile,nfleets,append=T) # Catchabilities I=qSSB or I=qB
write("# D_ini, simulated depletion SSB/SSB0",datfile,1,append=T)
write(D[sim,],datfile,nfleets,append=T) # Catchabilities I=qSSB or I=qB
write("# complexRD 1= run with full estimation of all recruitment deviations by year",datfile,1,append=T)
write(complexRD,datfile,1,append=T) # debug switch
write("# complexF 1= run with full estimation of all F's by year, subyear, fleet, region",datfile,1,append=T)
write(complexF,datfile,1,append=T) # debug switch
write("# nF either nCobs or 1 if complexF=0",datfile,1,append=T)
if(complexF==0)write(1,datfile,1,append=T)
if(complexF==1)write(nrow(Cobs),datfile,1,append=T)
write("# debug 1= run with initial values",datfile,1,append=T)
write(0,datfile,1,append=T) # debug switch
write("# verbose 1= run with printouts",datfile,1,append=T)
write(verbose,datfile,1,append=T) # debug switch
write("# datacheck",datfile,1,append=T)
write(991199,datfile,1,append=T) # datacheck
#system(paste(OMDir,"M3.exe -est",sep="/"),wait=T,show.output.on.console = F) # run the exe
#if(sim==1)pin_from_par(file=paste(OMDir,"/M3",sep=""))
# Store results
#out[[sim]]<-M3read(OMDir)
}
spawnr=c(4,1)
B0<-apply(N[,,,1,1,]*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),c(1:2,4),sum)
B0<-B0/array(apply(B0,1:2,sum),dim(B0))
Bind<-expand.grid(1:nsim,1:npop)
Bfrac<-matrix(B0[as.matrix(cbind(Bind,spawnr[Bind[,2]]))],ncol=npop)
SSB1<-apply(N[,,,1,1,]*
array(Wt_age[,,,1]*OM@mat[,,,1],c(nsim,npop,nages,nareas)),1:2,sum)
SSBcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears]*OM@mat[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
Bcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
Cobsta<-array(NA,c(nsim,npop,nages,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,nyears,1:nsubyears,1:nareas,1:nfleets))
Cobsta[ind[,c(1:3,5:7)]]<-C[ind]*Wt_age[ind[,1:4]]
Cobsta<-apply(Cobsta,c(1,2,6),sum)
Ct<-apply(Cobsta,1:2,sum)
Urat<-Cobsta/array(Ct,dim(Cobsta))
U<-Ct/(apply(Bcur,1,sum)+Ct)
B0t<-apply(N[,,,1,1,]*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),c(1:2),sum)
ratB0<-B0t/apply(B0t,1,sum)
ratBcur<-Bcur/apply(Bcur,1,sum)
.Object@simlist<-list(SSB0=SSB0,D=SSBcur/SSB0,D1=SSBcur/SSB1,B0=B0t,Bfrac=Bfrac,Bcur=Bcur,Urat=Urat,U=U,ratB0=ratB0,ratBcur=ratBcur)
#Bfracp<-t(sapply(1:nsim,getBfrac,out,spawnr=spawnr))
#Bfracbias<-(Bfracp-Bfrac)/Bfrac
# Bias in current depletion (SSB)
#Dp<-t(sapply(1:nsim,getdep,out))
#Dbias<-(Dp-D)/D
# Bias in current SSB (absolute)
#SSBp<-t(sapply(1:nsim,getSSBnow,out))
#SSBbias<-(SSBp-Bcur)/Bcur
#Perf<-data.frame(Dbias,SSBbias,Bfracbias)
#names(Perf)<-c(paste("Dbias",1:npop,sep="_"),paste("SSBtbias",1:npop,sep="_"),paste("Bfracbias",1:npop,sep="_"))
#.Object@Perf<-Perf
#.Object@C[MP,,,]<-apply(C[,,,1:allyears,,,]*array(Wt_age[,,,1:allyears],c(nsim,npop,nages,allyears,nsubyears,nareas,nfleets)),c(1,2,4),sum)
#SSB<-apply(SSN[,,,1:allyears,4,]*array(Wt_age[,,,1:allyears],c(nsim,npop,nages,allyears,nareas)),c(1,2,4),sum)
#.Object@D[MP,,,]<-SSB/apply(SSB0,1,sum)
#B<-apply((SSN[,,,1:allyears,4,]+NSN[,,,1:allyears,4,])*array(Wt_age,c(nsim,npop,nages,allyears,nareas)),c(1:2,4),sum)
#Bthen<-apply((SSN[,,,1,4,]+NSN[,,,1,4,])*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),1:2,sum)
#.Object@B_BMSY[MP,,]<-apply(array(B[,targpop,],dim=c(nsim,length(targpop),allyears)),c(1,3),sum)/OM@BMSY
#U<-apply(array(.Object@C[MP,,targpop,],c(nsim,length(targpop),allyears)),c(1,3),sum)/
#apply(array(VBA[,targpop,,1:allyears,4,],c(nsim,length(targpop),nages,allyears,nareas)),c(1,4),sum)
#.Object@F_FMSY[MP,,]<-U/OM@UMSY
#cat("\n")
# #.Object@MPs<-MPs
.Object
})
# Operating model definition object ---------------------------------------------------------------------------------------------------------------------
setClass("OMd",representation(
# Description
Name="character",Date="character",Author="character",
Notes="character",PrimarySource="character",
# Dimensions
nsim="integer",npop="integer",nages="integer", # MSE dimensions
nyears="integer",nsubyears="integer",nareas="integer", # MSE dimensions
proyears="integer",nlen="integer",lenbins="numeric", # Projected years
interval="integer", # Update interval
nma="integer",ma="array", # Number of movement age classes, age class definitions
# Parameter ranges / simulation sample distributions
Magemu="array",Mrange="array",Msd="array",Mgrad="array", # Mean natural mortality rate at age, sample range, interannual variability and gradient % yr-1
SRrel="integer",h="array",recgrad="array", # Stock-recruitment relationship type, steepness, underlying gradient % yr-1
Reccv="array",AC="array", Recsubyr="integer", # CV of recruitment deviations and recruitment auto-correlation
Linf="array",K="array",t0="numeric", # Mean growth parameters
Ksd="array",Kgrad="array",Linfsd="array",Linfgrad="array", # Interannual variability in growth and mean trajectory % yr-1
a="numeric",b="numeric", # Weight - Length conversion W=aL^b
ageM="array",ageMsd="array",ageMgrad="array", # Age-at-maturity, interannual variability and gradient % yr-1
D="array",R0="array", # Current stock depletion, abundance
Size_area="array",mov="array", # Size of regions, Markov movement matrix for all fish and mature fish
movvar="numeric",movsd="array",movgrad="array", # Inter-simulation variability in movement, interannual-variability in movement, gradient changes in area gravity weights
excl="array", # Exclusion matrix [0,1] depending on whether the stock can go there
# Fleet specifications
nfleets="integer", # Number of fleets,
L05="array",VmaxL="array", LFS="array", # Length at 5% vulnerability, vulnerability of largest fish, length at full selection
Fsd="array",Fgrad="array", Frat="numeric", # Interannual variability in F, Final gradient in F yr-1
Spat_targ="array", # Spatial targetting parameter F =prop= V^Spat_targ
Area_names="character", Area_defs="list", # Area definitions (polygons)
targpop="numeric", # The target population for calculation of MSY and depletion reference points
#nZeq="integer", # The number of initial years to calculation equilibrium F
nydist="integer", # The number of years (iterations) taken to find equilibrium spatial distribution
#nyeq="integer", # The number of years (iterations) taken to find equilibrium F
# Observation properties relevant to trial specifications
Cbias="numeric",
# Misc
seed="numeric" # Random seed for the generation of the OM
))
# Plot spatial definitions of the OMd object
setMethod("plot", signature(x = "OMd"),function(x){
OMd<-x
cols<-rep(c("#ff000040","#00ff0040","#0000ff40","#00000040","#ff00ff40"),4)
res<-0.03
map(database = "worldHires",xlim=c(-105,50),ylim=c(-55,85),mar=rep(0,4),resolution=res)
abline(v=(-20:20)*10,col='light grey')
abline(h=(-20:20)*10,col='light grey')
abline(v=0,col="green")
abline(h=0,col="green")
map(database = "worldHires",mar=rep(0,4),border=0,xlim=c(-105,50), ylim=c(-55,85),add=T,fill=T,col="light grey",resolution=res)
for(i in 1:length(OMd@Area_names)){
polygon(OMd@Area_defs[[i]],border='blue',lwd=2,col=NA)#cols[i])
text(mean(OMd@Area_defs[[i]]$x),2.5+mean(OMd@Area_defs[[i]]$y),OMd@Area_names[i],col='red',font=2,cex=0.6)
}
})
#setMethod("plot", signature(x = "MSE"),function(x){
| /ABTMSE/R/Other_object_classes.R | no_license | samueldnj/AtlanticBluefinTuna | R | false | false | 45,293 | r |
setClass("SimSam",representation(
# Description
Name="character",Date="character",Author="character",
Notes="character",PrimarySource="character",
# Dimensions
nsim="integer",npop="integer",nages="integer", # MSE dimensions
nyears="integer",nsubyears="integer",nareas="integer", # MSE dimensions
proyears="integer", targpop="integer", nfleets="integer", # Proyears, number of management procedures
interval="integer",nma="integer",ma="array", # Number of movement age classes, age class definitions
nlen="integer",lenbins="numeric", # Proyears
mulen="numeric",
# Observation model
Cimp="numeric",Cb="numeric",Cerr="array",
Iimp="numeric",Ibeta="numeric",Ierr="array",
nCAAobs="numeric",nCALobs="numeric",Lcv="numeric",
Mb="numeric",Kb="numeric",t0b="numeric",Linfb="numeric",
LFCb="numeric",LFSb="numeric",
FMSYb="numeric",FMSY_Mb="numeric",BMSY_B0b="numeric",
ageMb="numeric",
Dimp="numeric", Db="numeric",Derr="array",
Btimp="numeric", Btb="numeric",Bterr="array",
Ftimp="numeric", Ftb="numeric",Fterr="array",
hb="numeric",
Recbcv="numeric",
IMSYb="numeric", MSYb="numeric", BMSYb="numeric",
# Management quantities
C="array",
D="array",
B_BMSY="array",
F_FMSY="array",
B="array",
SSB="array",
TAC="array",
simlist="list",
# Performance metrics
Perf="data.frame",
POF="array",
Y="array",
AAVY="array",
PB10="array",
PB50="array",
PB100="array"
))
setMethod("initialize", "SimSam", function(.Object,OM,Obs,movtype=2,OMDir="G:/M3",verbose=0,
complexF=0,complexRD=0,M3temp="C:/M3temp/"){
#.Object})
#.Object<-new('SimSam')
# Bias in fraction in spawning area (unfished)
# Auto-correlation in recrutiment deviations is currently disabled
set.seed(OM@seed)
if(class(OM)!='OM'){
print(paste('Could not run SimSam:',deparse(substitute(OMd)),'not of class OM'))
stop()
}
if(class(Obs)!='Obs'){
print(paste('Could not run SimSam:',deparse(substitute(Obs)),'not of class Obs'))
stop()
}
# copy over dimensions ------
dimslots<-slotNames(OM)[1:17]
for(i in 1:17)slot(.Object,dimslots[i])<-slot(OM,dimslots[i])
cat("Constructing arrays")
cat("\n")
flush.console()
# Dimensions S P A Y M R
nsim<-OM@nsim
npop<-OM@npop
nyears<-OM@nyears
proyears<-OM@proyears
nages<-OM@nages
nsubyears<-OM@nsubyears
nareas<-OM@nareas
nfleets<-OM@nfleets
.Object@nfleets<-nfleets
targpop<-as.integer(OM@targpop)
.Object@targpop<-targpop
allyears<-nyears+proyears
nlen<-OM@nlen
lenbins<-OM@lenbins
mulen<-OM@mulen
Wt_age<-OM@Wt_age
nZeq<-OM@nZeq
nydist<-OM@nydist
nyeq<-OM@nyeq
# Define arrays -----------------------------------------------------------
# Management variables
# !!!! This a temporary fix for simulation testing- keep maturity constant
ind2<-ind<-TEG(dim(OM@mat))
ind2[,4]<-1
OM@mat[ind]<-OM@mat[ind2]
OM@Wt_age[ind]<-OM@Wt_age[ind]
OM@Mmov<-OM@mov
OM@Recdevs[,,1]<-1
# Run historical simulation ----------------------------------------------
M<-OM@M
Mtemp<-array(0,dim(OM@M))
Mtemp[,,2:nages,]<-OM@M[,,1:(nages-1),]
surv=tomt(exp(-apply(Mtemp[,,,1],2:1,cumsum)))
surv[,,nages]<-surv[,,nages]*exp(-M[,,nages,1])/(1-exp(-M[,,nages,1]))
N<-SSN<-NSN<-SSB<-VBA<-Z<-array(NA,c(nsim,npop,nages,allyears,nsubyears,nareas)) # only need aggregated catch for these purposes
SSBA<-array(NA,c(nsim,npop,allyears))
FD<-array(NA,c(nsim,nfleets,allyears,nsubyears,nareas)) # Fishing distribution
Fdist<-array(NA,c(nsim,npop,nfleets,nareas))
FM<-VB<-C<-array(NA,c(nsim,npop,nages,allyears,nsubyears,nareas,nfleets))
CA<-array(NA,c(nsim,npop,allyears,nsubyears,nareas))
mref<-c(2:nsubyears,1) # movement reference
y<-1
m<-1
# need to remake all these for OM renewal
RFL<-array(NA,c(nsim,nfleets,nlen,nyears,nsubyears,nareas))
indL<-TEG(dim(RFL))
indE<-indL[,c(1,2,4,5,6)]
RFL[indL]<-OM@q[indL[,c(1,2)]]*OM@sel[indL[,1:3]]*OM@E[indE]
#got to here! translate RFL (fishing mort by length to fishing mort by age)
Ftrans<-array(0,c(nsim,nfleets,nyears,nsubyears,nages,nlen,nareas,npop))
Find<-TEG(dim(Ftrans))
Lind<-Find[,c(1,2,6,3,4,7)] # s f l y m r
Ftrans[Find]<-OM@iALK[Find[,c(1,8,3,5,6)]]*RFL[Lind]
RF<-apply(Ftrans,c(1,8,5,3,4,7,2),sum) # s p a y m r f
maxRF<-apply(RF,c(1,2,4,5,6,7),max) # s p y r f
Rind<-TEG(c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets))#as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,1:nareas,1:nfleets))
sel<-RF
sel[Rind]<-sel[Rind]/maxRF[Rind[,c(1,2,4,5,6,7)]]
sel<-sel[,,,nyears,nsubyears,,] # s p a r f # Take this from last year, in future simulations this may be by year so leave this code!
SFAYMR<-as.matrix(expand.grid(1:nsim, 1:nfleets,1:nages,y,m,1:nareas)) # Set up some array indexes
SFAY<-SFAYMR[,1:4]
SPAYMR<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,y,m,1:nareas)) # Set up some array indexes
SARP<-SPAYMR[,c(1,3,6,2)]
SPA<-SPAYMR[,1:3]
SPR<-SPAYMR[,c(1,2,6)]
SPMR<-SPAYMR[,c(1,2,5,6)]
SP<-SPAYMR[,1:2]
SA<-SPAYMR[,c(1,3)]
SAR<-SPAYMR[,c(1,3,6)]
SPAR<-SPAYMR[,c(1:3,6)]
SPAY<-SPAYMR[,1:4]
SPAM<-SPAYMR[,c(1:3,5)]
# New model initialization ------------ pay paymrf
R0<- OM@R0
h<-OM@h
mat<-OM@mat
mov<-OM@mov
Zeq<-array(apply(M[,,,1:nZeq],1:3,mean),c(nsim,npop,nages,nsubyears,nareas))/nsubyears+apply(apply(RF[,,,1:nZeq,,,],1:6,sum),c(1,2,3,5,6),mean)
SSB0<-apply(surv*array(R0,dim(surv))*Wt_age[,,,1]*mat[,,,1],1:2,sum)
SSBpR<-SSB0/R0
stemp<-array(1/nareas,dim=c(nsim,npop,nsubyears,nareas))
movi<-mov[,,nages,,,]
for(y in 1:nydist){
for(m in 1:nsubyears){
if(m==1){
stemp[,,m,]<-apply(array(rep(stemp[,,nsubyears,],nareas)*movi[,,m,,],c(nsim,npop,nareas,nareas)),c(1,2,4),sum)
}else{
stemp[,,m,]<-apply(array(rep(stemp[,,m-1,],nareas)*movi[,,m,,],c(nsim,npop,nareas,nareas)),c(1,2,4),sum)
}
}
}
indN<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1,nsubyears,1:nareas))#
N[indN]=R0[indN[,1:2]]*surv[indN[,1:3]]*stemp[indN[,c(1,2,5,6)]]
SSB[,,,1,nsubyears,]<-N[,,,nyears,nsubyears,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
for(y in 1:nyeq){
for(m in 1:nsubyears){
if(m==1){ # first subyear
N[,,,1,m,]<-exp(-Zeq[,,,nsubyears,])*N[,,,1,nsubyears,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
}else if(m==2){ # spawning subyear
N[,,,1,m,]<-exp(-Zeq[,,,m-1,])*N[,,,1,m-1,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
spawnr<-apply(SSB[,,,1,m,],c(1,2,4),sum)/array(apply(SSB[,,,1,m,],1:2,sum),dim(SSB)[c(1,2,6)])
SSBt<-apply(SSB[,,,1,m,],1:2,sum)
N[,,nages,1,m,]<-N[,,nages,1,m,]+N[,,nages-1,1,m,] # plus group
N[,,2:(nages-1),1,m,]<-N[,,1:(nages-2),1,m,]
N[,,1,1,m,]<-spawnr*array(((0.8*R0*h*SSBt)/(0.2*SSBpR*R0*(1-h)+(h-0.2)*SSBt)),dim(spawnr))
#print(sum(N[1,1,1,1,m,]))
#SSBA[,,1]<-apply(N[,,,1,m,]*array(Wt_age[,,,1]*OM@mat[,,,nyears],dim=c(nsim,npop,nages,nareas)),1:2,sum)
}else{ # after spawning subyear
N[,,,1,m,]<-exp(-Zeq[,,,m-1,])*N[,,,1,m-1,]
N[,,,1,m,]<-domov(N[,,,1,m,],mov[,,,m,,])
SSB[,,,1,m,]<-N[,,,1,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
} # End of if subyear
} # end of subyear
} # end of equlibrium calculation year nyeq
bR<-log(5*h)/(0.8*SSB0) # Ricker SR params
aR<-exp(bR*SSB0)/SSBpR # Ricker SR params
y<-1
m<-1
SPAYMRF2<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,y,m,1:nareas,1:nfleets))
SF2<-SPAYMRF2[,c(1,7)]
SFA2<-SPAYMRF2[,c(1,7,3)]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
SPRFA2<-SPAYMRF2[,c(1,2,6,7,3)]
SPFR2<-SPAYMRF2[,c(1,2,7,6)]
SPAY2<-SPAYMRF2[,1:4]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
SFAYR2<-SPAYMRF2[,c(1,7,3,4,6)]
SPAYRF2<-SPAYMRF2[,c(1,2,3,4,6,7)]
SPARF2<-SPAYMRF2[,c(1,2,3,6,7)]
for(m in 1:nsubyears){
SPAYMRF2[,5]<-m
SPAYMR2<-SPAYMRF2[,1:6]
SPAYMR[,5]<-m
VB[SPAYMRF2]<-N[SPAYMR2]*Wt_age[SPAY2]*sel[SPARF2] # Calculate vunerable biomass
#FM[SPAYMRF2]<-RF[SPAYRF2]#*FD[FYMR2]
Ftot<-apply(RF[,,,y,m,,],1:4,sum)
Z[SPAYMR]<-Ftot[SPAR]+M[SPAY]/nsubyears
C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2] # need to add back in mortality rate before C calculation
#C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2]
}
SPAYMR[,5]<-1
SPAYMRF2[,5]<-1
SPAYMR2<-SPAYMRF2[,1:6]
cat("Re-running historical simulations")
cat("\n")
for(y in 2:nyears){
SPAYMR[,4]<-y
SPAY<-SPAYMR[,1:4]
SPAYMRF2[,4]<-y
SPAYRF2[,4]<-y
SPAY2<-SPAYMRF2[,1:4]
SFAY2<-SPAYMRF2[,c(1,7,3,4)]
SFAYR2<-SPAYMRF2[,c(1,7,3,4,6)]
SFAR2<-SPAYMRF2[,c(1,7,3,6)]
for(m in 1:nsubyears){
SPAYMR[,5]<-m
SPAM<-SPAYMR[,c(1:3,5)]
SPAYMRF2[,5]<-m
SFYMR2<-SPAYMRF2[,c(1,7,4:6)]
SPAYMR2<-SPAYMRF2[,1:6]
if(m==1){
N[,,,y,m,]<-N[,,,y-1,nsubyears,]*exp(-Z[,,,y-1,nsubyears,])
}else{
N[,,,y,m,]<-N[,,,y,m-1,]*exp(-Z[,,,y,m-1,])
}
# move fish
N[,,,y,m,]<-domov(N[,,,y,m,],OM@mov[,,,m,,])
VB[SPAYMRF2]<-N[SPAYMR2]*Wt_age[SPAY2]*sel[SPARF2] # Calculate prop to vunerable biomass
Ftot<-apply(RF[,,,y,m,,],1:4,sum)
Z[SPAYMR]<-Ftot[SPAR]+M[SPAY]/nsubyears
# harvest fish
#C[SPAYMRF2]<-N[SPAYMR2]*(exp(Z[SPAYMR2])-1)*RF[SPAYMRF2]/Z[SPAYMR2]
C[SPAYMRF2]<-N[SPAYMR2]*(1-exp(-Z[SPAYMR2]))*RF[SPAYMRF2]/Z[SPAYMR2]
# age individuals
for(pp in 1:npop){
if(OM@Recsubyr[pp]==m){
# age fish
SSBA[,pp,y]<-apply(N[,pp,,y-1,m,]*array(Wt_age[,pp,,nyears]*OM@mat[,pp,,nyears],dim=c(nsim,nages,nareas)),1,sum)
SSBdist<-apply(N[,pp,,y-1,m,]*array(Wt_age[,pp,,nyears]*OM@mat[,pp,,nyears],dim=c(nsim,nages,nareas)),c(1,3),sum)/SSBA[,pp,y]
N[,pp,nages,y,m,]<-N[,pp,nages,y,m,]+N[,pp,nages-1,y,m,]
N[,pp,2:(nages-1),y,m,]<-N[,pp,1:(nages-2),y,m,]
# recruit fish
if(OM@SRrel[pp]==1){ # Beverton-Holt recruitment
rec<-OM@Recdevs[,pp,y]*(0.8*OM@R0[,pp]*OM@h[,pp]*SSBA[,pp,y])/(0.2*SSBpR[,pp]*OM@R0[,pp]*(1-OM@h[,pp])+(OM@h[,pp]-0.2)*SSBA[,pp,y])
}else{ # Most transparent form of the Ricker uses alpha and beta params
rec<-OM@Recdevs[,pp,y]*aR[,pp]*SSBA[,pp,y]*exp(-bR[,pp]*SSBA[,pp,y])
}
N[,pp,1,y,m,]<-rec*SSBdist
} # if its the right subyear
} # end of pop
SSB[,,,y,m,]<-N[,,,y,m,]*rep(Wt_age[,,,nyears],nareas)*rep(mat[,,,nyears],nareas)
} # end of subyear
} # end of year
Bcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears]*OM@mat[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
#Bcur<-apply(N[ss,1,,nyears,nsubyears,]*
# array(Wt_age[ss,1,,nyears]*OM@mat[ss,1,,nyears],c(nages,nareas)),1:2,sum)
#Bcur<-sum(array(N[targpop,,nyears,nsubyears,],c(length(targpop),nages,nareas))*
# array(Wt_age[targpop,,nyears]*mat[targpop,,nyears],c(length(targpop),nages,nareas)))
SSBall<-N*array(Wt_age,dim(N))*array(OM@mat,dim(N))
RAI<-apply(SSBall,c(1,4,5,6),sum)
RAI<-RAI[,1:nyears,,]
RAI<-RAI/array(apply(RAI,1,mean),dim(RAI))
D<-Bcur/SSB0 # Check against OM@D (remember only targetpop is matched)
# Generate observation errors ---------------------------------------------
.Object@Cimp<-runif(nsim,Obs@Ccv[1],Obs@Ccv[2])
.Object@Cb<-trlnorm(nsim,1,Obs@Cbcv)
.Object@Cerr<-array(trlnorm(nsim*allyears,rep(.Object@Cb,allyears),rep(.Object@Cimp,allyears)),c(nsim,allyears))
.Object@Iimp<-runif(nsim,Obs@Icv[1],Obs@Icv[2])
.Object@Ierr<-array(trlnorm(nsim*allyears,1,rep(.Object@Iimp,allyears)),c(nsim,allyears))
.Object@Ibeta<-exp(runif(nsim,log(Obs@Ibeta[1]),log(Obs@Ibeta[2])))
.Object@Btimp<-runif(nsim,Obs@Btcv[1],Obs@Btcv[2])
.Object@Btb<-trlnorm(nsim,1,Obs@Btbcv)
.Object@Bterr<-array(trlnorm(nsim*allyears,rep(.Object@Btb,allyears),rep(.Object@Btimp,allyears)),c(nsim,allyears))
.Object@Mb<-trlnorm(nsim,1,Obs@Mbcv)
.Object@Kb<-trlnorm(nsim,1,Obs@Kbcv)
.Object@Linfb<-trlnorm(nsim,1,Obs@Linfbcv)
.Object@t0b<-rep(1,nsim)
# Generate data ------------------------------------------------
datfile<-paste(OMDir,"/M3.dat",sep="")
cat("\n")
cat("Generating data")
cat("\n")
#sof<-apply(array(OM@E[,,nyears]*OM@q,c(nsim,nfleets,nages))*sel,c(1,3),sum)
#sof<-sof/apply(sof,1,max)
SFAY1<-SFAY2
Find<-as.matrix(expand.grid(1:nsim,1:npop,1:nareas,1:nfleets))[,c(1,2,4,3)]
FindSF<-Find[,c(1,3)]
FindSPR<-Find[,c(1,2,4)]
SPFR3<-as.matrix(expand.grid(1:nsim,1:npop,1:nfleets,1:nareas))
SPR3<-SPFR3[,c(1,2,4)]
# Age-length key --
#contour(OM@iALK[1,1,1,,])
# Spawning --
spawnr<-array(NA,c(nsim,npop,nareas))
for(pp in 1:npop){
m<-OM@Recsubyr[pp]
spawnr[,pp,]<-apply(SSN[,pp,,1,m,]*array(Wt_age[,pp,,1],dim=c(nsim,nages,nareas)),c(1,3),sum)/SSBA[,pp,y]
}
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nareas))
sums<-apply(spawnr,1:2,sum)
sind<-ind[,1:2]
spawnr[ind]<-spawnr[ind]/sums[sind]
# Fishery data -------------
# Catch
mult<-nyears*nsubyears*nareas*nfleets
#Cerr<-array(trlnorm(nsim*mult,rep(.Object@Cb,mult),rep(.Object@Cimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Cerr<-array(trlnorm(nsim*mult,rep(1,mult),rep(.Object@Cimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Cobsta<-array(NA,c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,1:nsubyears,1:nareas,1:nfleets))
Cobsta[ind]<-C[ind]*Wt_age[ind[,1:4]]
Cobst<-apply(Cobsta,c(1,4:7),sum,na.rm=T)*Cerr
Cobsta<-apply(Cobsta,c(1,3:7),sum,na.rm=T)
# CPUE
Ierr<-array(trlnorm(nsim*mult,1,rep(.Object@Iimp,mult)),c(nsim,nyears,nsubyears,nareas,nfleets))
Ibeta<-exp(runif(nsim,log(Obs@Ibeta[1]),log(Obs@Ibeta[2])))
if(complexF==1){
# SYMRF SPAY M R F2
Iobst<-apply(VB[,,,1:nyears,,,],c(1, 4:7),sum)#^Ibeta
Isum<-apply(Iobst,c(1,5),mean)
ind<-as.matrix(expand.grid(1:nsim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
#Iobst[ind]<-Ierr*(Iobst[ind]/Isum[ind[,c(1,5)]])
Iobst[ind]<-Iobst[ind]/Isum[ind[,c(1,5)]]
}else{
#Iobst<-Ierr*apply(VB[,,,1:nyears,,,],c(1,4,5,6,7),sum)#^Ibeta
apicalFage<-apply(OM@sel,1:2,which.max)
Iobst<-array(NA,dim=c(nsim,nyears,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
VBsum<-apply(VB[,,,1:nyears,,,],c(1,3:7),sum) # sum over pops
VBind<-cbind(ind[,1],apicalFage[ind[,c(1,5)]],ind[,2:5]) # add apical age to VBindex
#Iobst[ind]<--log(1-(Cobsta[VBind]/VBsum[VBind]))
Iobst[ind]<-OM@E[ind[,c(1,5,2,3,4)]]#(OMd@nsim,OMd@nfleets,OMd@nyears,OMd@nsubyears,OMd@nareas))
#Isum<-apply(Iobst,c(1,5),mean)
#Iobst[ind]<-(Iobst[ind]/Isum[ind[,c(1,5)]])
}
debugR<-F
if(debugR){
simo<-1
p<-1
age<-13
m<-1
f<-1
r<-1
ys<-1:25
test<-as.data.frame(cbind(Cobst[simo,ys,m,r,f],apply(VB[simo,,,ys,m,r,f],3,sum),Iobst[simo,ys,m,r,f],Cobst[simo,ys,m,r,f]/Iobst[simo,ys,m,r,f],FM[ss,p,8,ys,m,r,f],OM@E[ss,f,ys]))
test<-test/rep(apply(test,2,mean),each=nrow(test))
names(test)<-c("Cobs","VulnB","vBindex","Cobs/vBindex","FM","effort")
test
}
# Length composition
CALm<-array(NA,c(nsim,npop,nages,nyears,nsubyears,nareas,nfleets,nlen))
ind<-TEG(dim(CALm))
ALKind<-ind[,c(1,2,4,3,8)]
Cind<-ind[,1:7]
CALm[ind]<-C[Cind]*OM@iALK[ALKind]
# You were here simulating fishery independent SSB in the spawning area
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,1:nyears,OM@Recsubyr,1:nareas))
SSBtemp<-array(NA,c(nsim,npop,nages,nyears,nareas))
SSBtemp[ind[,c(1,2,3,4,6)]]<-N[ind]*Wt_age[ind[,1:4]]*OM@mat[ind[,1:4]]
SSBtemp<-apply(SSBtemp,c(1,2,4,5),sum)
SpawnA<-apply(SSBtemp,1:3,which.max)
FIobst<-array(NA,c(nsim,npop,nyears))
ind<-TEG(c(nsim,npop,nyears))
FIobst[ind]<-SSBtemp[cbind(ind,SpawnA[ind])]
meanFI<-apply(FIobst,1:2,mean)
FIobst[ind]<-FIobst[ind]/meanFI[ind[,1:2]]
FIerr<-array(trlnorm(nsim*mult,1,rep(.Object@Iimp,mult)),c(nsim,npop,nyears))
FIobst<-FIobst*FIerr
# Tagging data ---
nRPT<-2 # maximum number of timesteps that a tag may be recaptured in (n subyears)
temp<-rep(1:nsubyears,ceiling(nRPT/nsubyears)+nsubyears)
RPTind<-array(NA,c(nsubyears,nRPT))
for(ss in 1:nsubyears)RPTind[ss,]<-temp[ss:(ss+nRPT-1)]
for(sim in 1:nsim){ # Now loop over simulations, create data and write M3 files for parallel processing
simfolder<-paste(M3temp,sim,sep="")
if(!file.exists(simfolder))dir.create(simfolder)
file.copy(paste(OMDir,"/M3.exe",sep=""),simfolder,overwrite=T)
file.copy(paste(OMDir,"/M3.pin",sep=""),simfolder,overwrite=T)
datfile<-paste(simfolder,"/M3.dat",sep="")
#datfile<-"G:/M3/M3.dat"
# }
#sim<-1
print(sim)
print(Sys.time())
#datfile<-paste(OMDir,"/M3.dat",sep="")
cat("\n")
cat("Write data")
cat("\n")
# -- Model Dimensions --
write("# ny number of years",datfile,1,append=F)
write(nyears,datfile,1,append=T)
write("# ns number of subyears",datfile,1,append=T)
write(nsubyears,datfile,1,append=T)
write("# np number of populations/stocks",datfile,1,append=T)
write(npop,datfile,1,append=T)
write("# na number of age classes",datfile,1,append=T)
write(nages,datfile,1,append=T)
write("# nr number of regions/areas",datfile,1,append=T)
write(nareas,datfile,1,append=T)
write("# nf number of fleets",datfile,1,append=T)
write(nfleets,datfile,1,append=T)
write("# nl number of length classes",datfile,1,append=T)
write(nlen,datfile,1,append=T)
write("# nRPT maximum number of time steps that a PSAT can be recaptured",datfile,1,append=T)
write(nRPT,datfile,1,append=T)
write("# RPtind correct subyear recapture index",datfile,1,append=T)
write(t(RPTind),datfile,nRPT,append=T)
write("# sdur the duration of the various subyears (sums to 1)",datfile,1,append=T)
write(rep(1/nsubyears,nsubyears),datfile,nsubyears,append=T)
write("# nZeq: number of years at the start of the model to calculate equilibrium Z from",datfile,1,append=T)
write(nZeq,datfile,1,append=T)
write("# nydist: number of years over which initial stock distribution is calculated (prior to spool up)",datfile,1,append=T)
write(nydist,datfile,1,append=T)
write("# nyeq: number of spool-up years over which the stock is subject to nZeq, used to define equilibrium conditions",datfile,1,append=T)
write(nyeq,datfile,1,append=T)
write("# ml the mean length of the length categories",datfile,1,append=T)
write(mulen,datfile,nlen,append=T)
yblock<-5
RDblock<-rep(1:100,each=yblock)[1:nyears]
write("# RDblock the RD parameter for each year",datfile,1,append=T)
write(RDblock,datfile,nyears,append=T)
write("# nRD the number of estimated recruitment strengths",datfile,1,append=T)
if(complexRD==0)write(max(RDblock),datfile,nyears,append=T)
if(complexRD==1)write(nyears,datfile,nyears,append=T)
# -- Growth --
write("# iALK the age-length key by population and year p y a l",datfile,1,append=T)
write(tomt(OM@iALK[sim,,,,]),datfile,nlen,append=T)
write("# lwa weight-length parameter a w=al^ b",datfile,1,append=T)
write(OM@a,datfile,npop,append=T)
write("# lwa weight-length parameter b w=al^ b",datfile,1,append=T)
write(OM@b,datfile,npop,append=T)
write("# len_age (pay)",datfile,1,append=T)
write(OM@Len_age[sim,,,1:nyears],datfile,nyears,append=T)
write("# wt_age (pay)",datfile,1,append=T)
write(OM@Wt_age[sim,,,1:nyears],datfile,nyears,append=T)
# -- Maturity --
write("# Fec, fecundity at age, SSB at age",datfile,1,append=T)
write(t(Wt_age[sim,,,nyears]*OM@mat[sim,,,nyears]),datfile,nages,append=T)
write("# steep, steepness of the Bev-Holt SR relationship",datfile,1,append=T)
write(OM@h[sim,],datfile,npop,append=T)
# -- Spawning --
write("# spawns, the subyear in which the stock spawns",datfile,1,append=T)
write(OM@Recsubyr,datfile,npop,append=T)
#write("# spawnr, the fracton of recruits in each area",datfile,1,append=T)
#write(t(spawnr[sim,,]),datfile,nareas,append=T)
# -- Natural Mortality rate --
write("# Ma, instantaneous natural mortality rate at age",datfile,1,append=T)
write(t(M[sim,,,1]),datfile,nages,append=T)
# -- Fishery data --
# Catches / F init
if(complexF==1){
allobsbelow<-0.02 # level of catches at cumulative 2%
Cobs_cutoff<- min(Cobst[order(as.vector(Cobst))][cumsum(Cobst[order(as.vector(Cobst))])/sum(Cobst)>allobsbelow])
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
cond<-Cobst[ind]>Cobs_cutoff
nind<-ind[cond,]
rat<-sum(Cobst[ind])/sum(Cobst[nind])
Cobs<-cbind(nind[,2:5],Cobst[nind])
Cobs[,5]<-Cobs[,5]*rat
}else{
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
Cobs<-cbind(ind[,2:5],Cobst[ind])
}
#plot(density(Cobst[nind]))
#lines(density(Cobst[ind]),col='red')
#legend('topright',legend=c(round(rat,4),nrow(Cobs)))
write("# nCobs, the number of catch weight observations y s r f CW",datfile,1,append=T)
write(nrow(Cobs),datfile,1,append=T)
write("# Cobs, catch weight observations y s r f C(weight)",datfile,1,append=T)
write(t(Cobs),datfile,5,append=T)
# CPUE
ind<-as.matrix(expand.grid(sim,1:nyears,1:nsubyears,1:nareas,1:nfleets))
CPUEobs<-cbind(ind[,c(2:5,5)],Iobst[sim,,,,]) # fleet is index number
write("# nCPUE, the number of CPUE series",datfile,1,append=T)
write(nfleets,datfile,1,append=T) # in this simulation this is the same as the number of fleets
write("# nCPUEobs, the number of CPUE observations y s r f CPUE(weight)",datfile,1,append=T)
write(nrow(CPUEobs),datfile,1,append=T)
write("# CPUEobs, CPUE observations y s r f CPUE(weight)",datfile,1,append=T)
write(t(CPUEobs),datfile,6,append=T)
# Length composition
CALt<-CALm[sim,,,,,,,] # p a y s m f l
CALsum<-ceiling(apply(CALt,3:7,sum,na.rm=T)) # y s m f l
#CALtot<-apply(CALsum)
#par(mfrow=c(1,2))
#plot(CALsum[2,1,2,1,]/max(CALsum[2,1,2,1,]))
#lines(CALsum[2,1,2,2,]/max(CALsum[2,1,2,2,]),col='red')
#plot(OM@sel[sim,1,])
#lines(OM@sel[sim,2,],col='red')
ind<-as.matrix(expand.grid(1:nyears,1:nsubyears,1:nareas,1:nfleets,1:nlen))
cond<-CALsum>0
CLobs<-cbind(ind[cond,],CALsum[cond])
#CLobs<-cbind(ind,CALsum[ind])
sum(is.na(CLobs))
write("# nCLobs, the number of catch-at-length observations y s r f l N",datfile,1,append=T)
write(nrow(CLobs),datfile,1,append=T)
write("# CLobs, catch-at-length observations y s r f l N",datfile,1,append=T)
write(t(CLobs),datfile,6,append=T)
# The real relative abundance index RAI (y, s, r) !!! need to change this to real values
write("# RAI, Relative Abundance index r x s x y",datfile,1,append=T)
write(RAI[sim,,,],datfile,nyears,append=T)
# Fishery-independent indices y s r pp i type(biomass/ssb) index
ind<-as.matrix(expand.grid(sim,1:npop,1:nyears))
Iobs<-as.matrix(cbind(ind[,3],OM@Recsubyr[ind[,2]],SpawnA[ind],ind[,2],ind[,2],rep(2,nrow(ind)),FIobst[ind])) # type SSB
write("# nI, the number of fishery independent indices series",datfile,1,append=T)
write(npop,datfile,1,append=T) # in this simulation this is the same as the number of populations
write("# nIobs, the number of fishery independent observations y s r i type(biomass/ssb) index",datfile,1,append=T)
write(nrow(Iobs),datfile,1,append=T)
write("# Iobs, fishery independent observations y s r i type(biomass/ssb) index",datfile,1,append=T)
write(t(Iobs),datfile,7,append=T)
# PSAT tagging --
nPSATs<-10000
PSATdist<-apply(C[sim,,,,,,],c(1,2,4,5),sum,na.rm=T)^0.01
PSATdist<-PSATdist/apply(PSATdist,1,sum)
PSATdist<-ceiling(PSATdist/sum(PSATdist)*nPSATs)
nPSATs<-sum(PSATdist)
track<-array(NA,c(nPSATs,nRPT))
sy<-rep(NA,nPSATs)
SOO<-array(NA,c(nPSATs,npop))
nT<-1+ceiling(runif(nPSATs)*(nRPT-1)) # nT is the number of timesteps for recapture, this is set to 2 here,nRPT is the maximum number of timesteps that a tag may be recaptured
PSAT<-c(1,1,3,1,9,9)
PSAT2<-c(1,1,1,1,1,0.05,0.95)
j<-0
mov<-OM@mov[sim,,,,,]
for(pp in 1:npop){
for(aa in 1:nages){
for(ss in 1:nsubyears){
for(rr in 1:nareas){
if(PSATdist[pp,aa,ss,rr]>0){
for(i in 1:PSATdist[pp,aa,ss,rr]){
j<-j+1
SOO[j,]<-apply(C[sim,,aa,ceiling(nyears*0.7),ss,rr,],1,sum)/sum(C[sim,,aa,ceiling(nyears*0.7),ss,rr,]) #SPAYMRF
track[j,1]<-rr
sy[j]<-ss
#for(rpt in 2:nT[j]){
rpt<-2
m<-RPTind[ss,rpt]
track[j,rpt]<-(1:nareas)[rmultinom(1,1,mov[pp,aa,mref[m],track[j,rpt-1],])==1] #
SOO[j,]<-SOO[j,]*apply(C[sim,,aa,ceiling(nyears*0.7),m,track[j,rpt],],1,sum)/sum(C[sim,,aa,ceiling(nyears*0.7),m,track[j,rpt],])
#} # track length
SOO[j,]<-SOO[j,]/sum(SOO[j,])
#if(1%in%SOO[j,]){
# for(rpt in 2:nT[j]){
#m<-RPTind[ss,rpt]
PSAT<-rbind(PSAT,c(pp,OM@ma[aa,pp],ss,2,track[j,(rpt-1):rpt]))
#}
#}else{
# for(rpt in 2:nT[j]){
# #m<-RPTind[ss,rpt]
# PSAT2<-rbind(PSAT2,c(ss,2,track[j,(rpt-1):rpt],SOO[j,]))
#}
#}
} # tags
}
} # areas pp
} # ages
} # subyears
} # pops
PSAT<-PSAT[2:nrow(PSAT),]
PSAT<-aggregate(rep(1,nrow(PSAT)),by=list(PSAT[,1],PSAT[,2],PSAT[,3],PSAT[,4],PSAT[,5],PSAT[,6]),sum)
#testPSAT<-array(0,c(npop,OM@nma,nsubyears,nareas,nareas))
#testPSAT[as.matrix(PSAT[,c(1,2,3,5,6)])]<-PSAT[,7]
#PSAT2<-PSAT2[2:nrow(PSAT2),]
write("# nPSAT, PSATs data of known stock of origin p a s t fr tr N",datfile,1,append=T)
write(nrow(PSAT),datfile,1,append=T)
write("# PSAT data of known stock of origin p a s t fr tr N",datfile,1,append=T)
write(t(PSAT),datfile,7,append=T)
write("# nPSAT2, PSATs data of unknown stock of origin a s t fr tr SOO(npop)",datfile,1,append=T)
write(1,datfile,1,append=T)
#write(nrow(PSAT2),datfile,1,append=T)
write("# PSAT2 data of unknown stock of origin a s t fr tr SOO(npop)",datfile,1,append=T)
write(t(PSAT2),datfile,5+npop,append=T)
# Placeholder for conventional tags
Tag<-array(c(2,1,1,1,2,2,1,1,1,1),c(1,10))
write("# nTag, number of conventional tag observations y s r a - y s r f a N",datfile,1,append=T)
write(nrow(Tag),datfile,1,append=T)
write("# Tag, conventional tag observations y s r a - y s r f a N",datfile,1,append=T)
write(t(Tag),datfile,10,append=T)
# Stock of origin
NSOO<-min(ceiling(nyears*nsubyears*nareas/2),500) # number of data points in time and space
muSOO<-10 # mean number of observations at those points
SOO<-apply(C[sim,,,1:nyears,,,],1:5,sum,na.rm=T)
#Csum<-apply(SOO,2:4,sum)
rat<-mean(SOO,na.rm=T)/muSOO
SOO<-SOO/rat
ind<-expand.grid(1:nages,1:nyears,1:nsubyears,1:nareas)[sample(1:(nages*nyears*nsubyears*nareas),NSOO),]
ind<-as.matrix(cbind(rep(1:npop,rep=NSOO),ind[rep(1:NSOO,each=npop),]))
SOOobs<-as.matrix(cbind(ind,SOO[ind]))
SOOobs<-SOOobs[SOOobs[,6]>0,] # remove zeros
write("# nSOOobs, number of stock of origin observations p aa y s r N",datfile,1,append=T)
write(nrow(SOOobs),datfile,1,append=T)
write("# SOOobs, stock of origin observations p aa y s r N",datfile,1,append=T)
write(t(SOOobs),datfile,6,append=T)
# -- Selectivity controls
write("# nsel, number of estimated selectivities",datfile,1,append=T)
write(nfleets,datfile,1,append=T) # same as number of fleets
write("# seltype, 2:logistic, 3:Thompson",datfile,1,append=T)
write(c(2,3),datfile,nfleets,append=T) # first fleet is logistic
write("# selind, which selectivity is assigned to each fleet",datfile,1,append=T)
write(c(1,2),datfile,nfleets,append=T) # same as fleets
write("# ratiolim, limits on logistic slope parameter relative to inflection point",datfile,1,append=T)
write(c(0.1,1),datfile,nfleets,append=T) # same as fleets
write("# infleclim, limits on model selectivity",datfile,1,append=T)
write(c(4,15),datfile,nfleets,append=T) # same as fleets
# -- Movement estimation
mov<-array(NA,c(npop,OM@nma,nsubyears,nareas,nareas))
#mov[as.matrix(PSAT[,c(1,2,4,5)])]<-1
movind<-mov1<-c(1,1,1,1,1)
maclassfind<-match(1:OM@nma,OM@ma[,1])
mov<-OM@mov[sim,,maclassfind,,,] # p ma s fr tr
mov[mov>0]<-1
notanarea<-apply(mov,c(1,2,3,5),sum) # p ma s tr
notanarea<-array(as.integer(notanarea>0),dim(notanarea))
can<-apply(mov,c(1,5),sum) # can a movement happen from this area for this stock?
can<-array(as.integer(can>0),dim(can))
ind<-TEG(dim(mov))
mov[ind]<-mov[ind]*notanarea[ind[,c(1,2,3,4)]]
for(pp in 1:npop){
for(ma in 1:OM@nma){
for(ss in 1:nsubyears){
for(rr in 1:nareas){
np<-sum(mov[pp,ma,ss,rr,],na.rm=T)
if(np>0){
fR<-match(1,mov[pp,ma,ss,rr,])
mov1<-rbind(mov1,c(pp,ma,ss,rr,fR))
if(np>1){
oR<-grep(1,mov[pp,ma,ss,rr,])
oR<-oR[oR!=fR]
for(i in 1:length(oR)){
movind<-rbind(movind,c(pp,ma,ss,rr,oR[i]))
}
}
}
}
}
}
}
movind<-movind[2:nrow(movind),]
mov1<-mov1[2:nrow(mov1),]
if(movtype==1){ # if a gravity formulation these indices are for the to area that should be estimated by season
firstr<-apply(can,1,which.max)
mov1<-TEG(c(npop,OM@nma,nsubyears))
mov1<-cbind(mov1,firstr[mov1[,1]],rep(999,nrow(mov1)))
#mov1<-cbind(rep(1:npop,each=nsubyears),rep(1:nsubyears,npop),firstr[rep(1:npop,each=nsubyears)],rep(999,nsubyears*npop))
can2<-can
can2[cbind(1:npop,firstr)]<-0
can2<-t(can2)
nrest<-apply(can2,1,sum)
indr<-array(1:nareas,c(nareas,npop))
indp<-array(rep(1:npop,each=nareas),c(nareas,npop))
rs<-indr[can2==1]
ps<-indp[can2==1]
movindo<-cbind(rep(ps,each=nsubyears),rep(1:nsubyears,length(rs)),rep(rs,each=nsubyears),rep(999,length(rs)*nsubyears))
movind<-array(rep(movindo,each=OM@nma),dim=c(nrow(movindo)*OM@nma,4))
movind<-cbind(movind[,1],rep(1:OM@nma,nrow(movindo)),movind[,2:4])
}
write("# nMP, number of estimated movement parameters",datfile,1,append=T)
if(movtype==1)write(nrow(movind)+nsubyears*OM@nma*npop,datfile,1,append=T)
if(movtype==2)write(nrow(movind),datfile,1,append=T)
write("# nma, number of estimated movement age classes",datfile,1,append=T)
write(OM@nma,datfile,1,append=T)
write("# ma, assignment of age classes to age",datfile,1,append=T)
write(OM@ma,datfile,nages,append=T)
write("# nmovind, number of estimated movement parameters minus viscosity",datfile,1,append=T)
write(nrow(movind),datfile,1,append=T)
write("# movind, the location of estimated movement parameters p s r r",datfile,1,append=T)
write(t(movind),datfile,5,append=T)
write("# nmov1, number of initial non-estimated movement parameters",datfile,1,append=T)
write(nrow(mov1),datfile,1,append=T)
write("# mov1, the location of initial non-estimated movement parameters p s r r",datfile,1,append=T)
write(t(mov1),datfile,5,append=T)
write("# movtype, the type of movement parameterization 1: gravity 2:markov matrix",datfile,1,append=T)
write(movtype,datfile,1,append=T)
# -- Observation errors
write("# CobsCV, lognormal CV of the observed catches",datfile,1,append=T)
write(rep(0.2,nfleets),datfile,nfleets,append=T)
write("# CPUEobsCV, lognormal CV of the CPUE indices",datfile,1,append=T)
write(rep(0.2,nfleets),datfile,nfleets,append=T) # CPUE index for each fleet
write("# IobsCV, lognormal CV of the fishery independent indices",datfile,1,append=T)
write(rep(0.2,npop),datfile,npop,append=T) # SSB index for each population
# -- Priors
write("# RDCV, lognormal penalty on recruitment deviations",datfile,1,append=T)
write(2,datfile,1,append=T) # SSB index for each population
write("# nLHw, number of likelihood weights",datfile,1,append=T)
write(10,datfile,1,append=T) # SSB index for each population
write("# LHw, likelihood weights (1 catch, 2 cpue, 3 FIindex, 4 Lcomp, 5 SOO, 6 PSAT, 7 PSAT2, 8 RecDev, 9 mov, 10 sel)",datfile,1,append=T)
write(c( 10, 1, 1/100000000, 1/1000, 1/10, 1/100, 1, 1, 1, 2),datfile,10,append=T) # SSB index for each population
# -- Initial values
write("# R0_ini, initial values for log R0",datfile,1,append=T)
write(OM@R0[sim,],datfile,npop,append=T) # Simulated R0 for each population
write("# sel_ini, initial values for selectivity",datfile,1,append=T)
write(t(OM@sel[sim,,]),datfile,nlen,append=T) # Actual selectivity
write("# selpar_ini, initial values for selectivity parameters",datfile,1,append=T)
write(t(OM@selpars[sim,,]),datfile,3,append=T) # Actual selectivity
#RFL<-array(NA,c(nsim,nfleets,nlen,nyears,nsubyears,nareas))
Fsub<-array(NA,c(nyears,nsubyears,nareas,nfleets))
indt<-as.matrix(expand.grid(sim,1:nfleets,1:nyears,1:nsubyears,1:nareas))
Fsub[indt[,c(3,4,5,2)]]<-OM@E[indt]*OM@q[indt[,1:2]]
# old-----
#Fsum<-RF[sim,1,,,,,]
#ind<-TEG(c(nyears,nsubyears,nareas,nfleets))
#ind<-as.matrix(cbind(maxv[ind[,4]],ind[]))
#Fsub<-array(Fsum[ind],c(nyears,nsubyears,nareas,nfleets))
#-----
if(complexF==0)lnFini<-log(as.vector(Fsub))
if(complexF==1)lnFini<-log(Fsub[nind[,2:5]])
write("# lnF_ini, initial values for log F",datfile,1,append=T)
write(lnFini,datfile,nrow(Cobs),append=T) # log apical F
write("# ilnRD_ini, initial recruitment deviations y=1 a=2:nages",datfile,1,append=T)
write(array(0,c(nages-1,npop)),datfile,nages-1,append=T) # Initial recruitment deviations
write("# lnRD_ini, initial recruitment deviations y=1:nyears",datfile,1,append=T)
write(log(t(OM@Recdevs[sim,,1:nyears])),datfile,nyears,append=T) # Recruitment deviations
write("# mov_ini, simulated movement p s a r r",datfile,1,append=T) # this is a pain: M3 is p s a r r, OM@mov is p a s r r (oh well)
movt<-OM@mov[sim,,,,,] # p a s r r
mov<-array(NA,dim(movt)[c(1,3,2,4,5)]) # p s a r r
ind<-TEG(dim(movt))
mov[ind[,c(1,3,2,4,5)]]<-movt[ind]
write(tomt(mov),datfile,nareas,append=T) # Movement probabilities
write("# qCPUE_ini, initial values for CPUE catchability nCPUE",datfile,1,append=T)
if(complexF==1)write(log(1/Isum[sim,]),datfile,nfleets,append=T) # CPUE catchabilities I=qVB
if(complexF==0){
#apicalF<-apply(FM[sim,,,1:nyears,,,],c(1,3,4,5,6),max,na.rm=T)
write(log(OM@q[sim,]),datfile,nfleets,append=T) # CPUE catchabilities I=qVB
}
write("# qI_ini, initial values for fishery independent catchability nI",datfile,1,append=T)
write(log(rep(1,nfleets)),datfile,nfleets,append=T) # Catchabilities I=qSSB or I=qB
write("# D_ini, simulated depletion SSB/SSB0",datfile,1,append=T)
write(D[sim,],datfile,nfleets,append=T) # Catchabilities I=qSSB or I=qB
write("# complexRD 1= run with full estimation of all recruitment deviations by year",datfile,1,append=T)
write(complexRD,datfile,1,append=T) # debug switch
write("# complexF 1= run with full estimation of all F's by year, subyear, fleet, region",datfile,1,append=T)
write(complexF,datfile,1,append=T) # debug switch
write("# nF either nCobs or 1 if complexF=0",datfile,1,append=T)
if(complexF==0)write(1,datfile,1,append=T)
if(complexF==1)write(nrow(Cobs),datfile,1,append=T)
write("# debug 1= run with initial values",datfile,1,append=T)
write(0,datfile,1,append=T) # debug switch
write("# verbose 1= run with printouts",datfile,1,append=T)
write(verbose,datfile,1,append=T) # debug switch
write("# datacheck",datfile,1,append=T)
write(991199,datfile,1,append=T) # datacheck
#system(paste(OMDir,"M3.exe -est",sep="/"),wait=T,show.output.on.console = F) # run the exe
#if(sim==1)pin_from_par(file=paste(OMDir,"/M3",sep=""))
# Store results
#out[[sim]]<-M3read(OMDir)
}
spawnr=c(4,1)
B0<-apply(N[,,,1,1,]*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),c(1:2,4),sum)
B0<-B0/array(apply(B0,1:2,sum),dim(B0))
Bind<-expand.grid(1:nsim,1:npop)
Bfrac<-matrix(B0[as.matrix(cbind(Bind,spawnr[Bind[,2]]))],ncol=npop)
SSB1<-apply(N[,,,1,1,]*
array(Wt_age[,,,1]*OM@mat[,,,1],c(nsim,npop,nages,nareas)),1:2,sum)
SSBcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears]*OM@mat[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
Bcur<-apply(N[,,,nyears,nsubyears,]*
array(Wt_age[,,,nyears],c(nsim,npop,nages,nareas)),1:2,sum)
Cobsta<-array(NA,c(nsim,npop,nages,nsubyears,nareas,nfleets))
ind<-as.matrix(expand.grid(1:nsim,1:npop,1:nages,nyears,1:nsubyears,1:nareas,1:nfleets))
Cobsta[ind[,c(1:3,5:7)]]<-C[ind]*Wt_age[ind[,1:4]]
Cobsta<-apply(Cobsta,c(1,2,6),sum)
Ct<-apply(Cobsta,1:2,sum)
Urat<-Cobsta/array(Ct,dim(Cobsta))
U<-Ct/(apply(Bcur,1,sum)+Ct)
B0t<-apply(N[,,,1,1,]*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),c(1:2),sum)
ratB0<-B0t/apply(B0t,1,sum)
ratBcur<-Bcur/apply(Bcur,1,sum)
.Object@simlist<-list(SSB0=SSB0,D=SSBcur/SSB0,D1=SSBcur/SSB1,B0=B0t,Bfrac=Bfrac,Bcur=Bcur,Urat=Urat,U=U,ratB0=ratB0,ratBcur=ratBcur)
#Bfracp<-t(sapply(1:nsim,getBfrac,out,spawnr=spawnr))
#Bfracbias<-(Bfracp-Bfrac)/Bfrac
# Bias in current depletion (SSB)
#Dp<-t(sapply(1:nsim,getdep,out))
#Dbias<-(Dp-D)/D
# Bias in current SSB (absolute)
#SSBp<-t(sapply(1:nsim,getSSBnow,out))
#SSBbias<-(SSBp-Bcur)/Bcur
#Perf<-data.frame(Dbias,SSBbias,Bfracbias)
#names(Perf)<-c(paste("Dbias",1:npop,sep="_"),paste("SSBtbias",1:npop,sep="_"),paste("Bfracbias",1:npop,sep="_"))
#.Object@Perf<-Perf
#.Object@C[MP,,,]<-apply(C[,,,1:allyears,,,]*array(Wt_age[,,,1:allyears],c(nsim,npop,nages,allyears,nsubyears,nareas,nfleets)),c(1,2,4),sum)
#SSB<-apply(SSN[,,,1:allyears,4,]*array(Wt_age[,,,1:allyears],c(nsim,npop,nages,allyears,nareas)),c(1,2,4),sum)
#.Object@D[MP,,,]<-SSB/apply(SSB0,1,sum)
#B<-apply((SSN[,,,1:allyears,4,]+NSN[,,,1:allyears,4,])*array(Wt_age,c(nsim,npop,nages,allyears,nareas)),c(1:2,4),sum)
#Bthen<-apply((SSN[,,,1,4,]+NSN[,,,1,4,])*array(Wt_age[,,,1],c(nsim,npop,nages,nareas)),1:2,sum)
#.Object@B_BMSY[MP,,]<-apply(array(B[,targpop,],dim=c(nsim,length(targpop),allyears)),c(1,3),sum)/OM@BMSY
#U<-apply(array(.Object@C[MP,,targpop,],c(nsim,length(targpop),allyears)),c(1,3),sum)/
#apply(array(VBA[,targpop,,1:allyears,4,],c(nsim,length(targpop),nages,allyears,nareas)),c(1,4),sum)
#.Object@F_FMSY[MP,,]<-U/OM@UMSY
#cat("\n")
# #.Object@MPs<-MPs
.Object
})
# Operating model definition object ---------------------------------------------------------------------------------------------------------------------
# S4 class holding the full specification of a simulated operating model:
# dimensions, biological parameter sampling ranges, movement, fleet
# characteristics, and observation/trial settings.  Slots typed "array"
# hold per-simulation (and often per-population/age) samples.
setClass("OMd",representation(
# Description
Name="character",Date="character",Author="character",
Notes="character",PrimarySource="character",
# Dimensions
nsim="integer",npop="integer",nages="integer", # MSE dimensions
nyears="integer",nsubyears="integer",nareas="integer", # MSE dimensions
proyears="integer",nlen="integer",lenbins="numeric", # Projected years, number of length bins, bin edges
interval="integer", # Management update interval (years)
nma="integer",ma="array", # Number of movement age classes, age class definitions
# Parameter ranges / simulation sample distributions
Magemu="array",Mrange="array",Msd="array",Mgrad="array", # Mean natural mortality rate at age, sample range, interannual variability and gradient % yr-1
SRrel="integer",h="array",recgrad="array", # Stock-recruitment relationship type, steepness, underlying gradient % yr-1
Reccv="array",AC="array", Recsubyr="integer", # CV of recruitment deviations, recruitment auto-correlation, recruitment subyear
Linf="array",K="array",t0="numeric", # Mean growth parameters
Ksd="array",Kgrad="array",Linfsd="array",Linfgrad="array", # Interannual variability in growth and mean trajectory % yr-1
a="numeric",b="numeric", # Weight - Length conversion W=aL^b
ageM="array",ageMsd="array",ageMgrad="array", # Age-at-maturity, interannual variability and gradient % yr-1
D="array",R0="array", # Current stock depletion, unfished recruitment/abundance
Size_area="array",mov="array", # Size of regions, Markov movement matrix for all fish and mature fish
movvar="numeric",movsd="array",movgrad="array", # Inter-simulation variability in movement, interannual-variability in movement, gradient changes in area gravity weights
excl="array", # Exclusion matrix [0,1] depending on whether the stock can go there
# Fleet specifications
nfleets="integer", # Number of fleets
L05="array",VmaxL="array", LFS="array", # Length at 5% vulnerability, vulnerability of largest fish, length at full selection
Fsd="array",Fgrad="array", Frat="numeric", # Interannual variability in F, final gradient in F yr-1, F ratio
Spat_targ="array", # Spatial targeting parameter F =prop= V^Spat_targ
Area_names="character", Area_defs="list", # Area definitions (polygons)
targpop="numeric", # The target population for calculation of MSY and depletion reference points
#nZeq="integer", # The number of initial years to calculation equilibrium F
nydist="integer", # The number of years (iterations) taken to find equilibrium spatial distribution
#nyeq="integer", # The number of years (iterations) taken to find equilibrium F
# Observation properties relevant to trial specifications
Cbias="numeric", # Catch observation bias
# Misc
seed="numeric" # Random seed for the generation of the OM
))
# Plot spatial definitions of the OMd object: draws a world map with a
# 10-degree graticule (equator / prime meridian in green) and overlays each
# management-area polygon with its name near the mean of its vertex
# coordinates.  Requires the maps/mapdata packages ("worldHires" database).
setMethod("plot", signature(x = "OMd"), function(x) {
  OMd <- x
  cols <- rep(c("#ff000040", "#00ff0040", "#0000ff40", "#00000040", "#ff00ff40"), 4)  # palette, currently unused
  res <- 0.03                 # map drawing resolution
  xlim <- c(-105, 50)         # shared map extent (was duplicated in both map() calls)
  ylim <- c(-55, 85)
  map(database = "worldHires", xlim = xlim, ylim = ylim, mar = rep(0, 4), resolution = res)
  abline(v = (-20:20) * 10, col = 'light grey')
  abline(h = (-20:20) * 10, col = 'light grey')
  abline(v = 0, col = "green")  # prime meridian
  abline(h = 0, col = "green")  # equator
  # Redraw land on top of the graticule (T -> TRUE: T is reassignable).
  map(database = "worldHires", mar = rep(0, 4), border = 0, xlim = xlim, ylim = ylim,
      add = TRUE, fill = TRUE, col = "light grey", resolution = res)
  # seq_along() is safe when Area_names is empty (1:length() would iterate c(1, 0)).
  for (i in seq_along(OMd@Area_names)) {
    polygon(OMd@Area_defs[[i]], border = 'blue', lwd = 2, col = NA)
    text(mean(OMd@Area_defs[[i]]$x), 2.5 + mean(OMd@Area_defs[[i]]$y),
         OMd@Area_names[i], col = 'red', font = 2, cex = 0.6)
  }
})
#setMethod("plot", signature(x = "MSE"),function(x){
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{plot.shrinkTVP_forc}
\alias{plot.shrinkTVP_forc}
\title{Graphical summary of posterior predictive density}
\usage{
\method{plot}{shrinkTVP_forc}(x, showgap = FALSE, ...)
}
\arguments{
\item{x}{a \code{shrinkTVP_forc} object.}
\item{showgap}{if \code{showgap = FALSE}, the gap between the historical observations and the forecasts is removed.
The default value is \code{FALSE}.}
\item{...}{further arguments to be passed to \code{plot}.}
}
\value{
Called for its side effects and returns invisibly.
}
\description{
\code{plot.shrinkTVP_forc} generates plots visualizing the posterior predictive density generated by \code{forecast_shrinkTVP}.
}
\examples{
\donttest{
set.seed(123)
sim <- simTVP()
train <- sim$data[1:190, ]
test <- sim$data[191:200, ]
res <- shrinkTVP(y ~ x1 + x2, train)
forecast <- forecast_shrinkTVP(res, test)
plot(forecast)
lines(sim$data$y, col = "forestgreen")
}
}
\seealso{
Other plotting functions:
\code{\link{plot.mcmc.tvp}()},
\code{\link{plot.shrinkTVP}()}
}
\author{
Peter Knaus \email{peter.knaus@wu.ac.at}
}
\concept{plotting functions}
| /man/plot.shrinkTVP_forc.Rd | no_license | cran/shrinkTVP | R | false | true | 1,177 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{plot.shrinkTVP_forc}
\alias{plot.shrinkTVP_forc}
\title{Graphical summary of posterior predictive density}
\usage{
\method{plot}{shrinkTVP_forc}(x, showgap = FALSE, ...)
}
\arguments{
\item{x}{a \code{shrinkTVP_forc} object.}
\item{showgap}{if \code{showgap = FALSE}, the gap between the historical observations and the forecasts is removed.
The default value is \code{FALSE}.}
\item{...}{further arguments to be passed to \code{plot}.}
}
\value{
Called for its side effects and returns invisibly.
}
\description{
\code{plot.shrinkTVP_forc} generates plots visualizing the posterior predictive density generated by \code{forecast_shrinkTVP}.
}
\examples{
\donttest{
set.seed(123)
sim <- simTVP()
train <- sim$data[1:190, ]
test <- sim$data[191:200, ]
res <- shrinkTVP(y ~ x1 + x2, train)
forecast <- forecast_shrinkTVP(res, test)
plot(forecast)
lines(sim$data$y, col = "forestgreen")
}
}
\seealso{
Other plotting functions:
\code{\link{plot.mcmc.tvp}()},
\code{\link{plot.shrinkTVP}()}
}
\author{
Peter Knaus \email{peter.knaus@wu.ac.at}
}
\concept{plotting functions}
|
rm(list=ls(all=TRUE))
##################################################################
## read data & hyperparameters
##################################################################
library(MASS)
library(mvtnorm)
# Success probability under the (sign-flipped) logistic link:
# pi = 1 / (1 + exp(alpha + beta * x)).  Vectorized over x.
cal.pi <- function(alpha, beta, x)
{
  linpred <- alpha + beta * x
  1 / (1 + exp(linpred))
}
# Unnormalized posterior density of (alpha, beta) given dose-response data:
# binomial likelihood at each dose times a bivariate normal prior.
#
# x: dose levels; n: trials per dose; y: successes per dose.
cal.poster <- function(alpha, beta, x, n, y)
{
  # dbinom is vectorized over (y, n, prob), so the element-by-element
  # running-product loop is replaced with a single prod() call.
  likeli <- prod(dbinom(y, n, cal.pi(alpha, beta, x), log = FALSE))
  # Bivariate normal prior on (alpha, beta); dmvnorm's third argument is
  # the COVARIANCE matrix, so sqrt(100)*I = 10*I means variance 10.
  # NOTE(review): gradU() below penalizes with q/100, i.e. a variance-100
  # prior -- the two are inconsistent; confirm the intended prior scale.
  likeli * dmvnorm(c(alpha, beta), c(0, 0), sqrt(100) * diag(2))
}
# Grid over which the joint posterior of (alpha, beta) is evaluated.
alpha_list <- seq(from = -4, to = 2, by = 0.1)
beta_list <- seq(from = -8, to = 1, by = 0.1)
n1 <- length(alpha_list)  # grid sizes for alpha and beta
n2 <- length(beta_list)
## read data: a 4x4 matrix; per the extraction below, row 2 = dose x,
## row 3 = trials n, row 4 = successes y (row 1 unused here).
dta <- scan("/Users/zk794/Box Sync/Monte Carlo Methods in Stats/M3/sage.dta")
data <- matrix(dta, 4, 4)
x <- data[2, ]
n <- data[3, ]
y <- data[4, ]
# Evaluate the (unnormalized) posterior on the grid, then normalize so the
# grid values sum to one.
poster <- matrix(1, n1, n2)
for (j in seq_len(n1))
  for (k in seq_len(n2))
    poster[j, k] <- cal.poster(alpha_list[j], beta_list[k], x, n, y)
poster <- poster / sum(poster)
# BUG FIX: the filled.contour call below had only its first line commented
# out, leaving the continuation line '"beta")' as a top-level syntax error;
# the whole call is now commented out.
#filled.contour(alpha_list, beta_list, poster, color = function(x) rev(rainbow(x)),
#               xlab = "alpha", ylab = "beta")
contour(alpha_list, beta_list, poster, xlab = "alpha", ylab = "beta")
######1b
###potential energy
# Potential energy for HMC: the negative log of the (unnormalized)
# posterior at q = (alpha, beta).  Uses the data x, n, y from the
# enclosing environment via cal.poster().
potU <- function(q) {
  -log(cal.poster(q[1], q[2], x, n, y))
}
##gradient of potential energy
# Analytic gradient of the potential energy potU with respect to
# q = (alpha, beta).  Relies on the data vectors x, n, y from the
# enclosing environment.  The q/100 terms imply a N(0, 100) prior on each
# coordinate; NOTE(review): cal.poster() uses covariance sqrt(100)*I
# (variance 10) -- confirm which prior scale is intended.
gradU <- function(q) {
  odds <- exp(q[1] + q[2] * x)   # exp(alpha + beta * x)
  pfit <- odds / (1 + odds)
  d_alpha <- q[1] / 100 + sum(y - n) + sum(n * pfit)
  d_beta  <- q[2] / 100 + sum(x * (y - n)) + sum(n * x * pfit)
  c(d_alpha, d_beta)
}
###Hamiltonian function
# One Hamiltonian Monte Carlo transition.
#
# epsilon: leapfrog step size (may be negative to integrate backwards).
# L:       number of leapfrog steps.
# q0:      current position (parameter vector).
# Returns the accepted position (the proposal, or q0 on rejection).
# Relies on potU() and gradU() from the enclosing environment.
HMC <- function(epsilon = 0.01, L = 100, q0)
{
  q <- q0
  p0 <- rnorm(length(q), 0, 1)  # fresh standard-normal momentum
  p <- p0
  # Leapfrog integration: half momentum step, full position step,
  # half momentum step, repeated L times.
  for (i in 1:L)
  {
    p <- p - epsilon * gradU(q) / 2
    q <- q + epsilon * p
    p <- p - epsilon * gradU(q) / 2
  }
  # Potential and kinetic energies at the start and end of the trajectory.
  U0 <- potU(q0)
  K0 <- sum(p0^2) / 2
  U1 <- potU(q)
  # BUG FIX: the final kinetic energy must use the momentum at the END of
  # the trajectory (p), not the initial momentum p0; with p0, K1 always
  # equaled K0, the Metropolis ratio ignored the kinetic-energy change,
  # and detailed balance was broken.
  K1 <- sum(p^2) / 2
  # Metropolis accept/reject on the total-energy change.
  rate <- exp(U0 - U1 + K0 - K1)
  if (runif(1) < rate) {
    q    # accept proposal
  } else {
    q0   # reject: stay at current position
  }
}
#####initialization###
# Storage for the HMC draws: one row per iteration, columns (alpha, beta).
# NOTE(review): rep(2*1000) is just the scalar 2000 recycled to fill the
# matrix; rep(NA, 2*1000) was probably intended.  Harmless here, since
# every row is overwritten in the loop below.
theta_b = matrix(c(rep(2*1000)),1000,2) ####storing alpha and beta
q0 = c(-5,-10)
niter = 1000
for(i in 1:niter){
####iterations
# Randomize the integration direction each iteration by flipping the sign
# of the step size with probability 1/2.
if (runif(1) < 0.5)
{
theta_b[i,] = HMC(0.01,100,q0) ###forward
}
else
{
theta_b[i,] = HMC(-0.01,100,q0) ###backward
}
q0 = theta_b[i,]
}
# Overlay the HMC draws on the posterior contour plot drawn earlier.
points(theta_b[,1],theta_b[,2],col = "red",pch=20, cex=.5)
###########################
#########1c ###############
# One random-walk Metropolis-Hastings transition for q = (alpha, beta).
# Proposes from a bivariate normal centred at q0 (covariance 10 * I) and
# accepts with the posterior ratio.  Uses cal.poster() and the data
# x, n, y from the enclosing environment.
mh.transition <- function(q0)
{
  step_var <- 10
  proposal <- mvrnorm(1, q0, step_var * diag(2))
  post_cur <- cal.poster(q0[1], q0[2], x, n, y)
  post_new <- cal.poster(proposal[1], proposal[2], x, n, y)
  if (runif(1) < post_new / post_cur) {
    proposal   # accept the proposal
  } else {
    q0         # reject: stay at the current state
  }
}
# Run a Metropolis-Hastings chain of n.iter transitions starting from
# (alpha, beta) = (-5, -10).  Returns an n.iter x 2 matrix of draws.
# Relies on mh.transition() from the enclosing environment.
mh <- function(n.iter = 100)
{
  # Preallocate with NA_real_ (the original logical-NA matrix worked but
  # forced a type change on first assignment); seq_len() avoids the
  # 1:n.iter trap when n.iter is 0.
  draws <- matrix(NA_real_, nrow = n.iter, ncol = 2)
  state <- c(-5, -10)
  for (it in seq_len(n.iter)) {
    state <- mh.transition(state)
    draws[it, ] <- state
  }
  draws
}
theta_c = mh(1000)
points(theta_c[,1],theta_c[,2],col = "blue",pch=19,cex=.5)
legend("topright", c("HMC", "MH"), pch = c(20,19), col = c("red","blue")) | /m3.R | no_license | zkftyj-zz/R | R | false | false | 3,185 | r | rm(list=ls(all=TRUE))
##################################################################
## read data & hyperparameters
##################################################################
library(MASS)
library(mvtnorm)
cal.pi <- function(alpha,beta,x)
{
pi_val <- 1/(1+exp(alpha+beta*x))
return(pi_val)
}
cal.poster <- function(alpha,beta,x,n,y)
{
likeli <- 1
nx <- length(x)
for (i in 1:nx)
likeli = likeli*dbinom(y[i], n[i], cal.pi(alpha,beta,x[i]), log = FALSE)
likeli = likeli*dmvnorm(c(alpha,beta),c(0,0),sqrt(100)*diag(2))
return(likeli)
}
alpha_list <- seq(from=-4,to=2,by = 0.1)
beta_list <- seq(from=-8,to=1,by = 0.1)
n1 <- length(alpha_list) ## length for alpha and beta list
n2 <- length(beta_list)
## read data
dta <- scan("/Users/zk794/Box Sync/Monte Carlo Methods in Stats/M3/sage.dta")
data = matrix(dta,4,4)
x = data[2,]
n = data[3,]
y = data[4,]
poster = matrix(1,n1,n2)
for (j in 1:n1)
for (k in 1:n2)
poster[j,k] = cal.poster(alpha_list[j],beta_list[k],x,n,y)
poster = poster/sum(poster)
# BUG FIX: the filled.contour call had only its first line commented out,
# leaving the continuation line '"beta")' as a top-level syntax error;
# the whole call is now commented out.
#filled.contour(alpha_list, beta_list, poster, color = function(x) rev(rainbow(x)),
#               xlab = "alpha", ylab = "beta")
contour(alpha_list, beta_list, poster, xlab = "alpha", ylab = "beta")
######1b
###potential energy
potU = function(q){
poster = cal.poster(q[1],q[2],x,n,y)
return(-log(poster))
}
##gradient of potential energy
gradU = function(q){
tmp = exp(q[1] + q[2]*x)
grad_alpha = q[1]/100 + sum(y-n) + sum(n*tmp/(1+tmp))
grad_beta = q[2]/100 + sum(x*(y-n)) + sum(n*tmp*x/(1+tmp))
return(c(grad_alpha,grad_beta))
}
###Hamiltonian function
# One Hamiltonian Monte Carlo transition.
#
# epsilon: leapfrog step size (may be negative to integrate backwards).
# L:       number of leapfrog steps.
# q0:      current position (parameter vector).
# Returns the accepted position (the proposal, or q0 on rejection).
# Relies on potU() and gradU() from the enclosing environment.
HMC <- function(epsilon = 0.01, L = 100, q0)
{
  q <- q0
  p0 <- rnorm(length(q), 0, 1)  # fresh standard-normal momentum
  p <- p0
  # Leapfrog integration: half momentum step, full position step,
  # half momentum step, repeated L times.
  for (i in 1:L)
  {
    p <- p - epsilon * gradU(q) / 2
    q <- q + epsilon * p
    p <- p - epsilon * gradU(q) / 2
  }
  # Potential and kinetic energies at the start and end of the trajectory.
  U0 <- potU(q0)
  K0 <- sum(p0^2) / 2
  U1 <- potU(q)
  # BUG FIX: the final kinetic energy must use the momentum at the END of
  # the trajectory (p), not the initial momentum p0; with p0, K1 always
  # equaled K0, the Metropolis ratio ignored the kinetic-energy change,
  # and detailed balance was broken.
  K1 <- sum(p^2) / 2
  # Metropolis accept/reject on the total-energy change.
  rate <- exp(U0 - U1 + K0 - K1)
  if (runif(1) < rate) {
    q    # accept proposal
  } else {
    q0   # reject: stay at current position
  }
}
#####initialization###
theta_b = matrix(c(rep(2*1000)),1000,2) ####storing alpha and beta
q0 = c(-5,-10)
niter = 1000
for(i in 1:niter){
####iterations
if (runif(1) < 0.5)
{
theta_b[i,] = HMC(0.01,100,q0) ###forward
}
else
{
theta_b[i,] = HMC(-0.01,100,q0) ###backward
}
q0 = theta_b[i,]
}
points(theta_b[,1],theta_b[,2],col = "red",pch=20, cex=.5)
###########################
#########1c ###############
mh.transition <- function(q0)
{
s = 10
q1 = mvrnorm(1,q0,s*diag(2))
likeli0 = cal.poster(q0[1],q0[2],x,n,y)
likeli1 = cal.poster(q1[1],q1[2],x,n,y)
acc_prob <- likeli1/likeli0
if (runif(1) < acc_prob)
{ # accept
qstar <- q1 # replace
}
else
{
qstar = q0
}
return(qstar)
}
mh <- function(n.iter=100)
{
## initialize the parameters
q0 = c(-5,-10)
qm = matrix(c(rep(NA,n.iter*2)),n.iter,2) ###store all the alpha and beta
for(iter in 1:n.iter){
q1 = mh.transition(q0)
qm[iter,] = q1
q0 = q1
}
return(qm)
}
theta_c = mh(1000)
points(theta_c[,1],theta_c[,2],col = "blue",pch=19,cex=.5)
legend("topright", c("HMC", "MH"), pch = c(20,19), col = c("red","blue")) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/highdim.R
\name{impute_residaug}
\alias{impute_residaug}
\title{Impute the controls after fitting gynsth and reweighting residuals}
\usage{
impute_residaug(outcomes, metadata, fit, trt_unit)
}
\arguments{
\item{outcomes}{Tidy dataframe with the outcomes and meta data}
\item{metadata}{Dataframe with metadata, in particular a t_int column}
\item{fit}{Output of fit_gsynaug_formatted}

\item{trt_unit}{The treated unit}
}
\value{
outcomes with additional synthetic control added,
synth weights
outcome regression weights
}
\description{
Impute the controls after fitting gynsth and reweighting residuals
}
| /man/impute_residaug.Rd | no_license | ebenmichael/ents | R | false | true | 666 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/highdim.R
\name{impute_residaug}
\alias{impute_residaug}
\title{Impute the controls after fitting gynsth and reweighting residuals}
\usage{
impute_residaug(outcomes, metadata, fit, trt_unit)
}
\arguments{
\item{outcomes}{Tidy dataframe with the outcomes and meta data}
\item{metadata}{Dataframe with metadata, in particular a t_int column}
\item{fit}{Output of fit_gsynaug_formatted}
}
\value{
outcomes with additional synthetic control added,
synth weights
outcome regression weights
}
\description{
Impute the controls after fitting gynsth and reweighting residuals
}
|
#' Print the sql of an analysis
#'
#' @details
#' Print the parameterized SQL that is run for an analysisId.
#'
#' @param analysisId An analysisId for which the sql will be printed.
#' @return
#' None
#'
#' @export
printAnalysesSql<- function(analysisId){
sql <- tryCatch({
sql = SqlRender::loadRenderTranslateSql( sqlFilename = file.path("analyses", paste(analysisId, "sql", sep = ".")),
tdbms = connectionDetails$dbms, packageName = "CatalogueExport")
cat(sql)
}, error = function (e) {
cat("analysisId does not exist ")
}, finally = {
})
} | /R/Helper.R | permissive | awsid9/CatalogueExport | R | false | false | 621 | r | #' Print the sql of an analysis
#'
#' @details
#' Print the parameterized SQL that is run for an analysisId.
#'
#' @param analysisId An analysisId for which the sql will be printed.
#' @return
#' Invisibly returns \code{NULL}; called for its side effect of printing.
#'
#' @export
printAnalysesSql <- function(analysisId) {
  # NOTE(review): relies on a `connectionDetails` object from the calling
  # environment for the target dialect -- confirm it is guaranteed to
  # exist, or pass it in as an argument.
  invisible(tryCatch({
    # BUG FIX: the dialect was passed as `tdbms`, which is not an argument
    # of SqlRender::loadRenderTranslateSql -- it was absorbed by `...` as a
    # render parameter, leaving `dbms` at its default.  The correct
    # argument name is `dbms`.  (The empty `finally` block was removed.)
    sql <- SqlRender::loadRenderTranslateSql(
      sqlFilename = file.path("analyses", paste(analysisId, "sql", sep = ".")),
      dbms = connectionDetails$dbms,
      packageName = "CatalogueExport"
    )
    cat(sql)
  }, error = function(e) {
    cat("analysisId does not exist ")
  }))
}
/Dados_de_algodao/Modelagem GLMM PC - C1 C5 e C8.R | no_license | rceratti/Dissertacao | R | false | false | 8,010 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{lavaan_to_list}
\alias{lavaan_to_list}
\title{Extracts matrices from a lavaan object.}
\usage{
lavaan_to_list(object)
}
\arguments{
\item{object}{A \code{lavaan} object.}
}
\value{
A list containing Lambda, Psi, and Gamma.
}
\description{
Extracts matrices from a lavaan object.
}
\keyword{internal}
| /man/lavaan_to_list.Rd | permissive | JonasMoss/reliable | R | false | true | 396 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{lavaan_to_list}
\alias{lavaan_to_list}
\title{Extracts matrices from a lavaan object.}
\usage{
lavaan_to_list(object)
}
\arguments{
\item{object}{A \code{lavaan} object.}
}
\value{
A list containing Lambda, Psi, and Gamma.
}
\description{
Extracts matrices from a lavaan object.
}
\keyword{internal}
|
IC<-c(0,1)
percentage<-c(0.7344013,0.2655987)
data<-data.frame(IC,percentage)
barplot(data,main="Population who Recieved an IC",xlab="IC")
c <- ggplot(data, aes(x=IC,y=percentage))
c+geom_bar()
data<-data.frame(IC=c("Didn't receive an IC","Received an IC"),percentage=c(0.7344013,0.2655987))
ggplot(data, aes(x=IC,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Population of Interest")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_race<-data.frame(race=c("White","Black","Hispanic","Other"),percentage=c(0.266,0.250,0.283,0.275))
ggplot(data_race, aes(x=race,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Pop of Interest that Received an IC by Race")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_pay<-data.frame(pay=c('Private','M-care','M-caid','Workers','Self-Pay','No Charge','Other','Don\'t know'),percentage=c(0.283,0.233,0.284,0.140,0.193,0.271,0.243,0.334))
ggplot(data_pay, aes(x=pay,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Pop of Interest that Received an IC by Pay Method")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_year<-data.frame(year=c(2005,2006,2007,2008,2009,2010),percentage=c(0.231,0.253,0.270,0.290,0.246,0.302))
ggplot(data_year,aes(x=year,y=percentage))+
geom_line()+
geom_point()+
geom_text(aes(label=percentage),vjust=3)+
ggtitle("Pop of Interest that Received an IC by Year")+
theme(plot.title = element_text(lineheight=.7,hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
| /health_Code.R | no_license | apruch26/Graph_health_analytics_results- | R | false | false | 1,831 | r | IC<-c(0,1)
percentage<-c(0.7344013,0.2655987)
data<-data.frame(IC,percentage)
barplot(data,main="Population who Recieved an IC",xlab="IC")
c <- ggplot(data, aes(x=IC,y=percentage))
c+geom_bar()
data<-data.frame(IC=c("Didn't receive an IC","Received an IC"),percentage=c(0.7344013,0.2655987))
ggplot(data, aes(x=IC,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Population of Interest")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_race<-data.frame(race=c("White","Black","Hispanic","Other"),percentage=c(0.266,0.250,0.283,0.275))
ggplot(data_race, aes(x=race,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Pop of Interest that Received an IC by Race")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_pay<-data.frame(pay=c('Private','M-care','M-caid','Workers','Self-Pay','No Charge','Other','Don\'t know'),percentage=c(0.283,0.233,0.284,0.140,0.193,0.271,0.243,0.334))
ggplot(data_pay, aes(x=pay,y = percentage))+
geom_bar(stat = "identity")+
geom_text(aes(label=percentage),vjust=0)+
ggtitle("Pop of Interest that Received an IC by Pay Method")+
theme(plot.title = element_text(lineheight=.7, face="bold",hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
data_year<-data.frame(year=c(2005,2006,2007,2008,2009,2010),percentage=c(0.231,0.253,0.270,0.290,0.246,0.302))
ggplot(data_year,aes(x=year,y=percentage))+
geom_line()+
geom_point()+
geom_text(aes(label=percentage),vjust=3)+
ggtitle("Pop of Interest that Received an IC by Year")+
theme(plot.title = element_text(lineheight=.7,hjust = 0.5))+
scale_y_continuous(limits=c(0,1))
|
#
# Pharmacogenomics Prediction Pipeline - Drug Sensitivity Visualization
#
# https://github.com/DCGenomics/Pharmacogenomics_Prediction_Pipeline_P3
#
library(shiny)
library(MASS)
library(ggplot2)
library(matrixStats)
options(stringsAsFactors=FALSE)
options(shiny.trace=TRUE)
# Filepaths
# Input data: normalized RNA-seq counts, exome variant counts per
# gene/cell line, and drug-response (AC50) measurements.  Each file's
# first column supplies the row names (gene or drug identifiers);
# remaining columns are cell lines.
base_dir = "/data/datasets/filtered/"
rnaseq_input = file.path(base_dir, "rnaseq_expression/HMCL_ensembl74_Counts_normalized.csv")
exome_input = file.path(base_dir, "exome_variants/genes_per_cell_line.txt")
drug_response_input = file.path(base_dir, "drug_response/iLAC50_filtered.csv")
gene_expr = read.csv(rnaseq_input, row.names=1)
gene_snps = read.delim(exome_input, row.names=1)
drug_data = read.csv(drug_response_input, row.names=1)
# Drop cell lines without drug data (keep only columns shared with drug_data)
gene_expr = gene_expr[,colnames(gene_expr) %in% colnames(drug_data)]
gene_snps = gene_snps[,colnames(gene_snps) %in% colnames(drug_data)]
# Cell line names
cell_lines = colnames(drug_data)
# Testing -- restrict the gene dropdown to the most variable genes.
# (The original comment said "1000 most variable genes", but the code
# actually keeps the top 5% by variance via the 0.95 quantile cutoff.)
variances = rowVars(as.matrix(gene_expr))
var_cutoff = as.numeric(quantile(variances, 0.95))
gene_choices = rownames(gene_expr)[variances > var_cutoff]
drug_choices = rownames(drug_data)
#
# Server
#
# Renders a scatter plot of drug response (log-AC50) against gene
# expression (log2-CPM) for the selected gene/drug pair, coloured by
# exome variant status when variant data are present for that gene.
server <- function(input, output, session) {
  output$gene_expr <- renderPlot({
    plot_df <- data.frame(
      cell_line = cell_lines,
      gene  = as.numeric(gene_expr[rownames(gene_expr) == input$gene, ]),
      agent = as.numeric(drug_data[rownames(drug_data) == input$drug, ]),
      exome = as.numeric(gene_snps[rownames(gene_snps) == input$gene, ])
    )
    plt <- ggplot(plot_df, aes(gene, agent))
    # Colour the points by exome status only when any value is non-missing.
    if (all(is.na(plot_df$exome))) {
      plt <- plt + geom_point()
    } else {
      plt <- plt + geom_point(aes(colour = factor(exome)))
    }
    plt +
      geom_smooth(method = input$smooth_method) +
      xlab(sprintf("Log2-CPM Expression (%s)", input$gene)) +
      ylab(sprintf("Log-AC50 (%s)", input$drug)) +
      ggtitle("Log AC50 vs. Expression")
  })
}
#
# UI
#
# Shiny UI: narrow sidebar with drug/gene/smoother selectors, main panel
# holds the scatter plot rendered by the server.
ui <- fluidPage(
  titlePanel("Pharmacogenomics Prediction Pipeline - Drug Sensitivity Visualization"),
  sidebarLayout(
    sidebarPanel(
      selectInput("drug", "Drug:", choices = drug_choices),
      selectInput("gene", "Gene:", choices = gene_choices),
      selectInput(
        "smooth_method", "Smooth method:",
        choices = c("lm", "rlm", "loess")
      ),
      width = 2
    ),
    mainPanel(plotOutput("gene_expr", height = 640))
  )
)
# Launch app
shinyApp(ui = ui, server = server)
| /shiny/drug_sensitivity/app.R | permissive | khughitt/Pharmacogenomics_Prediction_Pipeline_P3 | R | false | false | 2,676 | r | #
# Pharmacogenomics Prediction Pipeline - Drug Sensitivity Visualization
#
# https://github.com/DCGenomics/Pharmacogenomics_Prediction_Pipeline_P3
#
# Dependencies
library(shiny)
library(MASS)
library(ggplot2)
library(matrixStats)
options(stringsAsFactors = FALSE)
# NOTE(review): shiny.trace=TRUE logs every client/server message; it looks
# like a leftover debugging aid — confirm before deploying. Kept to preserve
# current behavior.
options(shiny.trace = TRUE)
# Filepaths
base_dir <- "/data/datasets/filtered/"
rnaseq_input <- file.path(base_dir, "rnaseq_expression/HMCL_ensembl74_Counts_normalized.csv")
exome_input <- file.path(base_dir, "exome_variants/genes_per_cell_line.txt")
drug_response_input <- file.path(base_dir, "drug_response/iLAC50_filtered.csv")
# Input tables: rows are genes (expression / variant calls) or drugs
# (response); columns are cell lines.
gene_expr <- read.csv(rnaseq_input, row.names = 1)
gene_snps <- read.delim(exome_input, row.names = 1)
drug_data <- read.csv(drug_response_input, row.names = 1)
# Drop cell lines without drug data
gene_expr <- gene_expr[, colnames(gene_expr) %in% colnames(drug_data)]
gene_snps <- gene_snps[, colnames(gene_snps) %in% colnames(drug_data)]
# Cell line names
cell_lines <- colnames(drug_data)
# Restrict the gene selector to the most variable genes. The cut is the top
# 5% by row variance (quantile 0.95), not a fixed 1000 as the original
# comment claimed.
variances <- rowVars(as.matrix(gene_expr))
var_cutoff <- as.numeric(quantile(variances, 0.95))
gene_choices <- rownames(gene_expr)[variances > var_cutoff]
drug_choices <- rownames(drug_data)
#
# Server
#
# Shiny server: scatter of drug response (log-AC50) against gene expression
# for the selected gene/drug pair, coloured by exome variant status when any
# variant calls exist for the gene.
server <- function(input, output, session) {
  output$gene_expr <- renderPlot({
    plot_df <- data.frame(
      cell_line = cell_lines,
      gene      = as.numeric(gene_expr[rownames(gene_expr) == input$gene, ]),
      agent     = as.numeric(drug_data[rownames(drug_data) == input$drug, ]),
      exome     = as.numeric(gene_snps[rownames(gene_snps) == input$gene, ])
    )
    # plot_df = plot_df[complete.cases(plot_df),]  # kept disabled, as in original
    p <- ggplot(plot_df, aes(gene, agent))
    # Colour by exome data if present
    if (all(is.na(plot_df$exome))) {
      p <- p + geom_point()
    } else {
      p <- p + geom_point(aes(colour = factor(exome)))
    }
    p +
      geom_smooth(method = input$smooth_method) +
      xlab(sprintf("Log2-CPM Expression (%s)", input$gene)) +
      ylab(sprintf("Log-AC50 (%s)", input$drug)) +
      ggtitle("Log AC50 vs. Expression")
  })
}
#
# UI
#
# Shiny UI: narrow sidebar with drug/gene/smoother selectors, main panel
# holds the scatter plot rendered by the server.
ui <- fluidPage(
  titlePanel("Pharmacogenomics Prediction Pipeline - Drug Sensitivity Visualization"),
  sidebarLayout(
    sidebarPanel(
      selectInput("drug", "Drug:", choices = drug_choices),
      selectInput("gene", "Gene:", choices = gene_choices),
      selectInput(
        "smooth_method", "Smooth method:",
        choices = c("lm", "rlm", "loess")
      ),
      width = 2
    ),
    mainPanel(plotOutput("gene_expr", height = 640))
  )
)
# Launch app
shinyApp(ui = ui, server = server)
|
modelFrame <- structure(function #Dendroclimatic-fluctuations modeling
### This function develops recursive evaluation of functions for
### one-level modeling (FOLM) and LME detrending of dendroclimatic
### chronologies.
##details<< Defaults model fluctuations in
##tree-ring width chronologies via recursive
##implementation of four FOLM:
##\code{\link{rtimes}}, \code{\link{scacum}},
##\code{\link{amod}}, and
##\code{\link{frametoLme}}. Nevertheless,
##other FOLM can be implemented to model
##aridity-index fluctuations(see example with
##climatic data). Processed chronologies are
##detrended with \code{\link{lme}} function
##and other \code{\link{nlme}} methods
##. Internal algorithm uses
##\code{\link{shiftFrame}}
##\code{\link{arguSelect}} and
##\code{\link{ringApply}}
##functions. Consequently, arguments that are
##not iterated over factor-level labels in the
##processed data are specified in 'MoreArgs'
##lists (see examples). Arguments in
##\code{modelFrame} objects can be updated
##with \code{\link{update}} function.
##references<< Lara W., F. Bravo,
##D. Maguire. 2013. Modeling patterns between
##drought and tree biomass growth from
##dendrochronological data: A multilevel
##approach. Agric. For. Meteorol.,
##178-179:140-151.
(
rd, ##<<\code{data.frame} or \code{list}. Dendroclimatic
##chronology or Multilevel ecological data series.
fn = list('rtimes','scacum','amod'), ##<< \code{list}. Names of
##the functions for one-level
##modeling to be recursively
##implemented.
lv = list(2,1,1), ##<< \code{list}. \code{numeric} positions in
##the factor-level labels of \code{rd} to
##implement the one-level functions. If
##\code{rd} is a MEDS, then \code{character}
##names of the factor-level columns.
form = 'tdForm', ##<<\code{character} or \code{NULL}. Name of a
##detrending formula. Two in-package
##methods are available: the default
##\code{\link{tdForm}} or
##\code{\link{lmeForm}}.
... ##<< Further arguments in \code{\link{mUnits}}, or in the
##functions for one-level modeling, or in the
##\code{\link{lme}} function/methods, or in the detrending
##formula.
) {
# Capture the extra arguments; they are routed below either to mUnits, to
# the one-level functions (via arguSelect), or to the lme detrending step.
lse <- list(...)
mln <- length(lv)
# A "wide" input has all-numeric columns; wide frames and lists are
# reshaped to the long multilevel (MEDS) layout before processing.
iswide <- all(sapply(rd, is.numeric))
islist <- class(rd)%in%'list'
if(any(iswide, islist)){
rd <- shiftFrame(rd)
}
# Optional unit conversion: only runs when '...' names any mUnits argument
# (other than its first formal). The same conversion is applied to the
# 'sc.c' argument when supplied, so units stay consistent.
fns <- 'mUnits'
if(any(names(lse)%in%names(formals(fns)[-1L]))){
nmu <- cClass(rd, 'numeric')
rdu <- arguSelect(x = rd[,nmu], fun = fns, ...)
rd[,nmu] <- do.call(fns, rdu)
if('sc.c'%in%names(lse)){
sca <- arguSelect(x = lse$'sc.c', fun = fns, ...)
lse[['sc.c']] <- do.call(fns, sca)
}
}
# Partition '...': list-valued arguments are iterated per FOLM step (one
# element per function in 'fn'); scalar arguments and 'MoreArgs' are shared.
mar <- 'MoreArgs'
ls. <- lapply(lse,class)%in%'list'
yls <- Map(unlist,lse[ls.])
yls[c('fn','lv')] <- list(fn,lv)
nma <- yls[!names(yls)%in%mar]
lsp <- lse[!names(lse)%in%names(yls)]
s <- names(lse)%in%mar
if(any(s))
lsp[[mar]] <- lse[[mar]]
ar <- list()
mln <- length(nma[[1L]])
# Recursive FOLM stage: for each step i, select the i-th function/level
# pair, assemble its argument list, and overwrite 'rd' with the result of
# ringApply, so each step feeds the next.
for(i in 1:mln){
lsl <- lapply(nma, '[[', i)
lt <- list(rd, fun = 'ringApply')
nl <- unlist(Map(levels,
rd[cClass(rd,'factor')]))
spd <- function(x){
unlist(strsplit(x, '\\.'))}
# Arguments whose names match factor levels of 'rd' are expanded with
# levexp — presumably so per-level vectors line up with 'rd' rows;
# NOTE(review): confirm against levexp() documentation.
my <- unlist(Map(function(x)
!is.null(names(x)) &&
spd(names(x))%in% nl, lsp))
if(any(my)) {
lsp[names(lsp)[my]] <- Map(function(x)
levexp(x, rd),lsp[names(lsp)[my]])}
lst <- c(lsl, lsp, lt)
ar[[i]] <- do.call('arguSelect', lst)
rd <- do.call('ringApply', ar[[i]])
}
# Detrending stage: fit the LME model on the processed chronology using the
# requested formula ('tdForm' by default).
arl <- arguSelect(rd,
fun = c('frametoLme','lme', form),...)
arl[['form']] <- form
rd <- do.call('frametoLme',arl)
# Record the original call and tag the S3 class so update() can re-run it.
rd[['call']] <- sys.call()
class(rd) <- c('modelFrame', class(rd))
return(rd)
### Threefold list with fluctuations in \code{fluc},
### {\link{groupedData}} object in \code{model}, and model call in
### \code{call}.
} , ex=function() {
##TRW chronology (mm) and inside-bark radii
data(Pchron,envir = environment())
data(Pradii03,envir = environment())
## Tree-ring width fluctuations:
trwf <- modelFrame(Pchron,
sc.c = Pradii03,
rf.t = 2003,
log.t = TRUE)
summary(trwf$'model')
## Tree-diameter fluctuations:
tdf <- modelFrame(Pchron,
sc.c = Pradii03,
rf.t = 2003,
log.t = TRUE,
MoreArgs = list(mp = c(pi,2)))
summary(tdf$'model')
## Climatic records:
data(Temp,envir = environment())
data(Prec,envir = environment())
## Aridity-index fluctuations:
aif <- modelFrame(rd = list(Prec, Temp),
fn = list('moveYr','wlai'),
lv = list('year','year'),
form = 'lmeForm')
summary(aif$'model')
})
| /R/modelFrame.R | no_license | talalbutt/BIOdry | R | false | false | 5,951 | r | modelFrame <- structure(function #Dendroclimatic-fluctuations modeling
### This function develops recursive evaluation of functions for
### one-level modeling (FOLM) and LME detrending of dendroclimatic
### chronologies.
##details<< Defaults model fluctuations in
##tree-ring width chronologies via recursive
##implementation of four FOLM:
##\code{\link{rtimes}}, \code{\link{scacum}},
##\code{\link{amod}}, and
##\code{\link{frametoLme}}. Nevertheless,
##other FOLM can be implemented to model
##aridity-index fluctuations(see example with
##climatic data). Processed chronologies are
##detrended with \code{\link{lme}} function
##and other \code{\link{nlme}} methods
##. Internal algorithm uses
##\code{\link{shiftFrame}}
##\code{\link{arguSelect}} and
##\code{\link{ringApply}}
##functions. Consequently, arguments that are
##not iterated over factor-level labels in the
##processed data are specified in 'MoreArgs'
##lists (see examples). Arguments in
##\code{modelFrame} objects can be updated
##with \code{\link{update}} function.
##references<< Lara W., F. Bravo,
##D. Maguire. 2013. Modeling patterns between
##drought and tree biomass growth from
##dendrochronological data: A multilevel
##approach. Agric. For. Meteorol.,
##178-179:140-151.
(
rd, ##<<\code{data.frame} or \code{list}. Dendroclimatic
##chronology or Multilevel ecological data series.
fn = list('rtimes','scacum','amod'), ##<< \code{list}. Names of
##the functions for one-level
##modeling to be recursively
##implemented.
lv = list(2,1,1), ##<< \code{list}. \code{numeric} positions in
##the factor-level labels of \code{rd} to
##implement the one-level functions. If
##\code{rd} is a MEDS, then \code{character}
##names of the factor-level columns.
form = 'tdForm', ##<<\code{character} or \code{NULL}. Name of a
##detrending formula. Two in-package
##methods are available: the default
##\code{\link{tdForm}} or
##\code{\link{lmeForm}}.
... ##<< Further arguments in \code{\link{mUnits}}, or in the
##functions for one-level modeling, or in the
##\code{\link{lme}} function/methods, or in the detrending
##formula.
) {
lse <- list(...)
mln <- length(lv)
iswide <- all(sapply(rd, is.numeric))
islist <- class(rd)%in%'list'
if(any(iswide, islist)){
rd <- shiftFrame(rd)
}
fns <- 'mUnits'
if(any(names(lse)%in%names(formals(fns)[-1L]))){
nmu <- cClass(rd, 'numeric')
rdu <- arguSelect(x = rd[,nmu], fun = fns, ...)
rd[,nmu] <- do.call(fns, rdu)
if('sc.c'%in%names(lse)){
sca <- arguSelect(x = lse$'sc.c', fun = fns, ...)
lse[['sc.c']] <- do.call(fns, sca)
}
}
mar <- 'MoreArgs'
ls. <- lapply(lse,class)%in%'list'
yls <- Map(unlist,lse[ls.])
yls[c('fn','lv')] <- list(fn,lv)
nma <- yls[!names(yls)%in%mar]
lsp <- lse[!names(lse)%in%names(yls)]
s <- names(lse)%in%mar
if(any(s))
lsp[[mar]] <- lse[[mar]]
ar <- list()
mln <- length(nma[[1L]])
for(i in 1:mln){
lsl <- lapply(nma, '[[', i)
lt <- list(rd, fun = 'ringApply')
nl <- unlist(Map(levels,
rd[cClass(rd,'factor')]))
spd <- function(x){
unlist(strsplit(x, '\\.'))}
my <- unlist(Map(function(x)
!is.null(names(x)) &&
spd(names(x))%in% nl, lsp))
if(any(my)) {
lsp[names(lsp)[my]] <- Map(function(x)
levexp(x, rd),lsp[names(lsp)[my]])}
lst <- c(lsl, lsp, lt)
ar[[i]] <- do.call('arguSelect', lst)
rd <- do.call('ringApply', ar[[i]])
}
arl <- arguSelect(rd,
fun = c('frametoLme','lme', form),...)
arl[['form']] <- form
rd <- do.call('frametoLme',arl)
rd[['call']] <- sys.call()
class(rd) <- c('modelFrame', class(rd))
return(rd)
### Threefold list with fluctuations in \code{fluc},
### {\link{groupedData}} object in \code{model}, and model call in
### \code{call}.
} , ex=function() {
##TRW chronology (mm) and inside-bark radii
data(Pchron,envir = environment())
data(Pradii03,envir = environment())
## Tree-ring width fluctuations:
trwf <- modelFrame(Pchron,
sc.c = Pradii03,
rf.t = 2003,
log.t = TRUE)
summary(trwf$'model')
## Tree-diameter fluctuations:
tdf <- modelFrame(Pchron,
sc.c = Pradii03,
rf.t = 2003,
log.t = TRUE,
MoreArgs = list(mp = c(pi,2)))
summary(tdf$'model')
## Climatic records:
data(Temp,envir = environment())
data(Prec,envir = environment())
## Aridity-index fluctuations:
aif <- modelFrame(rd = list(Prec, Temp),
fn = list('moveYr','wlai'),
lv = list('year','year'),
form = 'lmeForm')
summary(aif$'model')
})
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_operation new_request send_request
#' @include cloudhsm_service.R
NULL
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Adds or overwrites one or more tags for the specified AWS CloudHSM
#' resource.
#'
#' Each tag consists of a key and a value. Tag keys must be unique to each
#' resource.
#'
#' @usage
#' cloudhsm_add_tags_to_resource(ResourceArn, TagList)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource to tag.
#' @param TagList [required] One or more tags.
#'
#' @section Request syntax:
#' ```
#' svc$add_tags_to_resource(
#' ResourceArn = "string",
#' TagList = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_add_tags_to_resource
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_add_tags_to_resource <- function(ResourceArn, TagList) {
  operation <- new_operation(
    name = "AddTagsToResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$add_tags_to_resource_input(ResourceArn = ResourceArn, TagList = TagList)
  resp_shape <- .cloudhsm$add_tags_to_resource_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$add_tags_to_resource <- cloudhsm_add_tags_to_resource
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates a high-availability partition group. A high-availability
#' partition group is a group of partitions that spans multiple physical
#' HSMs.
#'
#' @usage
#' cloudhsm_create_hapg(Label)
#'
#' @param Label [required] The label of the new high-availability partition group.
#'
#' @section Request syntax:
#' ```
#' svc$create_hapg(
#' Label = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_hapg
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_create_hapg <- function(Label) {
  operation <- new_operation(
    name = "CreateHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$create_hapg_input(Label = Label)
  resp_shape <- .cloudhsm$create_hapg_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$create_hapg <- cloudhsm_create_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates an uninitialized HSM instance.
#'
#' There is an upfront fee charged for each HSM instance that you create
#' with the `CreateHsm` operation. If you accidentally provision an HSM and
#' want to request a refund, delete the instance using the DeleteHsm
#' operation, go to the [AWS Support
#' Center](https://console.aws.amazon.com/support/home), create a new case,
#' and select **Account and Billing Support**.
#'
#' It can take up to 20 minutes to create and provision an HSM. You can
#' monitor the status of the HSM with the DescribeHsm operation. The HSM is
#' ready to be initialized when the status changes to `RUNNING`.
#'
#' @usage
#' cloudhsm_create_hsm(SubnetId, SshKey, EniIp, IamRoleArn, ExternalId,
#' SubscriptionType, ClientToken, SyslogIp)
#'
#' @param SubnetId [required] The identifier of the subnet in your VPC in which to place the HSM.
#' @param SshKey [required] The SSH public key to install on the HSM.
#' @param EniIp The IP address to assign to the HSM\'s ENI.
#'
#' If an IP address is not specified, an IP address will be randomly chosen
#' from the CIDR range of the subnet.
#' @param IamRoleArn [required] The ARN of an IAM role to enable the AWS CloudHSM service to allocate an
#' ENI on your behalf.
#' @param ExternalId The external ID from `IamRoleArn`, if present.
#' @param SubscriptionType [required]
#' @param ClientToken A user-defined token to ensure idempotence. Subsequent calls to this
#' operation with the same token will be ignored.
#' @param SyslogIp The IP address for the syslog monitoring server. The AWS CloudHSM
#' service only supports one syslog monitoring server.
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm(
#' SubnetId = "string",
#' SshKey = "string",
#' EniIp = "string",
#' IamRoleArn = "string",
#' ExternalId = "string",
#' SubscriptionType = "PRODUCTION",
#' ClientToken = "string",
#' SyslogIp = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_hsm
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_create_hsm <- function(SubnetId, SshKey, EniIp = NULL, IamRoleArn, ExternalId = NULL, SubscriptionType, ClientToken = NULL, SyslogIp = NULL) {
  operation <- new_operation(
    name = "CreateHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$create_hsm_input(SubnetId = SubnetId, SshKey = SshKey, EniIp = EniIp, IamRoleArn = IamRoleArn, ExternalId = ExternalId, SubscriptionType = SubscriptionType, ClientToken = ClientToken, SyslogIp = SyslogIp)
  resp_shape <- .cloudhsm$create_hsm_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$create_hsm <- cloudhsm_create_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates an HSM client.
#'
#' @usage
#' cloudhsm_create_luna_client(Label, Certificate)
#'
#' @param Label The label for the client.
#' @param Certificate [required] The contents of a Base64-Encoded X.509 v3 certificate to be installed on
#' the HSMs used by this client.
#'
#' @section Request syntax:
#' ```
#' svc$create_luna_client(
#' Label = "string",
#' Certificate = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_luna_client
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_create_luna_client <- function(Label = NULL, Certificate) {
  operation <- new_operation(
    name = "CreateLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$create_luna_client_input(Label = Label, Certificate = Certificate)
  resp_shape <- .cloudhsm$create_luna_client_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$create_luna_client <- cloudhsm_create_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes a high-availability partition group.
#'
#' @usage
#' cloudhsm_delete_hapg(HapgArn)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hapg(
#' HapgArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_hapg
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_delete_hapg <- function(HapgArn) {
  operation <- new_operation(
    name = "DeleteHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$delete_hapg_input(HapgArn = HapgArn)
  resp_shape <- .cloudhsm$delete_hapg_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$delete_hapg <- cloudhsm_delete_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes an HSM. After completion, this operation cannot be undone and
#' your key material cannot be recovered.
#'
#' @usage
#' cloudhsm_delete_hsm(HsmArn)
#'
#' @param HsmArn [required] The ARN of the HSM to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm(
#' HsmArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_hsm
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_delete_hsm <- function(HsmArn) {
  operation <- new_operation(
    name = "DeleteHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$delete_hsm_input(HsmArn = HsmArn)
  resp_shape <- .cloudhsm$delete_hsm_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$delete_hsm <- cloudhsm_delete_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes a client.
#'
#' @usage
#' cloudhsm_delete_luna_client(ClientArn)
#'
#' @param ClientArn [required] The ARN of the client to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_luna_client(
#' ClientArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_luna_client
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_delete_luna_client <- function(ClientArn) {
  operation <- new_operation(
    name = "DeleteLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$delete_luna_client_input(ClientArn = ClientArn)
  resp_shape <- .cloudhsm$delete_luna_client_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$delete_luna_client <- cloudhsm_delete_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about a high-availability partition group.
#'
#' @usage
#' cloudhsm_describe_hapg(HapgArn)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to describe.
#'
#' @section Request syntax:
#' ```
#' svc$describe_hapg(
#' HapgArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_hapg
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_describe_hapg <- function(HapgArn) {
  operation <- new_operation(
    name = "DescribeHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$describe_hapg_input(HapgArn = HapgArn)
  resp_shape <- .cloudhsm$describe_hapg_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$describe_hapg <- cloudhsm_describe_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about an HSM. You can identify the HSM by its ARN
#' or its serial number.
#'
#' @usage
#' cloudhsm_describe_hsm(HsmArn, HsmSerialNumber)
#'
#' @param HsmArn The ARN of the HSM. Either the `HsmArn` or the `SerialNumber` parameter
#' must be specified.
#' @param HsmSerialNumber The serial number of the HSM. Either the `HsmArn` or the
#' `HsmSerialNumber` parameter must be specified.
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm(
#' HsmArn = "string",
#' HsmSerialNumber = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_hsm
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_describe_hsm <- function(HsmArn = NULL, HsmSerialNumber = NULL) {
  operation <- new_operation(
    name = "DescribeHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$describe_hsm_input(HsmArn = HsmArn, HsmSerialNumber = HsmSerialNumber)
  resp_shape <- .cloudhsm$describe_hsm_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$describe_hsm <- cloudhsm_describe_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about an HSM client.
#'
#' @usage
#' cloudhsm_describe_luna_client(ClientArn, CertificateFingerprint)
#'
#' @param ClientArn The ARN of the client.
#' @param CertificateFingerprint The certificate fingerprint.
#'
#' @section Request syntax:
#' ```
#' svc$describe_luna_client(
#' ClientArn = "string",
#' CertificateFingerprint = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_luna_client
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_describe_luna_client <- function(ClientArn = NULL, CertificateFingerprint = NULL) {
  operation <- new_operation(
    name = "DescribeLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$describe_luna_client_input(ClientArn = ClientArn, CertificateFingerprint = CertificateFingerprint)
  resp_shape <- .cloudhsm$describe_luna_client_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$describe_luna_client <- cloudhsm_describe_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Gets the configuration files necessary to connect to all high
#' availability partition groups the client is associated with.
#'
#' @usage
#' cloudhsm_get_config(ClientArn, ClientVersion, HapgList)
#'
#' @param ClientArn [required] The ARN of the client.
#' @param ClientVersion [required] The client version.
#' @param HapgList [required] A list of ARNs that identify the high-availability partition groups that
#' are associated with the client.
#'
#' @section Request syntax:
#' ```
#' svc$get_config(
#' ClientArn = "string",
#' ClientVersion = "5.1"|"5.3",
#' HapgList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_get_config
# Generated paws boilerplate: describe the operation, serialize the input
# shape, and dispatch a single request.
cloudhsm_get_config <- function(ClientArn, ClientVersion, HapgList) {
  operation <- new_operation(
    name = "GetConfig",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .cloudhsm$get_config_input(ClientArn = ClientArn, ClientVersion = ClientVersion, HapgList = HapgList)
  resp_shape <- .cloudhsm$get_config_output()
  client <- .cloudhsm$service()
  send_request(new_request(client, operation, req_input, resp_shape))
}
.cloudhsm$operations$get_config <- cloudhsm_get_config
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists the Availability Zones that have available AWS CloudHSM capacity.
#'
#' @usage
#' cloudhsm_list_available_zones()
#'
#' @section Request syntax:
#' ```
#' svc$list_available_zones()
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_available_zones
cloudhsm_list_available_zones <- function() {
  # Describe the ListAvailableZones API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "ListAvailableZones",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # This operation takes no arguments; marshal empty input/output shapes.
  op_input <- .cloudhsm$list_available_zones_input()
  op_output <- .cloudhsm$list_available_zones_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$list_available_zones <- cloudhsm_list_available_zones
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists the high-availability partition groups for the account.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to `ListHapgs`
#' to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_hapgs(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListHapgs`. Pass null if
#' this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_hapgs(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_hapgs
cloudhsm_list_hapgs <- function(NextToken = NULL) {
  # Describe the ListHapgs API call: a POST to "/" (pagination is handled
  # manually by the caller via NextToken, not by a paws paginator).
  operation <- new_operation(
    name = "ListHapgs",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$list_hapgs_input(NextToken = NextToken)
  op_output <- .cloudhsm$list_hapgs_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$list_hapgs <- cloudhsm_list_hapgs
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves the identifiers of all of the HSMs provisioned for the current
#' customer.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to `ListHsms`
#' to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_hsms(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListHsms`. Pass null if
#' this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_hsms(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_hsms
cloudhsm_list_hsms <- function(NextToken = NULL) {
  # Describe the ListHsms API call: a POST to "/" (pagination is handled
  # manually by the caller via NextToken, not by a paws paginator).
  operation <- new_operation(
    name = "ListHsms",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$list_hsms_input(NextToken = NextToken)
  op_output <- .cloudhsm$list_hsms_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$list_hsms <- cloudhsm_list_hsms
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists all of the clients.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to
#' `ListLunaClients` to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_luna_clients(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListLunaClients`. Pass
#' null if this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_luna_clients(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_luna_clients
cloudhsm_list_luna_clients <- function(NextToken = NULL) {
  # Describe the ListLunaClients API call: a POST to "/" (pagination is
  # handled manually by the caller via NextToken, not by a paws paginator).
  operation <- new_operation(
    name = "ListLunaClients",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$list_luna_clients_input(NextToken = NextToken)
  op_output <- .cloudhsm$list_luna_clients_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$list_luna_clients <- cloudhsm_list_luna_clients
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Returns a list of all tags for the specified AWS CloudHSM resource.
#'
#' @usage
#' cloudhsm_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_tags_for_resource
cloudhsm_list_tags_for_resource <- function(ResourceArn) {
  # Describe the ListTagsForResource API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$list_tags_for_resource_input(ResourceArn = ResourceArn)
  op_output <- .cloudhsm$list_tags_for_resource_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$list_tags_for_resource <- cloudhsm_list_tags_for_resource
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies an existing high-availability partition group.
#'
#' @usage
#' cloudhsm_modify_hapg(HapgArn, Label, PartitionSerialList)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to modify.
#' @param Label The new label for the high-availability partition group.
#' @param PartitionSerialList The list of partition serial numbers to make members of the
#' high-availability partition group.
#'
#' @section Request syntax:
#' ```
#' svc$modify_hapg(
#' HapgArn = "string",
#' Label = "string",
#' PartitionSerialList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_hapg
cloudhsm_modify_hapg <- function(HapgArn, Label = NULL, PartitionSerialList = NULL) {
  # Describe the ModifyHapg API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "ModifyHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$modify_hapg_input(
    HapgArn = HapgArn,
    Label = Label,
    PartitionSerialList = PartitionSerialList
  )
  op_output <- .cloudhsm$modify_hapg_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$modify_hapg <- cloudhsm_modify_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies an HSM.
#'
#' This operation can result in the HSM being offline for up to 15 minutes
#' while the AWS CloudHSM service is reconfigured. If you are modifying a
#' production HSM, you should ensure that your AWS CloudHSM service is
#' configured for high availability, and consider executing this operation
#' during a maintenance window.
#'
#' @usage
#' cloudhsm_modify_hsm(HsmArn, SubnetId, EniIp, IamRoleArn, ExternalId,
#' SyslogIp)
#'
#' @param HsmArn [required] The ARN of the HSM to modify.
#' @param SubnetId The new identifier of the subnet that the HSM is in. The new subnet must
#' be in the same Availability Zone as the current subnet.
#' @param EniIp The new IP address for the elastic network interface (ENI) attached to
#' the HSM.
#'
#' If the HSM is moved to a different subnet, and an IP address is not
#' specified, an IP address will be randomly chosen from the CIDR range of
#' the new subnet.
#' @param IamRoleArn The new IAM role ARN.
#' @param ExternalId The new external ID.
#' @param SyslogIp The new IP address for the syslog monitoring server. The AWS CloudHSM
#' service only supports one syslog monitoring server.
#'
#' @section Request syntax:
#' ```
#' svc$modify_hsm(
#' HsmArn = "string",
#' SubnetId = "string",
#' EniIp = "string",
#' IamRoleArn = "string",
#' ExternalId = "string",
#' SyslogIp = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_hsm
cloudhsm_modify_hsm <- function(HsmArn, SubnetId = NULL, EniIp = NULL, IamRoleArn = NULL, ExternalId = NULL, SyslogIp = NULL) {
  # Describe the ModifyHsm API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "ModifyHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$modify_hsm_input(
    HsmArn = HsmArn,
    SubnetId = SubnetId,
    EniIp = EniIp,
    IamRoleArn = IamRoleArn,
    ExternalId = ExternalId,
    SyslogIp = SyslogIp
  )
  op_output <- .cloudhsm$modify_hsm_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$modify_hsm <- cloudhsm_modify_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies the certificate used by the client.
#'
#' This action can potentially start a workflow to install the new
#' certificate on the client\'s HSMs.
#'
#' @usage
#' cloudhsm_modify_luna_client(ClientArn, Certificate)
#'
#' @param ClientArn [required] The ARN of the client.
#' @param Certificate [required] The new certificate for the client.
#'
#' @section Request syntax:
#' ```
#' svc$modify_luna_client(
#' ClientArn = "string",
#' Certificate = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_luna_client
cloudhsm_modify_luna_client <- function(ClientArn, Certificate) {
  # Describe the ModifyLunaClient API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "ModifyLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$modify_luna_client_input(
    ClientArn = ClientArn,
    Certificate = Certificate
  )
  op_output <- .cloudhsm$modify_luna_client_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$modify_luna_client <- cloudhsm_modify_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Removes one or more tags from the specified AWS CloudHSM resource.
#'
#' To remove a tag, specify only the tag key to remove (not the value). To
#' overwrite the value for an existing tag, use AddTagsToResource.
#'
#' @usage
#' cloudhsm_remove_tags_from_resource(ResourceArn, TagKeyList)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource.
#' @param TagKeyList [required] The tag key or keys to remove.
#'
#' Specify only the tag key to remove (not the value). To overwrite the
#' value for an existing tag, use AddTagsToResource.
#'
#' @section Request syntax:
#' ```
#' svc$remove_tags_from_resource(
#' ResourceArn = "string",
#' TagKeyList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_remove_tags_from_resource
cloudhsm_remove_tags_from_resource <- function(ResourceArn, TagKeyList) {
  # Describe the RemoveTagsFromResource API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "RemoveTagsFromResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$remove_tags_from_resource_input(
    ResourceArn = ResourceArn,
    TagKeyList = TagKeyList
  )
  op_output <- .cloudhsm$remove_tags_from_resource_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$remove_tags_from_resource <- cloudhsm_remove_tags_from_resource
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_operation new_request send_request
#' @include cloudhsm_service.R
NULL
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Adds or overwrites one or more tags for the specified AWS CloudHSM
#' resource.
#'
#' Each tag consists of a key and a value. Tag keys must be unique to each
#' resource.
#'
#' @usage
#' cloudhsm_add_tags_to_resource(ResourceArn, TagList)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource to tag.
#' @param TagList [required] One or more tags.
#'
#' @section Request syntax:
#' ```
#' svc$add_tags_to_resource(
#' ResourceArn = "string",
#' TagList = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_add_tags_to_resource
cloudhsm_add_tags_to_resource <- function(ResourceArn, TagList) {
  # Describe the AddTagsToResource API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "AddTagsToResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$add_tags_to_resource_input(
    ResourceArn = ResourceArn,
    TagList = TagList
  )
  op_output <- .cloudhsm$add_tags_to_resource_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$add_tags_to_resource <- cloudhsm_add_tags_to_resource
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates a high-availability partition group. A high-availability
#' partition group is a group of partitions that spans multiple physical
#' HSMs.
#'
#' @usage
#' cloudhsm_create_hapg(Label)
#'
#' @param Label [required] The label of the new high-availability partition group.
#'
#' @section Request syntax:
#' ```
#' svc$create_hapg(
#' Label = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_hapg
cloudhsm_create_hapg <- function(Label) {
  # Describe the CreateHapg API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "CreateHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$create_hapg_input(Label = Label)
  op_output <- .cloudhsm$create_hapg_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$create_hapg <- cloudhsm_create_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates an uninitialized HSM instance.
#'
#' There is an upfront fee charged for each HSM instance that you create
#' with the `CreateHsm` operation. If you accidentally provision an HSM and
#' want to request a refund, delete the instance using the DeleteHsm
#' operation, go to the [AWS Support
#' Center](https://console.aws.amazon.com/support/home), create a new case,
#' and select **Account and Billing Support**.
#'
#' It can take up to 20 minutes to create and provision an HSM. You can
#' monitor the status of the HSM with the DescribeHsm operation. The HSM is
#' ready to be initialized when the status changes to `RUNNING`.
#'
#' @usage
#' cloudhsm_create_hsm(SubnetId, SshKey, EniIp, IamRoleArn, ExternalId,
#' SubscriptionType, ClientToken, SyslogIp)
#'
#' @param SubnetId [required] The identifier of the subnet in your VPC in which to place the HSM.
#' @param SshKey [required] The SSH public key to install on the HSM.
#' @param EniIp The IP address to assign to the HSM\'s ENI.
#'
#' If an IP address is not specified, an IP address will be randomly chosen
#' from the CIDR range of the subnet.
#' @param IamRoleArn [required] The ARN of an IAM role to enable the AWS CloudHSM service to allocate an
#' ENI on your behalf.
#' @param ExternalId The external ID from `IamRoleArn`, if present.
#' @param SubscriptionType [required]
#' @param ClientToken A user-defined token to ensure idempotence. Subsequent calls to this
#' operation with the same token will be ignored.
#' @param SyslogIp The IP address for the syslog monitoring server. The AWS CloudHSM
#' service only supports one syslog monitoring server.
#'
#' @section Request syntax:
#' ```
#' svc$create_hsm(
#' SubnetId = "string",
#' SshKey = "string",
#' EniIp = "string",
#' IamRoleArn = "string",
#' ExternalId = "string",
#' SubscriptionType = "PRODUCTION",
#' ClientToken = "string",
#' SyslogIp = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_hsm
cloudhsm_create_hsm <- function(SubnetId, SshKey, EniIp = NULL, IamRoleArn, ExternalId = NULL, SubscriptionType, ClientToken = NULL, SyslogIp = NULL) {
  # Describe the CreateHsm API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "CreateHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$create_hsm_input(
    SubnetId = SubnetId,
    SshKey = SshKey,
    EniIp = EniIp,
    IamRoleArn = IamRoleArn,
    ExternalId = ExternalId,
    SubscriptionType = SubscriptionType,
    ClientToken = ClientToken,
    SyslogIp = SyslogIp
  )
  op_output <- .cloudhsm$create_hsm_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$create_hsm <- cloudhsm_create_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Creates an HSM client.
#'
#' @usage
#' cloudhsm_create_luna_client(Label, Certificate)
#'
#' @param Label The label for the client.
#' @param Certificate [required] The contents of a Base64-Encoded X.509 v3 certificate to be installed on
#' the HSMs used by this client.
#'
#' @section Request syntax:
#' ```
#' svc$create_luna_client(
#' Label = "string",
#' Certificate = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_create_luna_client
cloudhsm_create_luna_client <- function(Label = NULL, Certificate) {
  # Describe the CreateLunaClient API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "CreateLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$create_luna_client_input(
    Label = Label,
    Certificate = Certificate
  )
  op_output <- .cloudhsm$create_luna_client_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$create_luna_client <- cloudhsm_create_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes a high-availability partition group.
#'
#' @usage
#' cloudhsm_delete_hapg(HapgArn)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hapg(
#' HapgArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_hapg
cloudhsm_delete_hapg <- function(HapgArn) {
  # Describe the DeleteHapg API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "DeleteHapg",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$delete_hapg_input(HapgArn = HapgArn)
  op_output <- .cloudhsm$delete_hapg_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$delete_hapg <- cloudhsm_delete_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes an HSM. After completion, this operation cannot be undone and
#' your key material cannot be recovered.
#'
#' @usage
#' cloudhsm_delete_hsm(HsmArn)
#'
#' @param HsmArn [required] The ARN of the HSM to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_hsm(
#' HsmArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_hsm
cloudhsm_delete_hsm <- function(HsmArn) {
  # Describe the DeleteHsm API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "DeleteHsm",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$delete_hsm_input(HsmArn = HsmArn)
  op_output <- .cloudhsm$delete_hsm_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$delete_hsm <- cloudhsm_delete_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Deletes a client.
#'
#' @usage
#' cloudhsm_delete_luna_client(ClientArn)
#'
#' @param ClientArn [required] The ARN of the client to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_luna_client(
#' ClientArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_delete_luna_client
cloudhsm_delete_luna_client <- function(ClientArn) {
  # Describe the DeleteLunaClient API call: an unpaginated POST to "/".
  operation <- new_operation(
    name = "DeleteLunaClient",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal caller arguments and the expected (empty-shaped) response.
  op_input <- .cloudhsm$delete_luna_client_input(ClientArn = ClientArn)
  op_output <- .cloudhsm$delete_luna_client_output()
  # Build the request against the configured CloudHSM service and dispatch it.
  resp <- send_request(new_request(.cloudhsm$service(), operation, op_input, op_output))
  resp
}
.cloudhsm$operations$delete_luna_client <- cloudhsm_delete_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about a high-availability partition group.
#'
#' @usage
#' cloudhsm_describe_hapg(HapgArn)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to describe.
#'
#' @section Request syntax:
#' ```
#' svc$describe_hapg(
#' HapgArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_hapg
cloudhsm_describe_hapg <- function(HapgArn) {
op <- new_operation(
name = "DescribeHapg",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$describe_hapg_input(HapgArn = HapgArn)
output <- .cloudhsm$describe_hapg_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$describe_hapg <- cloudhsm_describe_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about an HSM. You can identify the HSM by its ARN
#' or its serial number.
#'
#' @usage
#' cloudhsm_describe_hsm(HsmArn, HsmSerialNumber)
#'
#' @param HsmArn The ARN of the HSM. Either the `HsmArn` or the `SerialNumber` parameter
#' must be specified.
#' @param HsmSerialNumber The serial number of the HSM. Either the `HsmArn` or the
#' `HsmSerialNumber` parameter must be specified.
#'
#' @section Request syntax:
#' ```
#' svc$describe_hsm(
#' HsmArn = "string",
#' HsmSerialNumber = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_hsm
cloudhsm_describe_hsm <- function(HsmArn = NULL, HsmSerialNumber = NULL) {
op <- new_operation(
name = "DescribeHsm",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$describe_hsm_input(HsmArn = HsmArn, HsmSerialNumber = HsmSerialNumber)
output <- .cloudhsm$describe_hsm_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$describe_hsm <- cloudhsm_describe_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves information about an HSM client.
#'
#' @usage
#' cloudhsm_describe_luna_client(ClientArn, CertificateFingerprint)
#'
#' @param ClientArn The ARN of the client.
#' @param CertificateFingerprint The certificate fingerprint.
#'
#' @section Request syntax:
#' ```
#' svc$describe_luna_client(
#' ClientArn = "string",
#' CertificateFingerprint = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_describe_luna_client
cloudhsm_describe_luna_client <- function(ClientArn = NULL, CertificateFingerprint = NULL) {
op <- new_operation(
name = "DescribeLunaClient",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$describe_luna_client_input(ClientArn = ClientArn, CertificateFingerprint = CertificateFingerprint)
output <- .cloudhsm$describe_luna_client_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$describe_luna_client <- cloudhsm_describe_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Gets the configuration files necessary to connect to all high
#' availability partition groups the client is associated with.
#'
#' @usage
#' cloudhsm_get_config(ClientArn, ClientVersion, HapgList)
#'
#' @param ClientArn [required] The ARN of the client.
#' @param ClientVersion [required] The client version.
#' @param HapgList [required] A list of ARNs that identify the high-availability partition groups that
#' are associated with the client.
#'
#' @section Request syntax:
#' ```
#' svc$get_config(
#' ClientArn = "string",
#' ClientVersion = "5.1"|"5.3",
#' HapgList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_get_config
cloudhsm_get_config <- function(ClientArn, ClientVersion, HapgList) {
op <- new_operation(
name = "GetConfig",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$get_config_input(ClientArn = ClientArn, ClientVersion = ClientVersion, HapgList = HapgList)
output <- .cloudhsm$get_config_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$get_config <- cloudhsm_get_config
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists the Availability Zones that have available AWS CloudHSM capacity.
#'
#' @usage
#' cloudhsm_list_available_zones()
#'
#' @section Request syntax:
#' ```
#' svc$list_available_zones()
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_available_zones
cloudhsm_list_available_zones <- function() {
op <- new_operation(
name = "ListAvailableZones",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$list_available_zones_input()
output <- .cloudhsm$list_available_zones_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$list_available_zones <- cloudhsm_list_available_zones
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists the high-availability partition groups for the account.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to `ListHapgs`
#' to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_hapgs(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListHapgs`. Pass null if
#' this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_hapgs(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_hapgs
cloudhsm_list_hapgs <- function(NextToken = NULL) {
op <- new_operation(
name = "ListHapgs",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$list_hapgs_input(NextToken = NextToken)
output <- .cloudhsm$list_hapgs_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$list_hapgs <- cloudhsm_list_hapgs
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Retrieves the identifiers of all of the HSMs provisioned for the current
#' customer.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to `ListHsms`
#' to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_hsms(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListHsms`. Pass null if
#' this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_hsms(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_hsms
cloudhsm_list_hsms <- function(NextToken = NULL) {
op <- new_operation(
name = "ListHsms",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$list_hsms_input(NextToken = NextToken)
output <- .cloudhsm$list_hsms_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$list_hsms <- cloudhsm_list_hsms
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Lists all of the clients.
#'
#' This operation supports pagination with the use of the `NextToken`
#' member. If more results are available, the `NextToken` member of the
#' response contains a token that you pass in the next call to
#' `ListLunaClients` to retrieve the next set of items.
#'
#' @usage
#' cloudhsm_list_luna_clients(NextToken)
#'
#' @param NextToken The `NextToken` value from a previous call to `ListLunaClients`. Pass
#' null if this is the first call.
#'
#' @section Request syntax:
#' ```
#' svc$list_luna_clients(
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_luna_clients
cloudhsm_list_luna_clients <- function(NextToken = NULL) {
op <- new_operation(
name = "ListLunaClients",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$list_luna_clients_input(NextToken = NextToken)
output <- .cloudhsm$list_luna_clients_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$list_luna_clients <- cloudhsm_list_luna_clients
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Returns a list of all tags for the specified AWS CloudHSM resource.
#'
#' @usage
#' cloudhsm_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_list_tags_for_resource
cloudhsm_list_tags_for_resource <- function(ResourceArn) {
op <- new_operation(
name = "ListTagsForResource",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$list_tags_for_resource_input(ResourceArn = ResourceArn)
output <- .cloudhsm$list_tags_for_resource_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$list_tags_for_resource <- cloudhsm_list_tags_for_resource
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies an existing high-availability partition group.
#'
#' @usage
#' cloudhsm_modify_hapg(HapgArn, Label, PartitionSerialList)
#'
#' @param HapgArn [required] The ARN of the high-availability partition group to modify.
#' @param Label The new label for the high-availability partition group.
#' @param PartitionSerialList The list of partition serial numbers to make members of the
#' high-availability partition group.
#'
#' @section Request syntax:
#' ```
#' svc$modify_hapg(
#' HapgArn = "string",
#' Label = "string",
#' PartitionSerialList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_hapg
cloudhsm_modify_hapg <- function(HapgArn, Label = NULL, PartitionSerialList = NULL) {
op <- new_operation(
name = "ModifyHapg",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$modify_hapg_input(HapgArn = HapgArn, Label = Label, PartitionSerialList = PartitionSerialList)
output <- .cloudhsm$modify_hapg_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$modify_hapg <- cloudhsm_modify_hapg
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies an HSM.
#'
#' This operation can result in the HSM being offline for up to 15 minutes
#' while the AWS CloudHSM service is reconfigured. If you are modifying a
#' production HSM, you should ensure that your AWS CloudHSM service is
#' configured for high availability, and consider executing this operation
#' during a maintenance window.
#'
#' @usage
#' cloudhsm_modify_hsm(HsmArn, SubnetId, EniIp, IamRoleArn, ExternalId,
#' SyslogIp)
#'
#' @param HsmArn [required] The ARN of the HSM to modify.
#' @param SubnetId The new identifier of the subnet that the HSM is in. The new subnet must
#' be in the same Availability Zone as the current subnet.
#' @param EniIp The new IP address for the elastic network interface (ENI) attached to
#' the HSM.
#'
#' If the HSM is moved to a different subnet, and an IP address is not
#' specified, an IP address will be randomly chosen from the CIDR range of
#' the new subnet.
#' @param IamRoleArn The new IAM role ARN.
#' @param ExternalId The new external ID.
#' @param SyslogIp The new IP address for the syslog monitoring server. The AWS CloudHSM
#' service only supports one syslog monitoring server.
#'
#' @section Request syntax:
#' ```
#' svc$modify_hsm(
#' HsmArn = "string",
#' SubnetId = "string",
#' EniIp = "string",
#' IamRoleArn = "string",
#' ExternalId = "string",
#' SyslogIp = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_hsm
cloudhsm_modify_hsm <- function(HsmArn, SubnetId = NULL, EniIp = NULL, IamRoleArn = NULL, ExternalId = NULL, SyslogIp = NULL) {
op <- new_operation(
name = "ModifyHsm",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$modify_hsm_input(HsmArn = HsmArn, SubnetId = SubnetId, EniIp = EniIp, IamRoleArn = IamRoleArn, ExternalId = ExternalId, SyslogIp = SyslogIp)
output <- .cloudhsm$modify_hsm_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$modify_hsm <- cloudhsm_modify_hsm
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Modifies the certificate used by the client.
#'
#' This action can potentially start a workflow to install the new
#' certificate on the client\'s HSMs.
#'
#' @usage
#' cloudhsm_modify_luna_client(ClientArn, Certificate)
#'
#' @param ClientArn [required] The ARN of the client.
#' @param Certificate [required] The new certificate for the client.
#'
#' @section Request syntax:
#' ```
#' svc$modify_luna_client(
#' ClientArn = "string",
#' Certificate = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_modify_luna_client
cloudhsm_modify_luna_client <- function(ClientArn, Certificate) {
op <- new_operation(
name = "ModifyLunaClient",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$modify_luna_client_input(ClientArn = ClientArn, Certificate = Certificate)
output <- .cloudhsm$modify_luna_client_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$modify_luna_client <- cloudhsm_modify_luna_client
#' This is documentation for AWS CLOUDHSM CLASSIC
#'
#' This is documentation for **AWS CloudHSM Classic**. For more
#' information, see [AWS CloudHSM Classic
#' FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM
#' Classic User
#' Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
#' [AWS CloudHSM Classic API
#' Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
#'
#' **For information about the current version of AWS CloudHSM**, see [AWS
#' CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User
#' Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the
#' [AWS CloudHSM API
#' Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
#'
#' Removes one or more tags from the specified AWS CloudHSM resource.
#'
#' To remove a tag, specify only the tag key to remove (not the value). To
#' overwrite the value for an existing tag, use AddTagsToResource.
#'
#' @usage
#' cloudhsm_remove_tags_from_resource(ResourceArn, TagKeyList)
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the AWS CloudHSM resource.
#' @param TagKeyList [required] The tag key or keys to remove.
#'
#' Specify only the tag key to remove (not the value). To overwrite the
#' value for an existing tag, use AddTagsToResource.
#'
#' @section Request syntax:
#' ```
#' svc$remove_tags_from_resource(
#' ResourceArn = "string",
#' TagKeyList = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudhsm_remove_tags_from_resource
cloudhsm_remove_tags_from_resource <- function(ResourceArn, TagKeyList) {
op <- new_operation(
name = "RemoveTagsFromResource",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .cloudhsm$remove_tags_from_resource_input(ResourceArn = ResourceArn, TagKeyList = TagKeyList)
output <- .cloudhsm$remove_tags_from_resource_output()
svc <- .cloudhsm$service()
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.cloudhsm$operations$remove_tags_from_resource <- cloudhsm_remove_tags_from_resource
|
setwd("Documents/03_Syntenic_linkage/01_Data/OrthoFinder/input/gff_files")
setwd("W:/Thomas/Data/synteny/synteny_iadhore/")
species <- unlist(lapply(strsplit(
list.files()[grep(list.files(), pattern=".gff")], split=".gff"), "[", 1))
length(species) ## 126
##species <- c("arath", "zeama", "orysa", "theca")
## get all combinations
combinations <- combn(species, 2)
combinations <- cbind(combinations, rbind(species, species))
dim(combinations) ## 2 8001
setwd("W:/Thomas/Data/synteny/synteny_iadhore/")
type <- "MCL" ## or "MCL"
if (type == "OrthoFinder") {
setwd("lst_files_orthofinder/")
}
if (type == "MCL") {
setwd("lst_files_mcl/")
}
## iterate through combinations and write ini file
for (i in 1:dim(combinations)[2]) {
if (type == "OrthoFinder") {
genome_path <- "./lst_files_orthofinder"
blast_table <- "blast_table= ./family_orthogroups_iadhore.txt"
output_path <- "./output/output"
final_path <- "../ini_files/"
}
if (type == "MCL") {
genome_path <- "./lst_files_mcl"
blast_table <- "blast_table= ./family_orthogroups_mcl_iadhore.txt"
output_path <- "./output_mcl/output"
final_path <- "../ini_files_mcl/"
}
final <- rbind(
matrix(
c(paste("genome=", combinations[1, i], sep = " "),
paste(
gsub(list.files(combinations[1, i]), pattern = ".lst",
replacement = ""),
paste(genome_path, combinations[1, i],
list.files(combinations[1, i]), sep = "/"), sep = " "),
""
)),
matrix(
c(paste("genome=", combinations[2, i], sep = " "),
paste(
gsub(list.files(combinations[2, i]), pattern = ".lst",
replacement = ""),
paste(genome_path, combinations[2, i],
list.files(combinations[2,i]), sep = "/"), sep = " "),
""
)),
matrix(
c(blast_table,
"",
"table_type= family",
"cluster_type= collinear",
paste("output_path=", paste(output_path, combinations[1, i],
combinations[2, i], sep = "_"), sep = " "),
"",
"alignment_method=gg2",
"gap_size=15",
"cluster_gap=20",
"max_gaps_in_alignment=20",
"q_value=0.9",
"prob_cutoff=0.001",
"anchor_points=5",
"level_2_only=true",
"write_stats=true",
"number_of_threads=4"
))
)
write.table(final,
file = paste(final_path, "iadhore_",
paste(combinations[1 ,i], combinations[2, i], sep = "_"),
".ini", sep = ""),
quote = FALSE, col.names = FALSE, row.names = FALSE)
}
| /02_create_ini_files_for_each_combination.R | no_license | tnaake/PKS_synteny | R | false | false | 2,974 | r | setwd("Documents/03_Syntenic_linkage/01_Data/OrthoFinder/input/gff_files")
setwd("W:/Thomas/Data/synteny/synteny_iadhore/")
species <- unlist(lapply(strsplit(
list.files()[grep(list.files(), pattern=".gff")], split=".gff"), "[", 1))
length(species) ## 126
##species <- c("arath", "zeama", "orysa", "theca")
## get all combinations
combinations <- combn(species, 2)
combinations <- cbind(combinations, rbind(species, species))
dim(combinations) ## 2 8001
setwd("W:/Thomas/Data/synteny/synteny_iadhore/")
type <- "MCL" ## or "MCL"
if (type == "OrthoFinder") {
setwd("lst_files_orthofinder/")
}
if (type == "MCL") {
setwd("lst_files_mcl/")
}
## iterate through combinations and write ini file
for (i in 1:dim(combinations)[2]) {
if (type == "OrthoFinder") {
genome_path <- "./lst_files_orthofinder"
blast_table <- "blast_table= ./family_orthogroups_iadhore.txt"
output_path <- "./output/output"
final_path <- "../ini_files/"
}
if (type == "MCL") {
genome_path <- "./lst_files_mcl"
blast_table <- "blast_table= ./family_orthogroups_mcl_iadhore.txt"
output_path <- "./output_mcl/output"
final_path <- "../ini_files_mcl/"
}
final <- rbind(
matrix(
c(paste("genome=", combinations[1, i], sep = " "),
paste(
gsub(list.files(combinations[1, i]), pattern = ".lst",
replacement = ""),
paste(genome_path, combinations[1, i],
list.files(combinations[1, i]), sep = "/"), sep = " "),
""
)),
matrix(
c(paste("genome=", combinations[2, i], sep = " "),
paste(
gsub(list.files(combinations[2, i]), pattern = ".lst",
replacement = ""),
paste(genome_path, combinations[2, i],
list.files(combinations[2,i]), sep = "/"), sep = " "),
""
)),
matrix(
c(blast_table,
"",
"table_type= family",
"cluster_type= collinear",
paste("output_path=", paste(output_path, combinations[1, i],
combinations[2, i], sep = "_"), sep = " "),
"",
"alignment_method=gg2",
"gap_size=15",
"cluster_gap=20",
"max_gaps_in_alignment=20",
"q_value=0.9",
"prob_cutoff=0.001",
"anchor_points=5",
"level_2_only=true",
"write_stats=true",
"number_of_threads=4"
))
)
write.table(final,
file = paste(final_path, "iadhore_",
paste(combinations[1 ,i], combinations[2, i], sep = "_"),
".ini", sep = ""),
quote = FALSE, col.names = FALSE, row.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultiAssayExperiment-methods.R
\docType{methods}
\name{rearrange}
\alias{rearrange}
\alias{rearrange,ANY-method}
\alias{rearrange,RangedRaggedAssay-method}
\alias{rearrange,ExperimentList-method}
\alias{rearrange,MultiAssayExperiment-method}
\title{Reshape raw data from an object}
\usage{
rearrange(object, shape = "long", ...)
\S4method{rearrange}{ANY}(object, shape = "long", ...)
\S4method{rearrange}{RangedRaggedAssay}(object, shape = "long", ...)
\S4method{rearrange}{ExperimentList}(object, shape = "long", ...)
\S4method{rearrange}{MultiAssayExperiment}(object, shape = "long",
pDataCols = NULL, ...)
}
\arguments{
\item{object}{Any supported class object}
\item{shape}{A single string indicating the shape of the resulting data,
options include \sQuote{long} and \sQuote{wide} (defaults to the former)}
\item{...}{Additional arguments for the \link{RangedRaggedAssay}
\code{assay} method. See below.}
\item{pDataCols}{selected pData columns to include in the resulting output}
}
\value{
Either a long or wide \code{\linkS4class{DataFrame}}
}
\description{
The rearrange function takes data from the \code{\link{ExperimentList}}
in a \code{\link{MultiAssayExperiment}} and returns a uniform
\code{\link{DataFrame}}. The resulting DataFrame has columns indicating
primary, rowname, colname and value. This method can optionally include
pData columns with the \code{pDataCols} argument for a
\code{MultiAssayExperiment} object.
}
\section{Methods (by class)}{
\itemize{
\item \code{ANY}: ANY class method, works with classes such as
\link{ExpressionSet} and \link{SummarizedExperiment} as well as \code{matrix}
\item \code{RangedRaggedAssay}: \linkS4class{RangedRaggedAssay} class method to return
matrix of selected \dQuote{mcolname} column, defaults to score
\item \code{ExperimentList}: Rearrange data from the \code{ExperimentList} class
returns list of DataFrames
\item \code{MultiAssayExperiment}: Overarching \code{MultiAssayExperiment} class method
returns a small and skinny DataFrame. The \code{pDataCols} arguments allows
the user to append pData columns to the long and skinny DataFrame.
}}
\examples{
example("RangedRaggedAssay")
rearrange(myRRA, background = 0)
}
\seealso{
\code{\link{assay,RangedRaggedAssay,missing-method}}
}
| /man/rearrange.Rd | no_license | minghao2016/MultiAssayExperiment | R | false | true | 2,341 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultiAssayExperiment-methods.R
\docType{methods}
\name{rearrange}
\alias{rearrange}
\alias{rearrange,ANY-method}
\alias{rearrange,RangedRaggedAssay-method}
\alias{rearrange,ExperimentList-method}
\alias{rearrange,MultiAssayExperiment-method}
\title{Reshape raw data from an object}
\usage{
rearrange(object, shape = "long", ...)
\S4method{rearrange}{ANY}(object, shape = "long", ...)
\S4method{rearrange}{RangedRaggedAssay}(object, shape = "long", ...)
\S4method{rearrange}{ExperimentList}(object, shape = "long", ...)
\S4method{rearrange}{MultiAssayExperiment}(object, shape = "long",
pDataCols = NULL, ...)
}
\arguments{
\item{object}{Any supported class object}
\item{shape}{A single string indicating the shape of the resulting data,
options include \sQuote{long} and \sQuote{wide} (defaults to the former)}
\item{...}{Additional arguments for the \link{RangedRaggedAssay}
\code{assay} method. See below.}
\item{pDataCols}{selected pData columns to include in the resulting output}
}
\value{
Either a long or wide \code{\linkS4class{DataFrame}}
}
\description{
The rearrange function takes data from the \code{\link{ExperimentList}}
in a \code{\link{MultiAssayExperiment}} and returns a uniform
\code{\link{DataFrame}}. The resulting DataFrame has columns indicating
primary, rowname, colname and value. This method can optionally include
pData columns with the \code{pDataCols} argument for a
\code{MultiAssayExperiment} object.
}
\section{Methods (by class)}{
\itemize{
\item \code{ANY}: ANY class method, works with classes such as
\link{ExpressionSet} and \link{SummarizedExperiment} as well as \code{matrix}
\item \code{RangedRaggedAssay}: \linkS4class{RangedRaggedAssay} class method to return
matrix of selected \dQuote{mcolname} column, defaults to score
\item \code{ExperimentList}: Rearrange data from the \code{ExperimentList} class
returns list of DataFrames
\item \code{MultiAssayExperiment}: Overarching \code{MultiAssayExperiment} class method
returns a small and skinny DataFrame. The \code{pDataCols} arguments allows
the user to append pData columns to the long and skinny DataFrame.
}}
\examples{
example("RangedRaggedAssay")
rearrange(myRRA, background = 0)
}
\seealso{
\code{\link{assay,RangedRaggedAssay,missing-method}}
}
|
# ui.R -- Shiny UI for the NHANES (National Health and Nutrition
# Examination Survey) data access portal.
# Layout: a sidebar holding step-by-step instructions plus staged controls
# (each actionButton click reveals the next conditionalPanel), and a
# two-column main panel showing the retrieved column data and its summary.
library(shiny)
shinyUI(pageWithSidebar(
headerPanel(h3("National Health and Nutrition Examination Survey Access Portal")),
sidebarPanel(
fluidRow(
# Static usage instructions shown at the top of the sidebar.
h4("Instructions"),
h6("1: Below, Select desired year"),
h6("2: Click on 'Show columns'"),
h6("3: Select desired column"),
h6("4: Click 'Retrieve demographic data'"),
h6("5: For clarity, select 'Translate code'"),
h6("6: Finally, click 'Generate Summary'"),
br(),
# Dataset choices start as a single blank entry; presumably the server
# populates them via updateSelectInput -- TODO confirm against server.R.
selectInput("dataset", label = h4("Select NHANES data set"), choices = list(' '), selected = ' '),
##This action button will initiate downloading of data
actionButton(inputId = "getdemographicdata", label = "Show columns", value = '' ),
# Revealed only after 'Show columns' has been clicked at least once.
conditionalPanel(condition = "input.getdemographicdata > 0",
br(),
# Column choices also start blank; expected to be filled by the server.
radioButtons("choose_columns", label = "Select Column",
choices = list('')),
br(),
actionButton(inputId = "previewcolumn", label = "Retrieve demographic data"),
# br(),
#br(),
# 'Translate code' is hidden for the 'age' column (no coded values there).
conditionalPanel(condition = "input.choose_columns != 0 & input.choose_columns != 'age'",
br(),
checkboxInput(inputId="translatecode", label = "Translate code", value=FALSE)
)
#,
# textOutput("setColumn")#,
# br(),
# a(href = "https://gist.github.com/4211337", "Source code")
))),#),
mainPanel(
fluidRow(
# Left column: echo of the request and the raw column data.
column(5,
uiOutput("echo_request"),
br(),
tableOutput("column_data")
),
# Right column: summary controls, revealed after data retrieval.
column(7,
conditionalPanel(condition = "input.previewcolumn > 0",
actionButton(inputId = "showtable", label = "Generate Summary"),
conditionalPanel(condition = "input.showtable > 0",
br(),
uiOutput("summary_request"),
tableOutput("column_summary"),
uiOutput("compute_totals")
)
))
)
)
))
| /ui.R | no_license | cjendres1/NHANESportal | R | false | false | 2,077 | r | library(shiny)
shinyUI(pageWithSidebar(
headerPanel(h3("National Health and Nutrition Examination Survey Access Portal")),
sidebarPanel(
fluidRow(
h4("Instructions"),
h6("1: Below, Select desired year"),
h6("2: Click on 'Show columns'"),
h6("3: Select desired column"),
h6("4: Click 'Retrieve demographic data'"),
h6("5: For clarity, select 'Translate code'"),
h6("6: Finally, click 'Generate Summary'"),
br(),
selectInput("dataset", label = h4("Select NHANES data set"), choices = list(' '), selected = ' '),
##This action button will initiate downloading of data
actionButton(inputId = "getdemographicdata", label = "Show columns", value = '' ),
conditionalPanel(condition = "input.getdemographicdata > 0",
br(),
radioButtons("choose_columns", label = "Select Column",
choices = list('')),
br(),
actionButton(inputId = "previewcolumn", label = "Retrieve demographic data"),
# br(),
#br(),
conditionalPanel(condition = "input.choose_columns != 0 & input.choose_columns != 'age'",
br(),
checkboxInput(inputId="translatecode", label = "Translate code", value=FALSE)
)
#,
# textOutput("setColumn")#,
# br(),
# a(href = "https://gist.github.com/4211337", "Source code")
))),#),
mainPanel(
fluidRow(
column(5,
uiOutput("echo_request"),
br(),
tableOutput("column_data")
),
column(7,
conditionalPanel(condition = "input.previewcolumn > 0",
actionButton(inputId = "showtable", label = "Generate Summary"),
conditionalPanel(condition = "input.showtable > 0",
br(),
uiOutput("summary_request"),
tableOutput("column_summary"),
uiOutput("compute_totals")
)
))
)
)
))
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(plotly)
library(ggplot2)
# dplyr was used below (tibble(), %>%, filter(); group_by()/summarise() in
# the server) but never attached, so the script relied on plotly's partial
# re-exports.  Attach it explicitly.
library(dplyr)

# Transfer-payment-program data and program descriptions, read from the
# working directory.
TBS <- read.csv("TPP.csv")
programs <- read.csv("programs.csv")
# Department names used to populate the select input.
choice <- unique(factor(TBS$Department))
# Program descriptions; note the resulting columns are literally named
# "programs$Department" etc., and the filter condition is evaluated against
# the original `programs` data frame (same row count, so it works).
info <- tibble(programs$Department, programs$DRF.Program, programs$Description.EN)
info <- info %>% filter(programs$Department == "Agriculture and Agri-Food Canada")
# Dashboard chrome: wide header with the app title.
header <- dashboardHeader(
titleWidth = 700,
title = "Exploring Government Funding to Businesses"
)
# Sidebar: language menu entries only (no navigation wired to them yet).
sidebar <- dashboardSidebar(
width = 300,
sidebarMenu(
menuItem("English",icon=icon("far fa-credit-card")),
menuItem("French",icon=icon("far fa-credit-card"))
)
)
# Body: donut chart of programs per department, a department selector
# (choices come from `choice`, built at load time), and a description table.
body <- dashboardBody(
plotlyOutput("NTrans"),
fluidRow(
selectInput(
"select_department",
"Breakdown by Departments",
choices = choice,
selected= "Agriculture and Agri-Food Canada"
),
tableOutput("test")
)
)
# Assemble the dashboard page from the three pieces above.
ui<-dashboardPage(skin ="blue",
header, sidebar, body
)
# Server logic: one donut chart and one static description table.
server <- (function(input, output, session) {
  # Number of transfer payment programs per department, computed once from
  # the globally loaded TBS data.
  sum_dep <- TBS %>%
    group_by(Department) %>%
    summarise(count = n())

  # Donut chart of the departmental shares.  (The original assigned the
  # plot to an unused local `p` and relied on the assignment's invisible
  # return value; returning the plot directly is clearer.)
  output$NTrans <- renderPlotly({
    plot_ly(sum_dep, labels = ~Department, values = ~count) %>%
      add_pie(hole = 0.6) %>%
      layout(
        title = "Number of Transfer Payment Program",
        xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
        yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE)
      )
  })

  # Program-description table.  NOTE(review): input$select_department is not
  # used here yet -- the table always shows the pre-filtered `info` data.
  output$test <- renderTable({
    info
  })
})
shinyApp(ui = ui, server = server) | /CanDev_Dashboard_Script.R | no_license | zorinahan1024/Candev_TBS2480 | R | false | false | 1,966 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(plotly)
library(ggplot2)
TBS <- read.csv("TPP.csv")
programs <- read.csv("programs.csv")
choice <- unique(factor(TBS$Department))
info <- tibble(programs$Department,programs$DRF.Program, programs$Description.EN)
info <- info%>% filter(programs$Department == "Agriculture and Agri-Food Canada")
header <- dashboardHeader(
titleWidth = 700,
title = "Exploring Government Funding to Businesses"
)
sidebar <- dashboardSidebar(
width = 300,
sidebarMenu(
menuItem("English",icon=icon("far fa-credit-card")),
menuItem("French",icon=icon("far fa-credit-card"))
)
)
body <- dashboardBody(
plotlyOutput("NTrans"),
fluidRow(
selectInput(
"select_department",
"Breakdown by Departments",
choices = choice,
selected= "Agriculture and Agri-Food Canada"
),
tableOutput("test")
)
)
ui<-dashboardPage(skin ="blue",
header, sidebar, body
)
server <- (function(input,output,session){
sum_dep <- TBS %>%
group_by(Department) %>%
summarise(count=n())
output$NTrans <- renderPlotly({
p <- plot_ly(sum_dep, labels= ~Department, values = ~count) %>% add_pie(hole = 0.6) %>%
layout(title = "Number of Transfer Payment Program",
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
})
output$test <- renderTable({
info
})
})
shinyApp(ui = ui, server = server) |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{theme_excel}
\alias{theme_excel}
\title{ggplot color theme based on old Excel plots}
\usage{
theme_excel(base_size = 12, base_family = "", horizontal = TRUE)
}
\arguments{
\item{base_size}{\code{numeric} base font size}
\item{base_family}{\code{character} base font family}
\item{horizontal}{\code{logical}. Horizontal axis lines?}
}
\value{
An object of class \code{\link{theme}}.
}
\description{
Theme to replicate the ugly monstrosity that was the old
gray-background Excel chart. Please never use this.
}
\examples{
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
# Old line color palette
(qplot(carat, price, data=dsamp, colour=clarity)
+ theme_excel()
+ scale_colour_excel() )
# Old fill color palette
(ggplot(diamonds, aes(clarity, fill=cut))
+ geom_bar()
+ scale_fill_excel("fill")
+ theme_excel())
}
| /man/theme_excel.Rd | no_license | Mokubyow/RFtheme | R | false | false | 875 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{theme_excel}
\alias{theme_excel}
\title{ggplot color theme based on old Excel plots}
\usage{
theme_excel(base_size = 12, base_family = "", horizontal = TRUE)
}
\arguments{
\item{base_size}{\code{numeric} base font size}
\item{base_family}{\code{character} base font family}
\item{horizontal}{\code{logical}. Horizontal axis lines?}
}
\value{
An object of class \code{\link{theme}}.
}
\description{
Theme to replicate the ugly monstrosity that was the old
gray-background Excel chart. Please never use this.
}
\examples{
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
# Old line color palette
(qplot(carat, price, data=dsamp, colour=clarity)
+ theme_excel()
+ scale_colour_excel() )
# Old fill color palette
(ggplot(diamonds, aes(clarity, fill=cut))
+ geom_bar()
+ scale_fill_excel("fill")
+ theme_excel())
}
|
# Fit a 10-fold cross-validated elastic-net model (alpha = 0.8, gaussian,
# MAE loss) on the upper aerodigestive tract training set and append the
# fitted coefficient path to the model log file.
library(glmnet)
# Column 1 is the response; predictor columns start at column 4.
# (Original used `head=T`: partial argument matching plus the reassignable
# alias T -- spelled out as `header = TRUE` here.)
mydata <- read.table("./TrainingSet/RF/upper_aerodigestive_tract.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.8,
                 family = "gaussian", standardize = FALSE)
# Append the glmnet fit summary to the model's log file, then restore
# normal console output.
sink('./Model/EN/Classifier/upper_aerodigestive_tract/upper_aerodigestive_tract_084.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/upper_aerodigestive_tract/upper_aerodigestive_tract_084.R | no_license | leon1003/QSMART | R | false | false | 407 | r | library(glmnet)
mydata = read.table("./TrainingSet/RF/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.8,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/upper_aerodigestive_tract/upper_aerodigestive_tract_084.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#' Positive likelihood ratio from a confusion matrix
#'
#' Convenience wrapper: extracts the four cell counts from a confusion
#' matrix and forwards them to
#' \code{confusion.matrix.positive.likelihood.ratio.simple}.
#'
#' @rdname confusion.matrix.positive.likelihood.ratio.simple
confusion.matrix.positive.likelihood.ratio <- function(
  confusion.matrix
) {
  # Delegate to the .simple variant; the confusion.matrix.* extractors are
  # defined elsewhere in this package.
  confusion.matrix.positive.likelihood.ratio.simple(
    true.positive  = confusion.matrix.true.positive(confusion.matrix)
    ,false.negative = confusion.matrix.false.negative(confusion.matrix)
    ,false.positive = confusion.matrix.false.positive(confusion.matrix)
    ,true.negative  = confusion.matrix.true.negative(confusion.matrix)
  )
} | /R/confusion.matrix.positive.likelihood.ratio.R | permissive | burrm/lolcat | R | false | false | 482 | r | #' @rdname confusion.matrix.positive.likelihood.ratio.simple
confusion.matrix.positive.likelihood.ratio <- function(
confusion.matrix
) {
confusion.matrix.positive.likelihood.ratio.simple(
true.positive = confusion.matrix.true.positive(confusion.matrix)
,false.negative = confusion.matrix.false.negative(confusion.matrix)
,false.positive = confusion.matrix.false.positive(confusion.matrix)
,true.negative = confusion.matrix.true.negative(confusion.matrix)
)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_functions.R
\name{directory.asps.list}
\alias{directory.asps.list}
\title{List the ASPs issued by a user.}
\usage{
directory.asps.list(userKey)
}
\arguments{
\item{userKey}{Identifies the user in the API request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/admin.directory.user.security
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/admin.directory.user.security)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/admin-sdk/directory/}{Google Documentation}
}
| /googleadmindirectoryv1.auto/man/directory.asps.list.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 844 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_functions.R
\name{directory.asps.list}
\alias{directory.asps.list}
\title{List the ASPs issued by a user.}
\usage{
directory.asps.list(userKey)
}
\arguments{
\item{userKey}{Identifies the user in the API request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/admin.directory.user.security
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/admin.directory.user.security)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/admin-sdk/directory/}{Google Documentation}
}
|
##' Test predictor-covariate interactions across fitted models
##'
##' For each model, refits the model with an interaction term between the
##' first formula term (treated as the predictor of interest) and each
##' remaining covariate in turn, then compares the two fits: a Wald test
##' (\code{anova(..., test = "Wald")}) for quantile regressions
##' (\code{rq}), or a likelihood-ratio test (\code{lrtest}) for
##' \code{glm} fits.  Results are stacked into one \code{data.table} with
##' an FDR-adjusted p-value column.
##'
##' @title Generate a data.table of interaction tests
##' @param models A fitted model, or a list of fitted models, of class
##'   \code{rq} or \code{glm}.
generate.interaction.test.dt <- function(models) {
  # Accept a single model by wrapping it in a one-element list.
  if (!('list' %in% class(models))) {
    models = list(models)
  }
  interaction.test.dt = data.table()
  # Go through the models
  for (model in models) {
    # Recover the data and formula pieces from the fitted model itself:
    # column 1 of model$model is the outcome; the first formula term is
    # the predictor, the rest are covariates.
    input.dt = as.data.table(model$model)
    outcome = names(model$model)[1]
    predictor = labels(terms(model))[1]
    covariates = labels(terms(model))[-1]
    # Test one predictor*covariate interaction at a time.
    for (covariate in covariates) {
      interaction.term = paste0(predictor, '*', covariate)
      if ('rq' %in% class(model)) {
        # Quantile regression branch: refit at the same tau with the
        # interaction added, using a project helper
        # (generate.quantile.regression.models, defined elsewhere).
        tau = model$tau
        # Refitting can fail (e.g. singular design); treat failure as NULL
        # so a row with missing test statistics is still emitted.
        new.model = tryCatch(
          generate.quantile.regression.models(
            input.dt = input.dt,
            outcome = outcome,
            predictors = predictor,
            covariates = c(covariates, interaction.term),
            tau = tau
          ),
          error = function(e) NULL
        )
        if (!is.null(new.model)) {
          # Wald test of the nested models (quantreg's anova method).
          w = anova(model, new.model[[1]], test = "Wald")
        } else {
          w = NULL
        }
        # cbind with data.table(NULL$table) yields a row with only the
        # identifier columns when the refit failed.
        row.dt = cbind(
          data.table(
            outcome = outcome,
            covariate = covariate,
            interaction.term = interaction.term,
            tau = tau
          ),
          data.table(w$table)
        )
      } else if ('glm' %in% class(model)) {
        # GLM branch: refit with the interaction (project helper) and run
        # a likelihood-ratio test (lmtest::lrtest).
        new.model = generate.regression.model(
          input.dt = input.dt,
          outcome = outcome,
          predictors = predictor,
          covariates = c(covariates, interaction.term)
        )
        w = lrtest(model, new.model)
        row.dt =
          data.table(
            outcome = outcome,
            covariate = covariate,
            interaction.term = interaction.term,
            ndf = w[2, "Df"],
            Chisq = w[2, "Chisq"],
            pvalue = w[2, "Pr(>Chisq)"]
          )
      }
      # fill = TRUE lets rq rows (with tau / Wald columns) and glm rows
      # (with Chisq columns) coexist in one table.
      interaction.test.dt = rbindlist(list(interaction.test.dt,
                                           row.dt),
                                      fill = TRUE)
    }
  }
  # FDR correction across every test performed above.
  interaction.test.dt[, pvalue.fdr := p.adjust(pvalue, method = "fdr")]
  return(interaction.test.dt)
}
| /R/generate.interaction.test.dt.R | no_license | mattmoo/checkwho_analysis | R | false | false | 2,374 | r | ##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##'
##' @title
##' @param models
generate.interaction.test.dt <- function(models) {
if (!('list' %in% class(models))) {
models = list(models)
}
interaction.test.dt = data.table()
# Go through the models
for (model in models) {
# Get parameters
input.dt = as.data.table(model$model)
outcome = names(model$model)[1]
predictor = labels(terms(model))[1]
covariates = labels(terms(model))[-1]
# Go through the
for (covariate in covariates) {
interaction.term = paste0(predictor, '*', covariate)
if ('rq' %in% class(model)) {
tau = model$tau
new.model = tryCatch(
generate.quantile.regression.models(
input.dt = input.dt,
outcome = outcome,
predictors = predictor,
covariates = c(covariates, interaction.term),
tau = tau
),
error = function(e) NULL
)
if (!is.null(new.model)) {
w = anova(model, new.model[[1]], test = "Wald")
} else {
w = NULL
}
row.dt = cbind(
data.table(
outcome = outcome,
covariate = covariate,
interaction.term = interaction.term,
tau = tau
),
data.table(w$table)
)
} else if ('glm' %in% class(model)) {
new.model = generate.regression.model(
input.dt = input.dt,
outcome = outcome,
predictors = predictor,
covariates = c(covariates, interaction.term)
)
w = lrtest(model, new.model)
row.dt =
data.table(
outcome = outcome,
covariate = covariate,
interaction.term = interaction.term,
ndf = w[2, "Df"],
Chisq = w[2, "Chisq"],
pvalue = w[2, "Pr(>Chisq)"]
)
}
interaction.test.dt = rbindlist(list(interaction.test.dt,
row.dt),
fill = TRUE)
}
}
interaction.test.dt[, pvalue.fdr := p.adjust(pvalue, method = "fdr")]
return(interaction.test.dt)
}
|
system.time({
highest_chain = 0;
# One Collatz step: halve an even value, map an odd value to 3v + 1.
# ifelse() keeps the step vectorized, matching the original behaviour.
do.collatz <- function(value) {
  halved <- value / 2
  tripled <- 3 * value + 1
  ifelse(value %% 2 == 0, halved, tripled)
}
# Project Euler 14: find the starting number below one million producing
# the longest Collatz chain.
# Previous run (chain length excluded the terminal 1):
#   [1] 524
#   [1] 837799
# With the fix below the longest chain is 525 terms, still from 837799.
starting_number <- 999999
complete_set <- seq_len(starting_number)
left_overs <- complete_set
answer <- 0
while (length(left_overs) > 0) {
  number <- left_overs[length(left_overs)]
  tmpNumber <- number
  # Walk the full Collatz chain down to, and including, 1.  The original
  # loop condition `do.collatz(number) != 1` stopped one step early, so
  # every chain length was short by one, and a start value of 1 walked
  # past itself (1 -> 4 -> 2).
  vector <- number
  while (number != 1) {
    number <- do.collatz(number)
    vector <- c(vector, number)
  }
  if (highest_chain < length(vector)) {
    highest_chain <- length(vector)
    answer <- tmpNumber
  }
  # Every member of this chain has a strictly shorter chain of its own,
  # so none can beat the current maximum -- drop them all from the pool.
  left_overs <- left_overs[!left_overs %in% vector]
}
print(highest_chain)
print(answer)
})
| /Week14/euler14_jsmykil.r | no_license | britannica/euler-club | R | false | false | 815 | r | system.time({
highest_chain = 0;
do.collatz <- function(x) {
ifelse(x %% 2 == 0, x / 2, 3*x + 1)
}
#[1] 524
#[1] 837799
#user system elapsed
#5849.078 199.860 6341.996
starting_number = 999999
next_number = starting_number
complete_set <- c(1:starting_number)
left_overs <- complete_set
answer <- 0
vector <- c()
while (length(left_overs) > 0) {
number <- left_overs[length(left_overs)]
tmpNumber <- number
vector <- c()
vector <- c(vector, number)
while (do.collatz(number) != 1) {
next_number = do.collatz(number)
vector <- c(vector, next_number)
number <- next_number
}
if (highest_chain < length(vector)) {
highest_chain = length(vector)
answer <- tmpNumber
}
left_overs <- left_overs[!left_overs %in% vector]
}
print(highest_chain)
print(answer)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/st_crs2.R
\name{st_crs2}
\alias{st_crs2}
\title{Retrieve coordinate reference system from sf or sfc object}
\usage{
st_crs2(x, ...)
}
\arguments{
\item{x}{numeric, character, or object of class \link{sf} or \link{sfc}, being:
\itemize{
\item EPSG code: numeric (e.g. \code{32632}) or character (in the form
\code{"32632"} or \code{"EPSG:32632"});
\item UTM zone: numeric (e.g. \code{32}, interpreted as 32 North) or character
(e.g. \code{"32"} or \code{"32N"} for zone 32 North, \code{"32S"} for 32 South);
\item WKT test: passed as character string or as path of a text file
containing it (e.g. the path of a .prj file);
\item PROJ.4 string, passed as character (e.g.
\code{"+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs"}
(\strong{NOTE}: this representation is deprecated with PROJ >= 6
-- see http://rgdal.r-forge.r-project.org/articles/PROJ6_GDAL3.html --
so a warning is returned using it, unless the string contains only
the epsg code -- e.g. \code{"+init=epsg:32632"}, in which case the EPSG
code is taken);
\item path of a spatial file (managed by \link[sf:st_read]{sf::st_read} or \link[stars:read_stars]{stars::read_stars}),
passed as character string of length 1;
\item spatial file of class \link{sf} or \link{sfc}.
}}
\item{...}{other parameters passed to \link[sf:st_crs]{sf::st_crs}.}
}
\value{
An object of class \link{crs} of length 2.
}
\description{
This function is a wrapper for \link[sf:st_crs]{sf::st_crs}, except that
it treats numeric \code{character} strings as integers, and it also
accepts UTM zones, paths of spatial files and paths of
text files containing a WKT string, such as .prj files (see details).
}
\details{
See \link[sf:st_crs]{sf::st_crs} for details.
}
\note{
License: GPL 3.0
}
\examples{
## CRS from EPSG
st_crs2(32609)
st_crs2("EPSG:32609")
## CRS from UTM zone
st_crs2(9)
st_crs2("09")
st_crs2("9N")
st_crs2("09S")
## CRS from WKT (string or path)
(wkt_32n <- sf::st_as_text(sf::st_crs(32609)))
st_crs2(wkt_32n)
writeLines(wkt_32n, wkt_32n_path <- tempfile())
st_crs2(wkt_32n_path)
\dontrun{
## CRS from spatial file path
raster_path <- system.file(
"extdata/out/S2A2A_20190723_022_Barbellino_BOA_10.tif",
package="sen2r"
)
vector_path <- system.file(
"extdata/vector/barbellino.geojson",
package="sen2r"
)
try( st_crs2(raster_path) )
st_crs2(vector_path)
## CRS from spatial files
st_crs2(stars::read_stars(raster_path))
st_crs2(sf::read_sf(vector_path))
## CRS from PROJ.4 string
# (avoid using this with PROJ >= 6!)
st_crs2("+init=epsg:32609") # this makes use of the EPSG code
st_crs2("+proj=utm +zone=9 +datum=WGS84 +units=m +no_defs")
st_crs2(raster::raster(raster_path)) # st_crs(raster) uses the PROJ.4 as input
}
}
\references{
L. Ranghetti, M. Boschetti, F. Nutini, L. Busetto (2020).
"sen2r": An R toolbox for automatically downloading and preprocessing
Sentinel-2 satellite data. \emph{Computers & Geosciences}, 139, 104473.
\doi{10.1016/j.cageo.2020.104473}, URL: \url{https://sen2r.ranghetti.info/}.
}
\author{
Luigi Ranghetti, phD (2019)
}
| /man/st_crs2.Rd | no_license | cran/sen2r | R | false | true | 3,175 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/st_crs2.R
\name{st_crs2}
\alias{st_crs2}
\title{Retrieve coordinate reference system from sf or sfc object}
\usage{
st_crs2(x, ...)
}
\arguments{
\item{x}{numeric, character, or object of class \link{sf} or \link{sfc}, being:
\itemize{
\item EPSG code: numeric (e.g. \code{32632}) or character (in the form
\code{"32632"} or \code{"EPSG:32632"});
\item UTM zone: numeric (e.g. \code{32}, interpreted as 32 North) or character
(e.g. \code{"32"} or \code{"32N"} for zone 32 North, \code{"32S"} for 32 South);
\item WKT test: passed as character string or as path of a text file
containing it (e.g. the path of a .prj file);
\item PROJ.4 string, passed as character (e.g.
\code{"+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs"}
(\strong{NOTE}: this representation is deprecated with PROJ >= 6
-- see http://rgdal.r-forge.r-project.org/articles/PROJ6_GDAL3.html --
so a warning is returned using it, unless the string contains only
the epsg code -- e.g. \code{"+init=epsg:32632"}, in which case the EPSG
code is taken);
\item path of a spatial file (managed by \link[sf:st_read]{sf::st_read} or \link[stars:read_stars]{stars::read_stars}),
passed as character string of length 1;
\item spatial file of class \link{sf} or \link{sfc}.
}}
\item{...}{other parameters passed to \link[sf:st_crs]{sf::st_crs}.}
}
\value{
An object of class \link{crs} of length 2.
}
\description{
This function is a wrapper for \link[sf:st_crs]{sf::st_crs}, except that
it treats numeric \code{character} strings as integers, and it also
accepts UTM zones, paths of spatial files and paths of
text files containing a WKT string, such as .prj files (see details).
}
\details{
See \link[sf:st_crs]{sf::st_crs} for details.
}
\note{
License: GPL 3.0
}
\examples{
## CRS from EPSG
st_crs2(32609)
st_crs2("EPSG:32609")
## CRS from UTM zone
st_crs2(9)
st_crs2("09")
st_crs2("9N")
st_crs2("09S")
## CRS from WKT (string or path)
(wkt_32n <- sf::st_as_text(sf::st_crs(32609)))
st_crs2(wkt_32n)
writeLines(wkt_32n, wkt_32n_path <- tempfile())
st_crs2(wkt_32n_path)
\dontrun{
## CRS from spatial file path
raster_path <- system.file(
"extdata/out/S2A2A_20190723_022_Barbellino_BOA_10.tif",
package="sen2r"
)
vector_path <- system.file(
"extdata/vector/barbellino.geojson",
package="sen2r"
)
try( st_crs2(raster_path) )
st_crs2(vector_path)
## CRS from spatial files
st_crs2(stars::read_stars(raster_path))
st_crs2(sf::read_sf(vector_path))
## CRS from PROJ.4 string
# (avoid using this with PROJ >= 6!)
st_crs2("+init=epsg:32609") # this makes use of the EPSG code
st_crs2("+proj=utm +zone=9 +datum=WGS84 +units=m +no_defs")
st_crs2(raster::raster(raster_path)) # st_crs(raster) uses the PROJ.4 as input
}
}
\references{
L. Ranghetti, M. Boschetti, F. Nutini, L. Busetto (2020).
"sen2r": An R toolbox for automatically downloading and preprocessing
Sentinel-2 satellite data. \emph{Computers & Geosciences}, 139, 104473.
\doi{10.1016/j.cageo.2020.104473}, URL: \url{https://sen2r.ranghetti.info/}.
}
\author{
Luigi Ranghetti, phD (2019)
}
|
\alias{cairo-png-functions}
\alias{cairo_read_func_t}
\alias{cairo_write_func_t}
\name{cairo-png-functions}
\title{PNG Support}
\description{Reading and writing PNG images}
\section{Methods and Functions}{
\code{\link{cairoImageSurfaceCreateFromPng}(filename)}\cr
\code{\link{cairoImageSurfaceCreateFromPngStream}(con)}\cr
\code{\link{cairoSurfaceWriteToPng}(surface, filename)}\cr
\code{\link{cairoSurfaceWriteToPngStream}(surface, con)}\cr
}
\section{Detailed Description}{The PNG functions allow reading PNG images into image surfaces, and writing
any surface to a PNG file.}
\section{User Functions}{\describe{
\item{\code{cairo_read_func_t(closure, data, length)}}{
\verb{cairo_read_func_t} is the type of function which is called when a
backend needs to read data from an input stream. It is passed the
closure which was specified by the user at the time the read
function was registered, the buffer to read the data into and the
length of the data in bytes. The read function should return
\code{CAIRO_STATUS_SUCCESS} if all the data was successfully read,
\code{CAIRO_STATUS_READ_ERROR} otherwise.
\describe{
\item{\code{closure}}{[R object] the input closure}
\item{\code{data}}{[char] the buffer into which to read the data}
\item{\code{length}}{[integer] the amount of data to read}
}
\emph{Returns:} [\code{\link{CairoStatus}}] the status code of the read operation
}
\item{\code{cairo_write_func_t(closure, data, length)}}{
\code{\link{CairoWriteFunc}} is the type of function which is called when a
backend needs to write data to an output stream. It is passed the
closure which was specified by the user at the time the write
function was registered, the data to write and the length of the
data in bytes. The write function should return
\code{CAIRO_STATUS_SUCCESS} if all the data was successfully written,
\code{CAIRO_STATUS_WRITE_ERROR} otherwise.
\describe{
\item{\code{closure}}{[R object] the output closure}
\item{\code{data}}{[char] the buffer containing the data to write}
\item{\code{length}}{[integer] the amount of data to write}
}
\emph{Returns:} [\code{\link{CairoStatus}}] the status code of the write operation
}
}}
\references{\url{http://www.cairographics.org/manual/cairo-png-functions.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/cairo-png-functions.Rd | no_license | hjy1210/RGtk2 | R | false | false | 2,321 | rd | \alias{cairo-png-functions}
\alias{cairo_read_func_t}
\alias{cairo_write_func_t}
\name{cairo-png-functions}
\title{PNG Support}
\description{Reading and writing PNG images}
\section{Methods and Functions}{
\code{\link{cairoImageSurfaceCreateFromPng}(filename)}\cr
\code{\link{cairoImageSurfaceCreateFromPngStream}(con)}\cr
\code{\link{cairoSurfaceWriteToPng}(surface, filename)}\cr
\code{\link{cairoSurfaceWriteToPngStream}(surface, con)}\cr
}
\section{Detailed Description}{The PNG functions allow reading PNG images into image surfaces, and writing
any surface to a PNG file.}
\section{User Functions}{\describe{
\item{\code{cairo_read_func_t(closure, data, length)}}{
\verb{cairo_read_func_t} is the type of function which is called when a
backend needs to read data from an input stream. It is passed the
closure which was specified by the user at the time the read
function was registered, the buffer to read the data into and the
length of the data in bytes. The read function should return
\code{CAIRO_STATUS_SUCCESS} if all the data was successfully read,
\code{CAIRO_STATUS_READ_ERROR} otherwise.
\describe{
\item{\code{closure}}{[R object] the input closure}
\item{\code{data}}{[char] the buffer into which to read the data}
\item{\code{length}}{[integer] the amount of data to read}
}
\emph{Returns:} [\code{\link{CairoStatus}}] the status code of the read operation
}
\item{\code{cairo_write_func_t(closure, data, length)}}{
\code{\link{CairoWriteFunc}} is the type of function which is called when a
backend needs to write data to an output stream. It is passed the
closure which was specified by the user at the time the write
function was registered, the data to write and the length of the
data in bytes. The write function should return
\code{CAIRO_STATUS_SUCCESS} if all the data was successfully written,
\code{CAIRO_STATUS_WRITE_ERROR} otherwise.
\describe{
\item{\code{closure}}{[R object] the output closure}
\item{\code{data}}{[char] the buffer containing the data to write}
\item{\code{length}}{[integer] the amount of data to write}
}
\emph{Returns:} [\code{\link{CairoStatus}}] the status code of the write operation
}
}}
\references{\url{http://www.cairographics.org/manual/cairo-png-functions.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library(shiny)
library(ggplot2)
library(datasets)
dataset <- iris
shinyUI(pageWithSidebar(
titlePanel(title = h4("Iris dataset plotter ", align="center")),
sidebarPanel(
helpText("This selection enables to select between different graph tipes from ggplot2 library and a common histogram"),
selectInput('GraphType', 'Graph type', c("Point","Histogram")),
helpText("The slider can be used to modify the number of item which are plotted"),
sliderInput('sampleSize', 'Sample Size', min=10, max=nrow(dataset),
value=min(10, nrow(dataset)), step=5, round=0),
helpText("Please select the X variable:"),
selectInput('x', 'X', names(dataset)),
helpText("Please select the Y variable:"),
selectInput('y', 'Y', names(dataset), names(dataset)[[2]]),
helpText("Select variable used to define the color ramps"),
selectInput('color', 'Color', c('None', names(dataset))),
helpText("Select the geometry type to use:"),
selectInput('geoType', 'GeometryType', c("Point","Line","Area","Box","Col","Density")),
helpText("Activate / Deactivate smoothing :"),
checkboxInput('smooth', 'Smooth')
),
mainPanel(
helpText("DOCUMENTATION: The IRIS dataset is perhaps the best known database to be found in the pattern recognition literature. .
The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other. The attributes are
the sepal length and width in cm, the petal length and width in cm and the different classes: Iris Setosa, Iris Versicolour and Iris Virginica"),
plotOutput('plot')
)
)) | /ui.R | no_license | tomas-aguado/DPAS4 | R | false | false | 1,725 | r | library(shiny)
library(ggplot2)
library(datasets)
dataset <- iris
shinyUI(pageWithSidebar(
titlePanel(title = h4("Iris dataset plotter ", align="center")),
sidebarPanel(
helpText("This selection enables to select between different graph tipes from ggplot2 library and a common histogram"),
selectInput('GraphType', 'Graph type', c("Point","Histogram")),
helpText("The slider can be used to modify the number of item which are plotted"),
sliderInput('sampleSize', 'Sample Size', min=10, max=nrow(dataset),
value=min(10, nrow(dataset)), step=5, round=0),
helpText("Please select the X variable:"),
selectInput('x', 'X', names(dataset)),
helpText("Please select the Y variable:"),
selectInput('y', 'Y', names(dataset), names(dataset)[[2]]),
helpText("Select variable used to define the color ramps"),
selectInput('color', 'Color', c('None', names(dataset))),
helpText("Select the geometry type to use:"),
selectInput('geoType', 'GeometryType', c("Point","Line","Area","Box","Col","Density")),
helpText("Activate / Deactivate smoothing :"),
checkboxInput('smooth', 'Smooth')
),
mainPanel(
helpText("DOCUMENTATION: The IRIS dataset is perhaps the best known database to be found in the pattern recognition literature. .
The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other. The attributes are
the sepal length and width in cm, the petal length and width in cm and the different classes: Iris Setosa, Iris Versicolour and Iris Virginica"),
plotOutput('plot')
)
)) |
# This script plots total rodent energy over time
# This script will no longer work -- data folder has been removed
library(dplyr)
dat = read.csv('data/monthly_E_controls.csv')
dat = dat[,names(dat) != 'period']
totals = rowSums(dat)
totalE = data.frame(total = totals,period = rownames(dat))
periodinfo = read.csv('data/Period_dates_single.csv')
periodinfo$date = as.Date(periodinfo$date,format='%m/%d/%Y')
totalE = merge(totalE,periodinfo,by='period')
totalE = filter(totalE,plots>22)
totalE = totalE[order(totalE$date),]
# =================================================
# plot
plot(totalE$date,totalE$total,xlab='',ylab='Total energy',main='Total energy through time')
abline(h=mean(totalE$total))
plot(totalE$date,log(totalE$total),xlab='',ylab='Total energy',main='Total energy through time')
lines(totalE$date,log(totalE$total))
abline(h=mean(log(totalE$total)))
abline(h=mean(log(totalE$total))-2*sd(log(totalE$total)),lty=3)
abline(h=mean(log(totalE$total))+2*sd(log(totalE$total)),lty=3)
# extreme low events
extreme = totalE[log(totalE$total)<(mean(log(totalE$total))-2*sd(log(totalE$total))),]
points(extreme$date,log(extreme$total),col='red',pch=20)
| /total_energy_ts.r | no_license | emchristensen/rodent_analysis | R | false | false | 1,178 | r | # This script plots total rodent energy over time
# This script will no longer work -- data folder has been removed
library(dplyr)
dat = read.csv('data/monthly_E_controls.csv')
dat = dat[,names(dat) != 'period']
totals = rowSums(dat)
totalE = data.frame(total = totals,period = rownames(dat))
periodinfo = read.csv('data/Period_dates_single.csv')
periodinfo$date = as.Date(periodinfo$date,format='%m/%d/%Y')
totalE = merge(totalE,periodinfo,by='period')
totalE = filter(totalE,plots>22)
totalE = totalE[order(totalE$date),]
# =================================================
# plot
plot(totalE$date,totalE$total,xlab='',ylab='Total energy',main='Total energy through time')
abline(h=mean(totalE$total))
plot(totalE$date,log(totalE$total),xlab='',ylab='Total energy',main='Total energy through time')
lines(totalE$date,log(totalE$total))
abline(h=mean(log(totalE$total)))
abline(h=mean(log(totalE$total))-2*sd(log(totalE$total)),lty=3)
abline(h=mean(log(totalE$total))+2*sd(log(totalE$total)),lty=3)
# extreme low events
extreme = totalE[log(totalE$total)<(mean(log(totalE$total))-2*sd(log(totalE$total))),]
points(extreme$date,log(extreme$total),col='red',pch=20)
|
# constructor
BFprobability <- function(odds, normalize = 0){
## Add denominator
if(options()$BFcheckProbabilityList){
## eliminate redundant models
if( length(odds) > 1 ){
odds = c( odds, (1/odds[1]) / (1/odds[1]) )
duplicates = 1:length(odds)
for(i in 2:length(odds)){
for(j in 1:(i-1)){
if( odds@numerator[[i]] %same% odds@numerator[[j]] ){
duplicates[i] = j
break
}
}
}
which.denom = duplicates[length(odds)]
not.duplicate = duplicates == (1:length(odds))
not.duplicate[ which.denom ] = FALSE
# get rid of redundant models (this could be done better)
odds = odds[not.duplicate]
}
}
new("BFprobability", odds = odds,
normalize = normalize,
version = BFInfo(FALSE))
}
setValidity("BFprobability", function(object){
if( !is.numeric(object@normalize) )
return("Normalization constant must be numeric.")
if( object@normalize > 0 )
return("Normalization constant must be a valid probability.")
odds = object@odds
## Add denominator
if(options()$BFcheckProbabilityList){
if( length(odds) > 1 ){
odds = c( odds, (1/odds[1]) / (1/odds[1]) )
duplicates = 1:length(odds)
for(i in 2:length(odds)){
for(j in 1:(i-1)){
if( odds@numerator[[i]] %same% odds@numerator[[j]] ){
return("Duplicate models not allowed in probability objects.")
}
}
}
}
}
return(TRUE)
})
setMethod('show', "BFprobability", function(object){
odds = object@odds
is.prior = is.null(object@odds@bayesFactor)
if(is.prior){
cat("Prior probabilities\n--------------\n")
}else{
cat("Posterior probabilities\n--------------\n")
}
logprobs = extractProbabilities(object, logprobs = TRUE)
logprobs$probs = sapply(logprobs$probs, expString)
indices = paste("[",1:length(object),"]",sep="")
# pad model names
nms = paste(indices,rownames(logprobs),sep=" ")
maxwidth = max(nchar(nms))
nms = str_pad(nms,maxwidth,side="right",pad=" ")
# pad Bayes factors
maxwidth = max(nchar(logprobs$probs))
probString = str_pad(logprobs$probs,maxwidth,side="right",pad=" ")
for(i in 1:nrow(logprobs)){
if(is.prior){
cat(nms[i]," : ",probString[i],"\n",sep="")
}else{
cat(nms[i]," : ",probString[i]," \u00B1",round(logprobs$error[i]*100,2),"%\n",sep="")
}
}
cat("\nNormalized probability: ", expString(object@normalize), " \n")
cat("---\nModel type: ",class(object@odds@denominator)[1],", ",object@odds@denominator@type,"\n\n",sep="")
})
setMethod('summary', "BFprobability", function(object){
show(object)
})
#' @rdname extractProbabilities-methods
#' @aliases extractProbabilities,BFprobability-method
setMethod("extractProbabilities", "BFprobability", function(x, logprobs = FALSE, onlyprobs = FALSE){
norm = x@normalize
odds = x@odds
if( (length(odds) > 1 ) | !( odds@numerator[[1]] %same% odds@denominator ) ){
odds = c(odds, (1/odds[1])/(1/odds[1]))
x = extractOdds(odds, logodds = TRUE)
logsumodds = logMeanExpLogs(x$odds) + log(length(x$odds))
logp = x$odds - logsumodds + norm
z = data.frame(probs = logp, error = NA)
}else{ # numerator and denominator are the same
x = extractOdds(odds, logodds = TRUE)
z = data.frame(probs = norm, error = NA)
}
rownames(z) = rownames(x)
if(!logprobs) z$probs = exp(z$probs)
if(onlyprobs) z = z$probs
return(z)
})
#' @rdname BFprobability-class
#' @name /,BFprobability,numeric-method
#' @param e1 BFprobability object
#' @param e2 new normalization constant
setMethod('/', signature("BFprobability", "numeric"), function(e1, e2){
if(e2 > 1 | e2 <= 0)
stop("Normalization constant must be >0 and not >1")
return(e1 - log(e2))
}
)
#' @rdname BFprobability-class
#' @name -,BFprobability,numeric-method
setMethod('-', signature("BFprobability", "numeric"), function(e1, e2){
if(length(e2)>1) stop("Normalization constant must be a scalar.")
if(e2 > 0 | e2 == -Inf)
stop("Normalization constant must be >0 and not >1")
e1@normalize = e2
return(e1)
}
)
#' @rdname BFprobability-class
#' @name [,BFprobability,index,missing,missing-method
#' @param x BFprobability object
#' @param i indices indicating elements to extract
#' @param j unused for BFprobability objects
#' @param drop unused
#' @param ... further arguments passed to related methods
setMethod("[", signature(x = "BFprobability", i = "index", j = "missing",
drop = "missing"),
function (x, i, j, ..., drop) {
if((na <- nargs()) == 2){
if(is.logical(i)){
if(any(i)){
i = (1:length(i))[i]
}else{
return(NULL)
}
}
i = unique(i)
norm = x@normalize
logprobs = extractProbabilities(x, logprobs = TRUE)[i, ,drop=FALSE]
sumlogprobs = logMeanExpLogs(logprobs$probs) + log(nrow(logprobs))
if(length(x) == length(i) ){
newnorm = norm
}else if( length(i) == 1){
newnorm = sumlogprobs
}else{
newnorm = norm + sumlogprobs
}
whichnum = i[1:max(1, length(i)-1)]
whichdenom = i[length(i)]
newodds = c(x@odds, (1/x@odds[1])/(1/x@odds[1]))
newodds = newodds[whichnum] / newodds[whichdenom]
x = BFprobability( newodds, newnorm )
}else stop("invalid nargs()= ",na)
return(x)
})
#' @rdname BFprobability-class
#' @name filterBF,BFprobability,character-method
#' @param name regular expression to search name
#' @param perl logical. Should perl-compatible regexps be used? See ?grepl for details.
#' @param fixed logical. If TRUE, pattern is a string to be matched as is. See ?grepl for details.
setMethod("filterBF", signature(x = "BFprobability", name = "character"),
function (x, name, perl, fixed, ...) {
my.names = names(x)
matches = sapply(name, function(el){
grepl(el, my.names, fixed = fixed, perl = perl)
})
any.matches = apply(matches, 1, any)
x[any.matches]
}
)
######
# S3
######
##' This function coerces objects to the BFprobability class
##'
##' Function to coerce objects to the BFprobability class
##'
##' Currently, this function will only work with objects of class
##' \code{BFOdds}.
##' @title Function to coerce objects to the BFprobability class
##' @param object an object of appropriate class (BFodds)
##' @param normalize the sum of the probabilities for all models in the object (1 by default)
##' @param lognormalize alternative to \code{normalize}; the
##' logarithm of the normalization constant (0 by default)
##' @return An object of class \code{BFprobability}
##' @author Richard D. Morey (\email{richarddmorey@@gmail.com})
##' @export
##' @keywords misc
as.BFprobability <- function(object, normalize = NULL, lognormalize = NULL)
UseMethod("as.BFprobability")
length.BFprobability <- function(x)
nrow(extractProbabilities(x))
names.BFprobability <- function(x) {
rownames(extractProbabilities(x))
}
# See http://www-stat.stanford.edu/~jmc4/classInheritance.pdf
sort.BFprobability <- function(x, decreasing = FALSE, ...){
ord = order(extractProbabilities(x, logprobs=TRUE)$probs, decreasing = decreasing)
return(x[ord])
}
max.BFprobability <- function(..., na.rm=FALSE){
if(nargs()>2) stop("Cannot concatenate probability objects.")
el <- head(list(...)[[1]], n=1)
return(el)
}
min.BFprobability <- function(..., na.rm=FALSE){
if(nargs()>2) stop("Cannot concatenate probability objects.")
el <- tail(list(...)[[1]], n=1)
return(el)
}
which.max.BFprobability <- function(x){
index = which.max(extractProbabilities(x, logprobs=TRUE)$probs)
names(index) = names(x)[index]
return(index)
}
which.min.BFprobability <- function(x){
index = which.min(extractProbabilities(x, logprobs=TRUE)$probs)
names(index) = names(x)[index]
return(index)
}
head.BFprobability <- function(x, n=6L, ...){
n = ifelse(n>length(x),length(x),n)
x = sort(x, decreasing=TRUE)
return(x[1:n])
}
tail.BFprobability <- function(x, n=6L, ...){
n = ifelse(n>length(x),length(x),n)
x = sort(x)
return(x[n:1])}
as.data.frame.BFprobability <- function(x, row.names = NULL, optional=FALSE,...){
df = extractProbabilities(x)
return(df)
}
as.vector.BFprobability <- function(x, mode = "any"){
if( !(mode %in% c("any", "numeric"))) stop("Cannot coerce to mode ", mode)
v = extractProbabilities(x)$probs
names(v) = names(x)
return(v)
}
sum.BFprobability <-
function(..., na.rm = FALSE)
{
if(na.rm) warning("na.rm argument not used for BFprobability objects.")
sapply(list(...), function(el){
if(is(el, "BFprobability")){
return(exp(el@normalize))
}else{
return(NA)
}
}, USE.NAMES = FALSE)
}
| /pkg/BayesFactor/R/methods-BFprobability.R | no_license | Ax3man/BayesFactor | R | false | false | 9,100 | r | # constructor
BFprobability <- function(odds, normalize = 0){
## Add denominator
if(options()$BFcheckProbabilityList){
## eliminate redundant models
if( length(odds) > 1 ){
odds = c( odds, (1/odds[1]) / (1/odds[1]) )
duplicates = 1:length(odds)
for(i in 2:length(odds)){
for(j in 1:(i-1)){
if( odds@numerator[[i]] %same% odds@numerator[[j]] ){
duplicates[i] = j
break
}
}
}
which.denom = duplicates[length(odds)]
not.duplicate = duplicates == (1:length(odds))
not.duplicate[ which.denom ] = FALSE
# get rid of redundant models (this could be done better)
odds = odds[not.duplicate]
}
}
new("BFprobability", odds = odds,
normalize = normalize,
version = BFInfo(FALSE))
}
setValidity("BFprobability", function(object){
if( !is.numeric(object@normalize) )
return("Normalization constant must be numeric.")
if( object@normalize > 0 )
return("Normalization constant must be a valid probability.")
odds = object@odds
## Add denominator
if(options()$BFcheckProbabilityList){
if( length(odds) > 1 ){
odds = c( odds, (1/odds[1]) / (1/odds[1]) )
duplicates = 1:length(odds)
for(i in 2:length(odds)){
for(j in 1:(i-1)){
if( odds@numerator[[i]] %same% odds@numerator[[j]] ){
return("Duplicate models not allowed in probability objects.")
}
}
}
}
}
return(TRUE)
})
setMethod('show', "BFprobability", function(object){
odds = object@odds
is.prior = is.null(object@odds@bayesFactor)
if(is.prior){
cat("Prior probabilities\n--------------\n")
}else{
cat("Posterior probabilities\n--------------\n")
}
logprobs = extractProbabilities(object, logprobs = TRUE)
logprobs$probs = sapply(logprobs$probs, expString)
indices = paste("[",1:length(object),"]",sep="")
# pad model names
nms = paste(indices,rownames(logprobs),sep=" ")
maxwidth = max(nchar(nms))
nms = str_pad(nms,maxwidth,side="right",pad=" ")
# pad Bayes factors
maxwidth = max(nchar(logprobs$probs))
probString = str_pad(logprobs$probs,maxwidth,side="right",pad=" ")
for(i in 1:nrow(logprobs)){
if(is.prior){
cat(nms[i]," : ",probString[i],"\n",sep="")
}else{
cat(nms[i]," : ",probString[i]," \u00B1",round(logprobs$error[i]*100,2),"%\n",sep="")
}
}
cat("\nNormalized probability: ", expString(object@normalize), " \n")
cat("---\nModel type: ",class(object@odds@denominator)[1],", ",object@odds@denominator@type,"\n\n",sep="")
})
setMethod('summary', "BFprobability", function(object){
show(object)
})
#' @rdname extractProbabilities-methods
#' @aliases extractProbabilities,BFprobability-method
setMethod("extractProbabilities", "BFprobability", function(x, logprobs = FALSE, onlyprobs = FALSE){
norm = x@normalize
odds = x@odds
if( (length(odds) > 1 ) | !( odds@numerator[[1]] %same% odds@denominator ) ){
odds = c(odds, (1/odds[1])/(1/odds[1]))
x = extractOdds(odds, logodds = TRUE)
logsumodds = logMeanExpLogs(x$odds) + log(length(x$odds))
logp = x$odds - logsumodds + norm
z = data.frame(probs = logp, error = NA)
}else{ # numerator and denominator are the same
x = extractOdds(odds, logodds = TRUE)
z = data.frame(probs = norm, error = NA)
}
rownames(z) = rownames(x)
if(!logprobs) z$probs = exp(z$probs)
if(onlyprobs) z = z$probs
return(z)
})
#' @rdname BFprobability-class
#' @name /,BFprobability,numeric-method
#' @param e1 BFprobability object
#' @param e2 new normalization constant
setMethod('/', signature("BFprobability", "numeric"), function(e1, e2){
if(e2 > 1 | e2 <= 0)
stop("Normalization constant must be >0 and not >1")
return(e1 - log(e2))
}
)
#' @rdname BFprobability-class
#' @name -,BFprobability,numeric-method
setMethod('-', signature("BFprobability", "numeric"), function(e1, e2){
if(length(e2)>1) stop("Normalization constant must be a scalar.")
if(e2 > 0 | e2 == -Inf)
stop("Normalization constant must be >0 and not >1")
e1@normalize = e2
return(e1)
}
)
#' @rdname BFprobability-class
#' @name [,BFprobability,index,missing,missing-method
#' @param x BFprobability object
#' @param i indices indicating elements to extract
#' @param j unused for BFprobability objects
#' @param drop unused
#' @param ... further arguments passed to related methods
setMethod("[", signature(x = "BFprobability", i = "index", j = "missing",
drop = "missing"),
function (x, i, j, ..., drop) {
if((na <- nargs()) == 2){
if(is.logical(i)){
if(any(i)){
i = (1:length(i))[i]
}else{
return(NULL)
}
}
i = unique(i)
norm = x@normalize
logprobs = extractProbabilities(x, logprobs = TRUE)[i, ,drop=FALSE]
sumlogprobs = logMeanExpLogs(logprobs$probs) + log(nrow(logprobs))
if(length(x) == length(i) ){
newnorm = norm
}else if( length(i) == 1){
newnorm = sumlogprobs
}else{
newnorm = norm + sumlogprobs
}
whichnum = i[1:max(1, length(i)-1)]
whichdenom = i[length(i)]
newodds = c(x@odds, (1/x@odds[1])/(1/x@odds[1]))
newodds = newodds[whichnum] / newodds[whichdenom]
x = BFprobability( newodds, newnorm )
}else stop("invalid nargs()= ",na)
return(x)
})
#' @rdname BFprobability-class
#' @name filterBF,BFprobability,character-method
#' @param name regular expression to search name
#' @param perl logical. Should perl-compatible regexps be used? See ?grepl for details.
#' @param fixed logical. If TRUE, pattern is a string to be matched as is. See ?grepl for details.
setMethod("filterBF", signature(x = "BFprobability", name = "character"),
function (x, name, perl, fixed, ...) {
my.names = names(x)
matches = sapply(name, function(el){
grepl(el, my.names, fixed = fixed, perl = perl)
})
any.matches = apply(matches, 1, any)
x[any.matches]
}
)
######
# S3
######
##' This function coerces objects to the BFprobability class
##'
##' Function to coerce objects to the BFprobability class
##'
##' Currently, this function will only work with objects of class
##' \code{BFOdds}.
##' @title Function to coerce objects to the BFprobability class
##' @param object an object of appropriate class (BFodds)
##' @param normalize the sum of the probabilities for all models in the object (1 by default)
##' @param lognormalize alternative to \code{normalize}; the
##' logarithm of the normalization constant (0 by default)
##' @return An object of class \code{BFprobability}
##' @author Richard D. Morey (\email{richarddmorey@@gmail.com})
##' @export
##' @keywords misc
as.BFprobability <- function(object, normalize = NULL, lognormalize = NULL)
UseMethod("as.BFprobability")
length.BFprobability <- function(x)
nrow(extractProbabilities(x))
names.BFprobability <- function(x) {
rownames(extractProbabilities(x))
}
# See http://www-stat.stanford.edu/~jmc4/classInheritance.pdf
sort.BFprobability <- function(x, decreasing = FALSE, ...){
ord = order(extractProbabilities(x, logprobs=TRUE)$probs, decreasing = decreasing)
return(x[ord])
}
max.BFprobability <- function(..., na.rm=FALSE){
if(nargs()>2) stop("Cannot concatenate probability objects.")
el <- head(list(...)[[1]], n=1)
return(el)
}
min.BFprobability <- function(..., na.rm=FALSE){
if(nargs()>2) stop("Cannot concatenate probability objects.")
el <- tail(list(...)[[1]], n=1)
return(el)
}
which.max.BFprobability <- function(x){
index = which.max(extractProbabilities(x, logprobs=TRUE)$probs)
names(index) = names(x)[index]
return(index)
}
which.min.BFprobability <- function(x){
index = which.min(extractProbabilities(x, logprobs=TRUE)$probs)
names(index) = names(x)[index]
return(index)
}
head.BFprobability <- function(x, n=6L, ...){
n = ifelse(n>length(x),length(x),n)
x = sort(x, decreasing=TRUE)
return(x[1:n])
}
tail.BFprobability <- function(x, n=6L, ...){
n = ifelse(n>length(x),length(x),n)
x = sort(x)
return(x[n:1])}
as.data.frame.BFprobability <- function(x, row.names = NULL, optional=FALSE,...){
df = extractProbabilities(x)
return(df)
}
as.vector.BFprobability <- function(x, mode = "any"){
if( !(mode %in% c("any", "numeric"))) stop("Cannot coerce to mode ", mode)
v = extractProbabilities(x)$probs
names(v) = names(x)
return(v)
}
sum.BFprobability <-
function(..., na.rm = FALSE)
{
if(na.rm) warning("na.rm argument not used for BFprobability objects.")
sapply(list(...), function(el){
if(is(el, "BFprobability")){
return(exp(el@normalize))
}else{
return(NA)
}
}, USE.NAMES = FALSE)
}
|
dt_simN<-read.table("simulation_attach_noise.txt", header=T, sep="\t");
plot(c(0,max(dt_simN[,1]+50)), c(min(dt_simN[,2]),max(dt_simN[,2])), main="simulated", col=2, type="n");
lines(dt_simN[,1], dt_simN[,2], col=8);
dt_sim<-read.table("simulation_attach.txt", header=T, sep="\t");
lines(dt_sim[,1], dt_sim[,2], col=6);
#####for displaying the testing file for linear regression
#simulation detached
dt_simN_de<-read.table("simulation_detach_noise.txt", header=T, sep="\t");
dt_simN_de_short<-read.table("simulation_detach_noise_short.txt", header=T, sep="\t");
dt_simN_de_smoothed<-read.table("simulation_detach_noise_smoothed.txt", header=T, sep="\t");
dt_simN_de_slope<-read.table("simulation_detach_noise_slope.txt", header=T, sep="\t");
invDt_dt=1/dt_simN_de_slope[,2];
invR=1/dt_simN_de_smoothed[,2]
jpeg(filename="plot.jpg", width = 1200, height = 1200, pointsize = 16)
plot(c(0,max(dt_simN_de[,1]+50)), c(min(dt_simN_de[,2]),max(dt_simN_de[,2])), main="simulated", col=2, type="l");
lines(dt_simN_de[,1], dt_simN_de[,2], col=8);
lines(dt_simN_de_short[,1], dt_simN_de_short[,2], col=9);
lines(dt_simN_de_smoothed[,1], dt_simN_de_smoothed[,2], col=2);
plot(dt_simN_de_slope[,1], dt_simN_de_slope[,2], main="slope vs time", col=2, type="p");
plot(dt_simN_de_smoothed[,2], dt_simN_de_slope[,2], main="slope vs response", col=2, type="p");
plot(invR, invDt_dt, main="inv dr/dt vs inv RU", col=2, type="p");
par(op)
dev.off();
regOut<-lm(invDt_dt~ invR)
summary(regOut);
n=1;
regOut<-lm(invDt_dt[c(n:330)]~ invR[c(n:330)])
summary(regOut);
setwd("E:/MSI_software/SPR/simulation/MH/MH 2000000");
setwd("E:/MSI_software/SPR/step0.005")
setwd("E:/MSI_software/SPR/BayesianEstimationAffinityConstant/Testing/bin/Debug")
setwd("E:/MSI_software/SPR/BayesianEstimationAffinityConstant/bayestimateCon/bin/Debug")
dt_mc<-read.table("MCMC_run.txt", header=T, sep="\t", );
dt_mc<-dt_mc[c(40000:length(dt_mc[,1])),];
jpeg(filename="trace.jpg", width = 800, height = 2400, pointsize = 16)
op<-par( mfrow = c( 7, 1) )
plot(dt_mc[,1], dt_mc$ka, main="ka", col=2, type="l");
plot(dt_mc[,1], dt_mc$kd, main="kd", col=2, type="l");
plot(dt_mc[,1], dt_mc$kM, main="kM", col=2, type="l");
plot(dt_mc[,1], dt_mc$conc, main="conc", col=2, type="l");
plot(dt_mc[,1], dt_mc$Rmax, main="Rmax", col=2, type="l");
plot(dt_mc[,1], dt_mc$R0, main="R0", col=2, type="l");
plot(dt_mc[,1], dt_mc$curLLD, main="LLD", col=2, type="l");
par(op)
dev.off();
kc<-dt_mc$ka*dt_mc$conc;
mean(dt_mc$ka)
mean(dt_mc$kd)
mean(dt_mc$kM)
mean(dt_mc$conc)
mean(dt_mc$Rmax)
mean(dt_mc$R0)
mean(dt_mc$Sigma)
mean(kc);
plot(dt_mc[,1], kc, main="ka*conc", col=2, type="l");
k1<- mean(dt_mc$conc)* mean(dt_mc$ka)/ mean(dt_mc$kM)
k2<-1.5E3*1.9E-6/1E6
############displaying the fitting results for comparison
jpeg(filename="fitting.jpg", width = 800, height = 2400, pointsize = 16)
op<-par( mfrow = c( 2, 1) )
dt_simN<-read.table("simulation_attach_noise.txt", header=T, sep="\t");
plot(c(0,max(dt_simN[,1]+600)), c(min(dt_simN[,2]),max(dt_simN[,2])), main="simulated", col=2, type="n");
lines(dt_simN[,1], dt_simN[,2], col=8);
dt_simN<-read.table("simulation_detach_noise.txt", header=T, sep="\t");
lines(dt_simN[,1]+400, dt_simN[,2], col=8);
dt_sim<-read.table("simulation_attach.txt", header=T, sep="\t");
#lines(dt_sim[,1], dt_sim[,2], col=2);
points(dt_sim[,1], dt_sim[,2], col=2);
dt_sim<-read.table("simulation_detach.txt", header=T, sep="\t");
#lines(dt_sim[,1], dt_sim[,2], col=2);
points(dt_sim[,1]+400, dt_sim[,2], col=2);
###now for fitting, as means
dt_simMean<-read.table("simulationMean.txt", header=T, sep="\t");
points(dt_simMean[,1], dt_simMean[,2], col=3);
dt_simMean<-read.table("simulationMean_detach.txt", header=T, sep="\t");
points(dt_simMean[,1]+400, dt_simMean[,2], col=3);
###now for fitting, as last set
dt_simLast<-read.table("simulationLast.txt", header=T, sep="\t");
lines(dt_simLast[,1], dt_simLast[,2], col=4, lty=2);
dt_simLast<-read.table("simulationLast_detach.txt", header=T, sep="\t");
lines(dt_simLast[,1]+400, dt_simLast[,2], col=4, lty=2);
lines(t, R[c(1:(length(R)-1))], col=5);
dev.off();
| /bayestimateCon/Rcode.r | no_license | ffeng23/BayesianEstimationAffinityConstant | R | false | false | 4,237 | r |
dt_simN<-read.table("simulation_attach_noise.txt", header=T, sep="\t");
plot(c(0,max(dt_simN[,1]+50)), c(min(dt_simN[,2]),max(dt_simN[,2])), main="simulated", col=2, type="n");
lines(dt_simN[,1], dt_simN[,2], col=8);
dt_sim<-read.table("simulation_attach.txt", header=T, sep="\t");
lines(dt_sim[,1], dt_sim[,2], col=6);
#####for displaying the testing file for linear regression
#simulation detached
dt_simN_de<-read.table("simulation_detach_noise.txt", header=T, sep="\t");
dt_simN_de_short<-read.table("simulation_detach_noise_short.txt", header=T, sep="\t");
dt_simN_de_smoothed<-read.table("simulation_detach_noise_smoothed.txt", header=T, sep="\t");
dt_simN_de_slope<-read.table("simulation_detach_noise_slope.txt", header=T, sep="\t");
invDt_dt=1/dt_simN_de_slope[,2];
invR=1/dt_simN_de_smoothed[,2]
jpeg(filename="plot.jpg", width = 1200, height = 1200, pointsize = 16)
plot(c(0,max(dt_simN_de[,1]+50)), c(min(dt_simN_de[,2]),max(dt_simN_de[,2])), main="simulated", col=2, type="l");
lines(dt_simN_de[,1], dt_simN_de[,2], col=8);
lines(dt_simN_de_short[,1], dt_simN_de_short[,2], col=9);
lines(dt_simN_de_smoothed[,1], dt_simN_de_smoothed[,2], col=2);
plot(dt_simN_de_slope[,1], dt_simN_de_slope[,2], main="slope vs time", col=2, type="p");
plot(dt_simN_de_smoothed[,2], dt_simN_de_slope[,2], main="slope vs response", col=2, type="p");
plot(invR, invDt_dt, main="inv dr/dt vs inv RU", col=2, type="p");
par(op)
dev.off();
regOut<-lm(invDt_dt~ invR)
summary(regOut);
n=1;
regOut<-lm(invDt_dt[c(n:330)]~ invR[c(n:330)])
summary(regOut);
setwd("E:/MSI_software/SPR/simulation/MH/MH 2000000");
setwd("E:/MSI_software/SPR/step0.005")
setwd("E:/MSI_software/SPR/BayesianEstimationAffinityConstant/Testing/bin/Debug")
setwd("E:/MSI_software/SPR/BayesianEstimationAffinityConstant/bayestimateCon/bin/Debug")
dt_mc<-read.table("MCMC_run.txt", header=T, sep="\t", );
dt_mc<-dt_mc[c(40000:length(dt_mc[,1])),];
jpeg(filename="trace.jpg", width = 800, height = 2400, pointsize = 16)
op<-par( mfrow = c( 7, 1) )
plot(dt_mc[,1], dt_mc$ka, main="ka", col=2, type="l");
plot(dt_mc[,1], dt_mc$kd, main="kd", col=2, type="l");
plot(dt_mc[,1], dt_mc$kM, main="kM", col=2, type="l");
plot(dt_mc[,1], dt_mc$conc, main="conc", col=2, type="l");
plot(dt_mc[,1], dt_mc$Rmax, main="Rmax", col=2, type="l");
plot(dt_mc[,1], dt_mc$R0, main="R0", col=2, type="l");
plot(dt_mc[,1], dt_mc$curLLD, main="LLD", col=2, type="l");
par(op)
dev.off();
kc<-dt_mc$ka*dt_mc$conc;
mean(dt_mc$ka)
mean(dt_mc$kd)
mean(dt_mc$kM)
mean(dt_mc$conc)
mean(dt_mc$Rmax)
mean(dt_mc$R0)
mean(dt_mc$Sigma)
mean(kc);
plot(dt_mc[,1], kc, main="ka*conc", col=2, type="l");
k1<- mean(dt_mc$conc)* mean(dt_mc$ka)/ mean(dt_mc$kM)
k2<-1.5E3*1.9E-6/1E6
############displaying the fitting results for comparison
jpeg(filename="fitting.jpg", width = 800, height = 2400, pointsize = 16)
op<-par( mfrow = c( 2, 1) )
dt_simN<-read.table("simulation_attach_noise.txt", header=T, sep="\t");
plot(c(0,max(dt_simN[,1]+600)), c(min(dt_simN[,2]),max(dt_simN[,2])), main="simulated", col=2, type="n");
lines(dt_simN[,1], dt_simN[,2], col=8);
dt_simN<-read.table("simulation_detach_noise.txt", header=T, sep="\t");
lines(dt_simN[,1]+400, dt_simN[,2], col=8);
dt_sim<-read.table("simulation_attach.txt", header=T, sep="\t");
#lines(dt_sim[,1], dt_sim[,2], col=2);
points(dt_sim[,1], dt_sim[,2], col=2);
dt_sim<-read.table("simulation_detach.txt", header=T, sep="\t");
#lines(dt_sim[,1], dt_sim[,2], col=2);
points(dt_sim[,1]+400, dt_sim[,2], col=2);
###now for fitting, as means
dt_simMean<-read.table("simulationMean.txt", header=T, sep="\t");
points(dt_simMean[,1], dt_simMean[,2], col=3);
dt_simMean<-read.table("simulationMean_detach.txt", header=T, sep="\t");
points(dt_simMean[,1]+400, dt_simMean[,2], col=3);
###now for fitting, as last set
dt_simLast<-read.table("simulationLast.txt", header=T, sep="\t");
lines(dt_simLast[,1], dt_simLast[,2], col=4, lty=2);
dt_simLast<-read.table("simulationLast_detach.txt", header=T, sep="\t");
lines(dt_simLast[,1]+400, dt_simLast[,2], col=4, lty=2);
lines(t, R[c(1:(length(R)-1))], col=5);
dev.off();
|
library(RobAStBase)
### Name: MEstimate-class
### Title: MEstimate-class.
### Aliases: MEstimate-class Mroot Mroot,MEstimate-method
### show,MEstimate-method
### Keywords: classes
### ** Examples
## prototype
new("MEstimate")
| /data/genthat_extracted_code/RobAStBase/examples/MEstimate-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 235 | r | library(RobAStBase)
### Name: MEstimate-class
### Title: MEstimate-class.
### Aliases: MEstimate-class Mroot Mroot,MEstimate-method
### show,MEstimate-method
### Keywords: classes
### ** Examples
## prototype
new("MEstimate")
|
## ----set-options, echo=FALSE, cache=FALSE---------------------------------------------------------
options(width = 100)
## -------------------------------------------------------------------------------------------------
library("joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("simjointmeta", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
exampledat1 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = FALSE,
ntms = 5,longmeasuretimes = c(0, 1, 2, 3, 4),
beta1 = c(1, 2, 3), beta2 = 1,
rand_ind = "intslope", rand_stud = NULL,
gamma_ind = 1,
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5),nrow = 2),
vare = 0.01, theta0 = -3, theta1 = 1,
censoring = TRUE, censlam = exp(-3),
truncation = FALSE,
trunctime = max(longmeasuretimes))
## ---- eval=FALSE----------------------------------------------------------------------------------
# exampledat1$percentevent
## -------------------------------------------------------------------------------------------------
str(exampledat1$longitudinal)
## -------------------------------------------------------------------------------------------------
str(exampledat1$survival)
## -------------------------------------------------------------------------------------------------
exampledat2 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = TRUE, ntms = 5,
longmeasuretimes = c(0, 1, 2, 3, 4), beta1 = c(1, 2, 3),
beta2 = 1, rand_ind = "intslope", rand_stud = "inttreat",
gamma_ind = c(0.5, 1), gamma_stud = c(0.5, 1),
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
sigb_stud = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
vare = 0.01, theta0 = -3, theta1 = 1, censoring = TRUE,
censlam = exp(-3), truncation = FALSE,
trunctime = max(longmeasuretimes))
## -------------------------------------------------------------------------------------------------
gamma_ind_set <- list(c(0.5, 1), c(0.4, 0.9), c(0.6, 1.1), c(0.5, 0.9), c(0.4, 1.1))
gamma_stud_set <- list(c(0.6, 1.1), c(0.5, 1), c(0.5, 0.9), c(0.4, 1.1), c(0.4, 0.9))
censlamset <- c(exp(-3), exp(-2.9), exp(-3.1), exp(-3), exp(-3.05))
theta0set <- c(-3, -2.9, -3, -2.9, -3.1)
theta1set <- c(1, 0.9, 1.1, 1, 0.9)
exampledat2 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = TRUE, ntms = 5,
longmeasuretimes = c(0, 1, 2, 3, 4), beta1 = c(1, 2, 3),
beta2 = 1, rand_ind = "intslope", rand_stud = "inttreat",
gamma_ind = gamma_ind_set, gamma_stud = gamma_stud_set,
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
sigb_stud = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
vare = 0.01, theta0 = theta0set, theta1 = theta1set,
censoring = TRUE, censlam = censlamset, truncation = FALSE,
trunctime = max(longmeasuretimes))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("simdat2", package = "joineRmeta")
# help("simdat3", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# data("simdat2")
# str(simdat2)
## -------------------------------------------------------------------------------------------------
jointdat<-tojointdata(longitudinal = simdat2$longitudinal,
survival = simdat2$survival, id = "id", longoutcome = "Y",
timevarying = c("time","ltime"), survtime = "survtime", cens = "cens",
time = "time")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("tojointdata", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
str(jointdat)
## -------------------------------------------------------------------------------------------------
jointdat$baseline$study <- as.factor(jointdat$baseline$study)
jointdat$baseline$treat <- as.factor(jointdat$baseline$treat)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("removeafter", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# str(simdat3)
## -------------------------------------------------------------------------------------------------
jointdat3<-tojointdata(longitudinal = simdat3$longitudinal,
survival = simdat3$survival, id = "id", longoutcome = "Y",
timevarying = c("time","ltime"), survtime = "survtime", cens = "cens",
time = "time")
## ---- results = "hide"----------------------------------------------------------------------------
jointdat3.1<-removeafter(data = jointdat3, longitudinal = "Y",
survival = "survtime", id = "id", time = "time")
## -------------------------------------------------------------------------------------------------
str(jointdat3)
str(jointdat3.1)
## -------------------------------------------------------------------------------------------------
sepplots <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"), type = "Both")
## ---- fig.show='hold', fig.keep='high'------------------------------------------------------------
sepplots$longplots$`studyplot.Study 3`
sepplots$eventplots[[1]]
## -------------------------------------------------------------------------------------------------
sepplots2 <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"),
type = "Both", eventby = "treat")
sepplots3 <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"),
type = "Event", eventconfint = TRUE)
sepplots2$eventplots$`studyplot.Study 3`
sepplots3$eventplots[[2]]
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmetaplot", package = "joineRmeta")
## ---- fig.show='hide'-----------------------------------------------------------------------------
allplot2 <- suppressWarnings(jointmetaplotall(plotlist = sepplots2, ncol = 2,
top = "All studies",
type = "Both"))
## ---- eval=FALSE----------------------------------------------------------------------------------
# allplot2$longall
# allplot2$eventsall
## ---- fig.height=10, fig.width=5------------------------------------------------------------------
allplot2$longall
allplot2$eventsall
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmetaplotall", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("joineRfits", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
joineRmodels <- joineRfits[c("joineRfit1", "joineRfit2", "joineRfit3")]
joineRmodelsSE <- joineRfits[c("joineRfit1SE", "joineRfit2SE", "joineRfit3SE")]
## -------------------------------------------------------------------------------------------------
summary(joineRmodels[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("joineRfits2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
joineRmodels2 <- joineRfits2[c("joineRfit6", "joineRfit7", "joineRfit8",
"joineRfit9", "joineRfit10")]
joineRmodels2SE <- joineRfits2[c("joineRfit6SE", "joineRfit7SE", "joineRfit8SE",
"joineRfit9SE", "joineRfit10SE")]
## -------------------------------------------------------------------------------------------------
summary(joineRmodels2[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("JMfits", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
summary(JMfits[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("JMfits2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
summary(JMfits2[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
MAjoineRfits <- jointmeta2(fits = joineRmodels, SE = joineRmodelsSE,
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
names(MAjoineRfits$longMA)
## ---- fig.height=4, fig.width=9, warning = FALSE, message = FALSE---------------------------------
library(meta)
forest(MAjoineRfits$longMA$treat1)
## ---- error = TRUE--------------------------------------------------------------------------------
MAjoineRfits2 <- jointmeta2(fits = c(joineRmodels[1:3], joineRmodels2[1:2]),
SE = c(joineRmodelsSE[1:3],joineRmodels2SE[1:2]),
longpar = c("time", "treat1"), survpar = "treat1",
assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
MAJMfits <- jointmeta2(fits = JMfits, longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
MAJMfits2 <- jointmeta2(fits = JMfits2, longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## ---- error = TRUE--------------------------------------------------------------------------------
MAtest <- jointmeta2(fits = c(JMfits2[1:3], JMfits[1:2]),
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## ---- error = TRUE--------------------------------------------------------------------------------
MAtest <- jointmeta2(fits = c(JMfits2[1:3], joineRfits[1:2]),
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("onestage1", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit0 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit1 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat*study,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat*study,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1.object", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit1SE <- jointmetaSE(fitted = onestagefit1, n.boot = 200,
# overalleffects = list(long = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3")),
# surv = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3"))))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1SE.object", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit1 <- onestage1$onestagefit1
onestagefit1SE <- onestage1$onestagefit1SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit1)
## -------------------------------------------------------------------------------------------------
print(onestagefit1)
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Longitudinal")
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Survival")
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Latent")
## ---- eval=FALSE----------------------------------------------------------------------------------
# ranef(onestagefit1, type = "individual")
## -------------------------------------------------------------------------------------------------
rancov(onestagefit1, type = "individual")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Longitudinal")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Survival")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Rand_ind")
## -------------------------------------------------------------------------------------------------
print(onestagefit1SE)
## -------------------------------------------------------------------------------------------------
confint(onestagefit1SE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# vcov(onestagefit1SE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("onestage2", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit2 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat,
# long.rand.ind = c("int", "time"),
# long.rand.stud = c("study", "treat"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit2SE<-jointmetaSE(fitted = onestagefit2, n.boot = 200)
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit2<-onestage2$onestagefit2
onestagefit2SE<-onestage2$onestagefit2SE
## ---- eval=FALSE----------------------------------------------------------------------------------
# ranef(onestagefit2, type = "study")
## -------------------------------------------------------------------------------------------------
rancov(onestagefit2, type = "study")
## -------------------------------------------------------------------------------------------------
formula(onestagefit2, type = "Rand_stud")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit3 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat*study,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = TRUE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit3SE <- jointmetaSE(fitted = onestagefit3, n.boot = 200,
# overalleffects = list(long = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3")))))
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit3<-onestage3$onestagefit3
onestagefit3SE<-onestage3$onestagefit3SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit3)
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit3, type = "individual")
## ---- error = TRUE--------------------------------------------------------------------------------
rancov(fitted = onestagefit3, type = "study")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit4 <- jointmeta1(data = jointdat, long.formula = Y ~ 1 + time + treat + study,
# long.rand.ind = c("int", "time"), long.rand.stud = c("treat"),
# sharingstrct = "randprop", surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = TRUE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit4SE <- jointmetaSE(fitted = onestagefit4, n.boot = 200)
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit4 <- onestage4$onestagefit4
onestagefit4SE <- onestage4$onestagefit4SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit4)
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit4, type = "individual")
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit4, type = "study")
## ---- eval=FALSE----------------------------------------------------------------------------------
# ###CODE NOT RUN
# #to extract the results from a separate longitudinal model
# fitted$sepests$longests$modelfit
#
# #to extract the results from a separate survival model
# fitted$sepests$survests$modelfit
| /data/genthat_extracted_code/joineRmeta/vignettes/joineRmeta.R | no_license | surayaaramli/typeRrh | R | false | false | 21,385 | r | ## ----set-options, echo=FALSE, cache=FALSE---------------------------------------------------------
options(width = 100)
## -------------------------------------------------------------------------------------------------
library("joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("simjointmeta", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
exampledat1 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = FALSE,
ntms = 5,longmeasuretimes = c(0, 1, 2, 3, 4),
beta1 = c(1, 2, 3), beta2 = 1,
rand_ind = "intslope", rand_stud = NULL,
gamma_ind = 1,
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5),nrow = 2),
vare = 0.01, theta0 = -3, theta1 = 1,
censoring = TRUE, censlam = exp(-3),
truncation = FALSE,
trunctime = max(longmeasuretimes))
## ---- eval=FALSE----------------------------------------------------------------------------------
# exampledat1$percentevent
## -------------------------------------------------------------------------------------------------
str(exampledat1$longitudinal)
## -------------------------------------------------------------------------------------------------
str(exampledat1$survival)
## -------------------------------------------------------------------------------------------------
exampledat2 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = TRUE, ntms = 5,
longmeasuretimes = c(0, 1, 2, 3, 4), beta1 = c(1, 2, 3),
beta2 = 1, rand_ind = "intslope", rand_stud = "inttreat",
gamma_ind = c(0.5, 1), gamma_stud = c(0.5, 1),
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
sigb_stud = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
vare = 0.01, theta0 = -3, theta1 = 1, censoring = TRUE,
censlam = exp(-3), truncation = FALSE,
trunctime = max(longmeasuretimes))
## -------------------------------------------------------------------------------------------------
gamma_ind_set <- list(c(0.5, 1), c(0.4, 0.9), c(0.6, 1.1), c(0.5, 0.9), c(0.4, 1.1))
gamma_stud_set <- list(c(0.6, 1.1), c(0.5, 1), c(0.5, 0.9), c(0.4, 1.1), c(0.4, 0.9))
censlamset <- c(exp(-3), exp(-2.9), exp(-3.1), exp(-3), exp(-3.05))
theta0set <- c(-3, -2.9, -3, -2.9, -3.1)
theta1set <- c(1, 0.9, 1.1, 1, 0.9)
exampledat2 <- simjointmeta(k = 5, n = rep(500, 5), sepassoc = TRUE, ntms = 5,
longmeasuretimes = c(0, 1, 2, 3, 4), beta1 = c(1, 2, 3),
beta2 = 1, rand_ind = "intslope", rand_stud = "inttreat",
gamma_ind = gamma_ind_set, gamma_stud = gamma_stud_set,
sigb_ind = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
sigb_stud = matrix(c(1, 0.5, 0.5, 1.5), nrow = 2),
vare = 0.01, theta0 = theta0set, theta1 = theta1set,
censoring = TRUE, censlam = censlamset, truncation = FALSE,
trunctime = max(longmeasuretimes))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("simdat2", package = "joineRmeta")
# help("simdat3", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# data("simdat2")
# str(simdat2)
## -------------------------------------------------------------------------------------------------
jointdat<-tojointdata(longitudinal = simdat2$longitudinal,
survival = simdat2$survival, id = "id", longoutcome = "Y",
timevarying = c("time","ltime"), survtime = "survtime", cens = "cens",
time = "time")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("tojointdata", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
str(jointdat)
## -------------------------------------------------------------------------------------------------
jointdat$baseline$study <- as.factor(jointdat$baseline$study)
jointdat$baseline$treat <- as.factor(jointdat$baseline$treat)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("removeafter", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# str(simdat3)
## -------------------------------------------------------------------------------------------------
jointdat3<-tojointdata(longitudinal = simdat3$longitudinal,
survival = simdat3$survival, id = "id", longoutcome = "Y",
timevarying = c("time","ltime"), survtime = "survtime", cens = "cens",
time = "time")
## ---- results = "hide"----------------------------------------------------------------------------
jointdat3.1<-removeafter(data = jointdat3, longitudinal = "Y",
survival = "survtime", id = "id", time = "time")
## -------------------------------------------------------------------------------------------------
str(jointdat3)
str(jointdat3.1)
## -------------------------------------------------------------------------------------------------
sepplots <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"), type = "Both")
## ---- fig.show='hold', fig.keep='high'------------------------------------------------------------
sepplots$longplots$`studyplot.Study 3`
sepplots$eventplots[[1]]
## -------------------------------------------------------------------------------------------------
sepplots2 <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"),
type = "Both", eventby = "treat")
sepplots3 <- jointmetaplot(dataset = jointdat, study = "study", longoutcome = "Y",
longtime = "time", survtime = "survtime", cens = "cens",
id = "id", smoother = TRUE,
studynames = c("Study 1", "Study 2", "Study 3"),
type = "Event", eventconfint = TRUE)
sepplots2$eventplots$`studyplot.Study 3`
sepplots3$eventplots[[2]]
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmetaplot", package = "joineRmeta")
## ---- fig.show='hide'-----------------------------------------------------------------------------
allplot2 <- suppressWarnings(jointmetaplotall(plotlist = sepplots2, ncol = 2,
top = "All studies",
type = "Both"))
## ---- eval=FALSE----------------------------------------------------------------------------------
# allplot2$longall
# allplot2$eventsall
## ---- fig.height=10, fig.width=5------------------------------------------------------------------
allplot2$longall
allplot2$eventsall
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmetaplotall", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("joineRfits", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
joineRmodels <- joineRfits[c("joineRfit1", "joineRfit2", "joineRfit3")]
joineRmodelsSE <- joineRfits[c("joineRfit1SE", "joineRfit2SE", "joineRfit3SE")]
## -------------------------------------------------------------------------------------------------
summary(joineRmodels[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("joineRfits2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
joineRmodels2 <- joineRfits2[c("joineRfit6", "joineRfit7", "joineRfit8",
"joineRfit9", "joineRfit10")]
joineRmodels2SE <- joineRfits2[c("joineRfit6SE", "joineRfit7SE", "joineRfit8SE",
"joineRfit9SE", "joineRfit10SE")]
## -------------------------------------------------------------------------------------------------
summary(joineRmodels2[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("JMfits", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
summary(JMfits[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("JMfits2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
summary(JMfits2[[1]])
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta2", package = "joineRmeta")
## -------------------------------------------------------------------------------------------------
MAjoineRfits <- jointmeta2(fits = joineRmodels, SE = joineRmodelsSE,
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
names(MAjoineRfits$longMA)
## ---- fig.height=4, fig.width=9, warning = FALSE, message = FALSE---------------------------------
library(meta)
forest(MAjoineRfits$longMA$treat1)
## ---- error = TRUE--------------------------------------------------------------------------------
MAjoineRfits2 <- jointmeta2(fits = c(joineRmodels[1:3], joineRmodels2[1:2]),
SE = c(joineRmodelsSE[1:3],joineRmodels2SE[1:2]),
longpar = c("time", "treat1"), survpar = "treat1",
assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
MAJMfits <- jointmeta2(fits = JMfits, longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## -------------------------------------------------------------------------------------------------
MAJMfits2 <- jointmeta2(fits = JMfits2, longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## ---- error = TRUE--------------------------------------------------------------------------------
MAtest <- jointmeta2(fits = c(JMfits2[1:3], JMfits[1:2]),
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## Purled (knitr-extracted) R code from the joineRmeta package vignette.
## '## ----' lines are chunk separators; chunks marked eval=FALSE were not
## executed in the vignette and remain commented out below. Objects such as
## JMfits2, joineRfits, jointdat and onestage1..onestage4 are created by
## earlier chunks of the vignette, outside this excerpt.
## ---- error = TRUE--------------------------------------------------------------------------------
MAtest <- jointmeta2(fits = c(JMfits2[1:3], joineRfits[1:2]),
longpar = c("time", "treat1"),
survpar = "treat1", assoc = TRUE,
studynames = c("Study 1","Study 2", "Study 3"))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("onestage1", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit0 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit1 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat*study,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat*study,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1.object", package = "joineRmeta")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit1SE <- jointmetaSE(fitted = onestagefit1, n.boot = 200,
# overalleffects = list(long = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3")),
# surv = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3"))))
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("jointmeta1SE.object", package = "joineRmeta")
## The chunks below load pre-computed fits/bootstrap results from saved
## objects (onestage1..onestage4) instead of re-running the slow bootstrap.
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit1 <- onestage1$onestagefit1
onestagefit1SE <- onestage1$onestagefit1SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit1)
## -------------------------------------------------------------------------------------------------
print(onestagefit1)
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Longitudinal")
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Survival")
## -------------------------------------------------------------------------------------------------
fixef(onestagefit1, type = "Latent")
## ---- eval=FALSE----------------------------------------------------------------------------------
# ranef(onestagefit1, type = "individual")
## -------------------------------------------------------------------------------------------------
rancov(onestagefit1, type = "individual")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Longitudinal")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Survival")
## -------------------------------------------------------------------------------------------------
formula(onestagefit1, type = "Rand_ind")
## -------------------------------------------------------------------------------------------------
print(onestagefit1SE)
## -------------------------------------------------------------------------------------------------
confint(onestagefit1SE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# vcov(onestagefit1SE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# help("onestage2", package = "joineRmeta")
## Model 2 adds study-level random effects via long.rand.stud.
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit2 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat,
# long.rand.ind = c("int", "time"),
# long.rand.stud = c("study", "treat"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = F)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit2SE<-jointmetaSE(fitted = onestagefit2, n.boot = 200)
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit2<-onestage2$onestagefit2
onestagefit2SE<-onestage2$onestagefit2SE
## ---- eval=FALSE----------------------------------------------------------------------------------
# ranef(onestagefit2, type = "study")
## -------------------------------------------------------------------------------------------------
rancov(onestagefit2, type = "study")
## -------------------------------------------------------------------------------------------------
formula(onestagefit2, type = "Rand_stud")
## Model 3 uses a baseline hazard stratified by study (strat = TRUE) and no
## study-level random effects.
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit3 <- jointmeta1(data = jointdat,
# long.formula = Y ~ 1 + time + treat*study,
# long.rand.ind = c("int", "time"),
# sharingstrct = "randprop",
# surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = TRUE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit3SE <- jointmetaSE(fitted = onestagefit3, n.boot = 200,
# overalleffects = list(long = list(c("treat1", "treat1:study2"),
# c("treat1", "treat1:study3")))))
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit3<-onestage3$onestagefit3
onestagefit3SE<-onestage3$onestagefit3SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit3)
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit3, type = "individual")
## The next chunk is marked error = TRUE: model 3 was fitted without
## long.rand.stud, so asking for study-level random-effect covariances
## demonstrates an expected failure.
## ---- error = TRUE--------------------------------------------------------------------------------
rancov(fitted = onestagefit3, type = "study")
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit4 <- jointmeta1(data = jointdat, long.formula = Y ~ 1 + time + treat + study,
# long.rand.ind = c("int", "time"), long.rand.stud = c("treat"),
# sharingstrct = "randprop", surv.formula = Surv(survtime, cens) ~ treat,
# study.name = "study", strat = TRUE)
## ---- eval=FALSE----------------------------------------------------------------------------------
# onestagefit4SE <- jointmetaSE(fitted = onestagefit4, n.boot = 200)
## -------------------------------------------------------------------------------------------------
#extract the saved model fit and bootstrap results
onestagefit4 <- onestage4$onestagefit4
onestagefit4SE <- onestage4$onestagefit4SE
## -------------------------------------------------------------------------------------------------
summary(onestagefit4)
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit4, type = "individual")
## -------------------------------------------------------------------------------------------------
rancov(fitted = onestagefit4, type = "study")
## ---- eval=FALSE----------------------------------------------------------------------------------
# ###CODE NOT RUN
# #to extract the results from a separate longitudinal model
# fitted$sepests$longests$modelfit
#
# #to extract the results from a separate survival model
# fitted$sepests$survests$modelfit
|
## Shiny app global setup: loads and prepares the data sets shared by the
## app session -- daily confirmed cases per local authority, MSOA boundary
## polygons, the latest 7-day MSOA case counts, and population denominators.
library(tidyverse) ; library(lubridate) ; library(sf)
# Confirmed cases
# Source: Public Health England
# URL: https://coronavirus.data.gov.uk
# Daily lab-confirmed cases for lower-tier local authorities ("ltla").
# Missing dates in each area's series are filled in as explicit zero-case
# days before the cumulative count is computed.
cases <- read_csv("https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv") %>%
filter(`Area type` == "ltla") %>%
mutate(`Specimen date` = as.Date(`Specimen date`, format = "%Y-%m-%d")) %>%
select(date = `Specimen date`,
area_code = `Area code`,
area_name = `Area name`,
new_cases = `Daily lab-confirmed cases`) %>%
arrange(date) %>%
group_by(area_code, area_name) %>%
complete(date = seq.Date(min(date), max(date), by = "day")) %>%
mutate(new_cases = replace_na(new_cases, 0),
cum_cases = cumsum(new_cases)) %>%
ungroup() %>%
fill(area_name)
# MSOAs in England
# Source: ONS Open Geography Portal
# URL: https://geoportal.statistics.gov.uk/datasets/middle-layer-super-output-areas-december-2011-boundaries-ew-bgc
# MSOA boundary polygons, read from a local GeoJSON extract.
msoa <- st_read("data/msoa.geojson")
# MSOA lookup
# Source: ONS Open Geography Portal; House of Commons Library
# Maps MSOA codes (msoa11cd) to readable names and local authorities (lad19*).
msoa_lookup <- read_csv("data/msoa_lookup.csv")
# Latest 7 days of cases by MSOA
# Source: Public Health England
# URL: https://coronavirus.data.gov.uk/
# Keeps only the most recent rolling 7-day window. NA counts/rates are set
# to zero (presumably suppressed small counts -- TODO confirm with PHE docs).
msoa_cases <- read_csv("https://coronavirus.data.gov.uk/downloads/msoa_data/MSOAs_latest.csv") %>%
filter(date == max(date)) %>%
select(msoa11cd = areaCode, date, n = newCasesBySpecimenDateRollingSum, rate = newCasesBySpecimenDateRollingRate) %>%
mutate(n = replace_na(n, 0),
rate = replace_na(rate, 0)) %>%
left_join(msoa_lookup, by = "msoa11cd") %>%
select(date, msoa11cd, msoa11hclnm, lad19cd, lad19nm, n, rate) %>%
# combine Hackney and City of London / Cornwall and Isles of Scilly
mutate(lad19nm = as.character(lad19nm),
lad19nm = case_when(
lad19nm %in% c("Cornwall", "Isles of Scilly") ~ "Cornwall and Isles of Scilly",
lad19nm %in% c("City of London", "Hackney") ~ "Hackney and City of London",
TRUE ~ lad19nm))
# Mid-2019 population estimates
# Source: Nomis / ONS
# URL: https://www.nomisweb.co.uk/datasets/pestsyoala
population <- read_csv("data/population.csv")
| /global.R | permissive | VincenzoM98/covid-19 | R | false | false | 2,136 | r | library(tidyverse) ; library(lubridate) ; library(sf)
# Confirmed cases
# Source: Public Health England
# URL: https://coronavirus.data.gov.uk
cases <- read_csv("https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv") %>%
filter(`Area type` == "ltla") %>%
mutate(`Specimen date` = as.Date(`Specimen date`, format = "%Y-%m-%d")) %>%
select(date = `Specimen date`,
area_code = `Area code`,
area_name = `Area name`,
new_cases = `Daily lab-confirmed cases`) %>%
arrange(date) %>%
group_by(area_code, area_name) %>%
complete(date = seq.Date(min(date), max(date), by = "day")) %>%
mutate(new_cases = replace_na(new_cases, 0),
cum_cases = cumsum(new_cases)) %>%
ungroup() %>%
fill(area_name)
# MSOAs in England
# Source: ONS Open Geography Portal
# URL: https://geoportal.statistics.gov.uk/datasets/middle-layer-super-output-areas-december-2011-boundaries-ew-bgc
msoa <- st_read("data/msoa.geojson")
# MSOA lookup
# Source: ONS Open Geography Portal; House of Commons Library
msoa_lookup <- read_csv("data/msoa_lookup.csv")
# Latest 7 days of cases by MSOA
# Source: Public Health England
# URL: https://coronavirus.data.gov.uk/
msoa_cases <- read_csv("https://coronavirus.data.gov.uk/downloads/msoa_data/MSOAs_latest.csv") %>%
filter(date == max(date)) %>%
select(msoa11cd = areaCode, date, n = newCasesBySpecimenDateRollingSum, rate = newCasesBySpecimenDateRollingRate) %>%
mutate(n = replace_na(n, 0),
rate = replace_na(rate, 0)) %>%
left_join(msoa_lookup, by = "msoa11cd") %>%
select(date, msoa11cd, msoa11hclnm, lad19cd, lad19nm, n, rate) %>%
# combine Hackney and City of London / Cornwall and Isles of Scilly
mutate(lad19nm = as.character(lad19nm),
lad19nm = case_when(
lad19nm %in% c("Cornwall", "Isles of Scilly") ~ "Cornwall and Isles of Scilly",
lad19nm %in% c("City of London", "Hackney") ~ "Hackney and City of London",
TRUE ~ lad19nm))
# Mid-2019 population estimates
# Source: Nomis / ONS
# URL: https://www.nomisweb.co.uk/datasets/pestsyoala
population <- read_csv("data/population.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Oysters.R
\docType{data}
\name{Oysters}
\alias{Oysters}
\title{A simulated dataset of oyster catch}
\format{
A data frame with 21 rows and 2 variables:
\describe{
\item{Density}{Number of boat passes through the oyster bed.}
\item{Harvest}{The number of bushels of oysters harvested.}
}
}
\usage{
Oysters
}
\description{
This data describes an oyster harvest. Each observation corresponds to
a hectare of oyster beds; the Density describes the intensity with which
the bed was harvested (number of passes by a harvesting boat) and the Harvest is
the number of bushels obtained.
}
\keyword{datasets}
| /man/Oysters.Rd | no_license | dereksonderegger/dsData | R | false | true | 683 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Oysters.R
\docType{data}
\name{Oysters}
\alias{Oysters}
\title{A simulated dataset of oyster catch}
\format{
A data frame with 21 rows and 2 variables:
\describe{
\item{Density}{Number of boat passes through the oyster bed.}
\item{Harvest}{The number of bushels of oysters harvested.}
}
}
\usage{
Oysters
}
\description{
This data describes an oyster harvest. Each observation corresponds to
a hectare of oyster beds; the Density describes the intensity with which
the bed was harvested (number of passes by a harvesting boat) and the Harvest is
the number of bushels obtained.
}
\keyword{datasets}
|
## Session reset and dependencies.
## NOTE(review): rm(list = ls()) wipes the user's workspace and windows()
## exists only on Windows builds of R -- both make the script non-portable.
## Kept as-is here; comments only.
rm( list=ls() )
graphics.off()
windows(record=TRUE)
# read.dbc reads the compressed .dbc files distributed by DATASUS.
library(read.dbc)
library(tidyverse)
#------------------------------------------------
# Brazil data has to be processed state by state
#------------------------------------------------
# Two-letter codes of the 27 Brazilian federative units (26 states + DF).
state = c("AC", "AL", "AM", "AP", "BA",
"CE", "DF", "ES", "GO", "MA",
"MG", "MS", "MT", "PA", "PB",
"PE", "PI", "PR", "RJ", "RN", "RO",
"RR", "RS", "SC", "SE", "SP", "TO")
# Macro-region of each state, position-matched to `state`:
# N = North, NE = Northeast, CW = Center-West, SE = Southeast, S = South.
region = c('N','NE','N','N','NE',
'NE','CW','SE','CW','NE',
'SE','CW','CW','N','NE',
'NE','NE','S','SE','NE','N',
'N','S','S','NE','SE','CW'
)
# Lookup table: index, state code, macro-region code.
geo = data.frame( i=seq(state), state, region)
#---------------------------------------
# Day of the year (1-366) for a date string in ddmmyyyy format.
day_number = function(x) {
  parsed = as.Date(x, '%d%m%Y')
  as.numeric(strftime(parsed, format = '%j'))
}
# Day of the week as a character digit ("0" = Sunday ... "6" = Saturday)
# for a date string in ddmmyyyy format.
day_of_week = function(x) {
  strftime(as.Date(x, '%d%m%Y'), format = '%w')
}
#--------------------------------------
# assemble death summaries, looping
# over years and states
#--------------------------------------
# Accumulator for the hourly death counts built inside the loop below;
# one batch of rows is appended per state-year via rbind.
D = data.frame() # summary
# TRUE for the four calendar days bracketing New Year's Eve:
# 30-31 December and 1-2 January. Vectorized over month and dom.
is_end_of_year = function(month, dom) {
  late_december = month == 12 & dom %in% c(30, 31)
  early_january = month == 1 & dom %in% c(1, 2)
  late_december | early_january
}
# hour of death is only available from 2006 on
years = 2006:2016
nyrs = length(years)
# For every state-year: read the DATASUS SIM mortality file, keep deaths
# that occurred on 30-31 Dec or 1-2 Jan with a valid hour, and reduce to
# hourly counts per circumstance, appending the result to D.
for (this_year in years) {
for (i in seq(state)) {
this_state = state[i]
this_region = region[i]
# progress message, one line per state-year
print(paste(this_year,this_state))
# file naming convention: DO<state><year>.dbc, e.g. DOSP2010.dbc
fname = paste0('./SIM-raw-data/DO',this_state,this_year,'.dbc')
tmp = read.dbc(fname) %>%
# HORAOBITO is an HHMM value: integer division by 100 extracts the hour.
# DTOBITO is a ddmmyyyy date string, parsed by day_number/day_of_week.
mutate(state = this_state,
region = this_region,
year = this_year,
hour = as.numeric(as.character(HORAOBITO)) %/% 100,
month = as.numeric(substr(DTOBITO,3,4)),
dom = as.numeric(substr(DTOBITO,1,2)),
doy = day_number(DTOBITO),
dow = day_of_week(DTOBITO),
# CIRCOBITO codes 1-4 and 9 label the circumstance of death.
circumstance = factor(CIRCOBITO,
levels=c(1:4,9),
ordered=FALSE,
labels=c('Accident',
'Suicide','Homicide',
'Other','Unknown'))
) %>%
# Keep TIPOBITO == 2 (non-fetal deaths per SIM coding -- NOTE(review):
# confirm against the SIM data dictionary), known sex, a valid hour,
# and only the four days around New Year.
filter(TIPOBITO == 2,
SEXO %in% 1:2,
hour %in% 0:23,
is_end_of_year(month,dom)
) %>%
select(state:circumstance,
date = DTOBITO,
agecode = IDADE,
sex = SEXO,
res_municode = CODMUNRES,
occ_municode = CODMUNOCOR,
cause = CAUSABAS)
keep = tmp %>%
# Southern = TRUE for the South and Southeast macro-regions.
mutate(Southern = (region %in% c('S','SE'))) %>%
group_by(year,month, dom, doy, dow, hour, Southern, circumstance) %>%
summarize(deaths=n()) %>%
ungroup()
# NOTE(review): growing D with rbind inside a double loop is O(n^2);
# fine at this scale, but collecting into a list + bind_rows is cleaner.
D = rbind(D, keep)
} # this_state
} # this_year
# Hours elapsed since 00:00 on 30 December (H = 0) for the four days kept
# by is_end_of_year(): 30-31 Dec map to 0-47 and 1-2 Jan to 48-95.
# Vectorized; any month other than 12 or 1 yields 0.
calcH = function(month, dom, hour) {
  december = ifelse(month == 12, 24 * (dom - 30) + hour, 0)
  january = ifelse(month == 1, 48 + 24 * (dom - 1) + hour, 0)
  december + january
}
# Attach the 0-95 hour offset since 00:00 on 30 Dec (see calcH above)
# and save the full per-state-year table.
D = D %>%
mutate( H = calcH(month,dom,hour) )
write.csv(D, file='reveillon.csv')
# sum over states and years
# Collapse to total deaths per hour offset and circumstance.
df = D %>%
group_by(H, circumstance) %>%
summarize( deaths=sum(deaths)) %>%
ungroup()
write.csv(df, file='reveillon-hours.csv')
#########################################
# first ALL deaths -- in 6 hour blocks
# Total deaths per hour offset, all circumstances combined.
tmp = df %>%
group_by(H) %>%
summarize(deaths = sum(deaths))
# Hourly series across 30 Dec - 2 Jan; the orange band marks midnight of
# New Year's Eve (H = 48), solid lines mark midnights, dashed lines noons.
ggplot( data=tmp, aes(x=H+.5, y=deaths)) +
geom_point(size=3, color='royalblue') +
geom_line(color='royalblue') +
theme_bw() +
labs(title='Registered Deaths by Hour Before/After New Year\'s Eve',
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) +
geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
ggsave(file='reveillon-all.png',
width=11, height=8.5)
# deaths by cause on one plot
# Same hourly series, split by circumstance (external causes only).
tmp = df %>%
filter(circumstance %in% c('Accident','Suicide','Homicide')) %>%
group_by(H,circumstance) %>%
summarize(deaths = sum(deaths))
ggplot( data=tmp, aes(x=H+.5, y=deaths, color=circumstance)) +
geom_point(size=3) +
geom_line() +
theme_bw() +
labs(title='Registered Deaths by Hour Before/After New Year\'s Eve',
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) +
geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
ggsave(file='reveillon-ASH.png',
width=11, height=8.5)
# separate for each cause
# One standalone figure per circumstance, saved as reveillon-<cause>.png.
for (k in c('Suicide','Homicide','Accident')) {
tmp = df %>%
filter(circumstance== k) %>%
group_by(H) %>%
summarize(deaths = sum(deaths))
G= ggplot( data=tmp, aes(x=H+.5, y=deaths)) +
geom_point(size=3, color='royalblue') +
geom_line(color='royalblue') +
theme_bw() +
labs(title=paste0(k,'s by Hour Before/After New Year\'s Eve'),
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) + geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
print(G)
ggsave(file=paste0('reveillon-',k,'.png'),
width=11, height=8.5)
}
| /BR-deaths/reveillon.R | no_license | tbernardesfaria/bonecave | R | false | false | 7,071 | r | rm( list=ls() )
graphics.off()
windows(record=TRUE)
library(read.dbc)
library(tidyverse)
#------------------------------------------------
# Brazil data has to be processed state by state
#------------------------------------------------
state = c("AC", "AL", "AM", "AP", "BA",
"CE", "DF", "ES", "GO", "MA",
"MG", "MS", "MT", "PA", "PB",
"PE", "PI", "PR", "RJ", "RN", "RO",
"RR", "RS", "SC", "SE", "SP", "TO")
region = c('N','NE','N','N','NE',
'NE','CW','SE','CW','NE',
'SE','CW','CW','N','NE',
'NE','NE','S','SE','NE','N',
'N','S','S','NE','SE','CW'
)
geo = data.frame( i=seq(state), state, region)
#---------------------------------------
day_number = function(x) {
tmp = as.Date(x, '%d%m%Y') %>%
strftime(format='%j') %>%
as.numeric()
return(tmp)
}
day_of_week = function(x) {
tmp = as.Date(x, '%d%m%Y') %>%
strftime(format='%w')
return(tmp)
}
#--------------------------------------
# assemble death summaries, looping
# over years and states
#--------------------------------------
D = data.frame() # summary
is_end_of_year = function(month, dom) {
((month==12) & (dom %in% 30:31)) |
((month==1 ) & (dom %in% 1:2))
}
# hour of death is only available from 2006 on
years = 2006:2016
nyrs = length(years)
for (this_year in years) {
for (i in seq(state)) {
this_state = state[i]
this_region = region[i]
print(paste(this_year,this_state))
fname = paste0('./SIM-raw-data/DO',this_state,this_year,'.dbc')
tmp = read.dbc(fname) %>%
mutate(state = this_state,
region = this_region,
year = this_year,
hour = as.numeric(as.character(HORAOBITO)) %/% 100,
month = as.numeric(substr(DTOBITO,3,4)),
dom = as.numeric(substr(DTOBITO,1,2)),
doy = day_number(DTOBITO),
dow = day_of_week(DTOBITO),
circumstance = factor(CIRCOBITO,
levels=c(1:4,9),
ordered=FALSE,
labels=c('Accident',
'Suicide','Homicide',
'Other','Unknown'))
) %>%
filter(TIPOBITO == 2,
SEXO %in% 1:2,
hour %in% 0:23,
is_end_of_year(month,dom)
) %>%
select(state:circumstance,
date = DTOBITO,
agecode = IDADE,
sex = SEXO,
res_municode = CODMUNRES,
occ_municode = CODMUNOCOR,
cause = CAUSABAS)
keep = tmp %>%
mutate(Southern = (region %in% c('S','SE'))) %>%
group_by(year,month, dom, doy, dow, hour, Southern, circumstance) %>%
summarize(deaths=n()) %>%
ungroup()
D = rbind(D, keep)
} # this_state
} # this_year
# add a variable for hours since 00:00 on 30 Dec
calcH = function(month,dom,hour) {
(month==12) * ( 24*(dom-30) + hour ) +
(month==1) * ( 48 + 24*(dom-1) + hour)
}
D = D %>%
mutate( H = calcH(month,dom,hour) )
write.csv(D, file='reveillon.csv')
# sum over states and years
df = D %>%
group_by(H, circumstance) %>%
summarize( deaths=sum(deaths)) %>%
ungroup()
write.csv(df, file='reveillon-hours.csv')
#########################################
# first ALL deaths -- in 6 hour blocks
tmp = df %>%
group_by(H) %>%
summarize(deaths = sum(deaths))
ggplot( data=tmp, aes(x=H+.5, y=deaths)) +
geom_point(size=3, color='royalblue') +
geom_line(color='royalblue') +
theme_bw() +
labs(title='Registered Deaths by Hour Before/After New Year\'s Eve',
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) +
geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
ggsave(file='reveillon-all.png',
width=11, height=8.5)
# deaths by cause on one plot
tmp = df %>%
filter(circumstance %in% c('Accident','Suicide','Homicide')) %>%
group_by(H,circumstance) %>%
summarize(deaths = sum(deaths))
ggplot( data=tmp, aes(x=H+.5, y=deaths, color=circumstance)) +
geom_point(size=3) +
geom_line() +
theme_bw() +
labs(title='Registered Deaths by Hour Before/After New Year\'s Eve',
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) +
geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
ggsave(file='reveillon-ASH.png',
width=11, height=8.5)
# separate for each cause
for (k in c('Suicide','Homicide','Accident')) {
tmp = df %>%
filter(circumstance== k) %>%
group_by(H) %>%
summarize(deaths = sum(deaths))
G= ggplot( data=tmp, aes(x=H+.5, y=deaths)) +
geom_point(size=3, color='royalblue') +
geom_line(color='royalblue') +
theme_bw() +
labs(title=paste0(k,'s by Hour Before/After New Year\'s Eve'),
subtitle='Brazil 2006-2016',
caption='Source: SIM/Datasus http://www.datasus.gov.br',
x='Hour',
y='Total Deaths (all years)') +
scale_x_continuous(breaks=seq(0,90,6),
minor_breaks =NULL,
labels=c('\n30 Dec','6','12','18',
'\n31 Dec','6','12','18',
'\n1 Jan', '6','12','18',
'\n2 Jan', '6','12','18')) + geom_vline(xintercept=seq(0,96,24)) +
geom_vline(xintercept=48, lwd=2, color='orange',alpha=.50) +
geom_vline(xintercept=12+seq(0,72,24),lty=2,col='grey')
print(G)
ggsave(file=paste0('reveillon-',k,'.png'),
width=11, height=8.5)
}
|
#' calculate circle_circle layout manually
#'
#' Re-positions the nodes of each level in `index` onto concentric circles:
#' nodes of the i-th level are placed on a circle of radius i, ordered by
#' branch and short name, with each node's angle set to the midpoint of its
#' `node.count` share of the full circle.
#'
#' @inheritParams gather_graph
#' @param layout a layout object (e.g. from `ggraph::create_layout()`) whose
#' node data contains `node.level`, `node.branch`, `node.short_name` and
#' `node.count` columns, as produced by `gather_graph_node()`
#'
#' @return the input `layout` with its `x` and `y` coordinates replaced by
#' the concentric-circle positions; returned unchanged (with a warning)
#' when `index` is `NULL`
#' @export
#'
#' @examples
#' library(tidygraph)
#' n <- 1000
#' microbiome <- data.frame(
#'   otu = paste("OTU",1:n,sep="_"),
#'   phylum = sample(paste("phylum",1:5,sep="_"),n,replace = T),
#'   class = sample(paste("class",6:30,sep="_"),n,replace=T),
#'   order = sample(paste("order",31:80,sep="_"),n,replace = T),
#'   value = runif(n,min=1,max=1000)
#' )
#' index_micro <- c("phylum","class","order")
#' nodes_micro <- gather_graph_node(microbiome,index=index_micro, root="Bac")
#' edges_micro <- gather_graph_edge(microbiome,index=index_micro, root="Bac")
#' graph_micro <- tbl_graph(nodes_micro,edges_micro)
#' layout_micro <- create_layout(graph_micro,layout = "circle")
#' layout <- circle_circle(layout_micro,index=index_micro)
circle_circle <- function(layout, index = NULL){
  if (is.null(index)){
    warning("index is NULL, do nothing")
    return(layout)
  }
  # One data frame of repositioned nodes per level; level i sits on radius i.
  # (Renamed from `list`, which shadowed base::list.)
  level_layouts <- lapply(seq_along(index), function(i){
    idx <- index[[i]]
    layout %>%
      filter(node.level == idx) %>%
      arrange(node.branch, node.short_name) %>%
      # Angle = midpoint of this node's cumulative node.count share.
      mutate(x = cos((cumsum(node.count) - node.count * 0.5) / sum(node.count) * 2 * pi) * i,
             y = sin((cumsum(node.count) - node.count * 0.5) / sum(node.count) * 2 * pi) * i)
  })
  # Assign via `layout[] <-` so the layout keeps its class and attributes.
  layout[] <- do.call(bind_rows, level_layouts)
  return(layout)
}
| /R/circle_circle.R | no_license | cfc424/ccgraph | R | false | false | 1,413 | r | #' calculate circle_circle layout manually
#'
#' @inheritParams gather_graph
#' @param layout a layout object
#'
#' @return
#' @export
#'
#' @examples
#' libray(tidygraph)
#' n <- 1000
#' microbiome <- data.frame(
#' otu = paste("OTU",1:n,sep="_"),
#' phylum = sample(paste("phylum",1:5,sep="_"),n,replace = T),
#' class = sample(paste("class",6:30,sep="_"),n,replace=T),
#' order = sample(paste("order",31:80,sep="_"),n,replace = T),
#' value = runif(n,min=1,max=1000)
#' )
#' index_micro <- c("phylum","class","order")
#' nodes_micro <- gather_graph_node(microbiome,index=index_micro, root="Bac")
#' edges_micro <- gather_graph_edge(microbiome,index=index_micro, root="Bac")
#' graph_micro <- tbl_graph(nodes_micro,edges_micro)
#' layout_micro <- create_layout(graph_micro,layout = "circle")
#' layout <- circle_circle(layout_micro,index=index_micro)
circle_circle <- function(layout,index=NULL){
if (is.null(index)){
warning("index is NULL, do nothing")
return(layout)
}
list <- lapply(seq_along(index),function(i){
idx <- index[[i]]
layout %>%
filter(node.level==idx) %>%
arrange(node.branch,node.short_name) %>%
mutate(x=cos((cumsum(node.count)-node.count*0.5)/sum(node.count)*2*pi)*i,
y=sin((cumsum(node.count)-node.count*0.5)/sum(node.count)*2*pi)*i)
})
layout[] <- do.call(bind_rows,list)
return(layout)
}
|
# Design points and the true generating coefficients (intercept, linear,
# quadratic) of the simulated model; runSim() redefines its own local copies.
xUnique = 1:5
trueCoeff = c(0, 1, 1)
getData = function(coefs = c(0, 1, 1), xs = 1:5, dupl = 10,
                   sd = 5, seed = 2222){
  ### Simulate the artificial data set: y = coefs[1] + coefs[2]*x +
  ### coefs[3]*x^2 + Normal(0, sd) noise, with every value of xs repeated
  ### dupl times. The seed makes the draw reproducible.
  set.seed(seed)
  x = rep(xs, each = dupl)
  signal = coefs[1] + coefs[2] * x + coefs[3] * x^2
  noise = rnorm(length(x), 0, sd)
  data.frame(x = x, y = signal + noise)
}
###
genBootY = function(x, y, rep = TRUE){
  ### Bootstrap the y values within each unique x value.
  ### For every unique x, draw a sample of the same size (with replacement
  ### by default) from the corresponding y values; return the resampled y
  ### vector, the same length as y. Assumes the xs are sorted.
  ###
  ### Resampling is done by sampling INDICES rather than values: calling
  ### sample(v, ...) directly is buggy when a group holds a single y value,
  ### because sample(k, 1) draws from 1:k instead of returning k.
  boot.groups = tapply(y, x, function(v) {
    v[sample.int(length(v), length(v), replace = rep)]
  })
  unlist(boot.groups, use.names = FALSE)
}
genBootR = function(fit, err, rep = TRUE){
  ### Residual bootstrap: add a random sample of the residuals `err` (with
  ### replacement by default) to the fitted values `fit`.
  ### Returns a vector of bootstrapped y values the same length as fit.
  ###
  ### Fixes over the original: vectorized (no element-by-element loop);
  ### exactly length(fit) errors are drawn (the original drew length(err),
  ### producing NAs when err was shorter than fit); sampling is done via
  ### indices so a length-1 err is handled correctly (sample(k, ...) would
  ### otherwise draw from 1:k); and sample.int's `replace` argument is
  ### spelled out instead of partially matched as `rep=`.
  fit + err[sample.int(length(err), length(fit), replace = rep)]
}
fitModel = function(x, y, degree = 1){
  ### Fit a polynomial model of the given degree by least squares:
  ### degree 1 fits y ~ x; degree 2 fits y ~ x + I(x^2).
  ### x and y are numeric vectors of the same length.
  ### Returns the fitted coefficients as a named numeric vector.
  ###
  ### An unsupported degree now raises a clear error instead of the
  ### original "object 'coeff' not found" failure.
  if (degree == 1) {
    fit = lm(y ~ x)
  } else if (degree == 2) {
    fit = lm(y ~ x + I(x^2))
  } else {
    stop("degree must be 1 or 2")
  }
  fit$coefficients
}
oneBoot = function(data, fit = NULL, degree = 1){
  ### Run one bootstrap replication and return its fitted coefficients.
  ### data:   two-column data frame (x, y), e.g. from getData().
  ### fit:    NULL for the case-resampling bootstrap (resample y within
  ###         each x via genBootY), or a two-column matrix of fitted
  ###         values and residuals for the residual bootstrap (genBootR).
  ### degree: 1 for a line, 2 for a quadratic; passed on to fitModel().
  xvals = data[, 1]
  if (is.null(fit)) {
    ynew = genBootY(xvals, data[, 2])
  } else {
    ynew = genBootR(fit[, 1], fit[, 2])
  }
  fitModel(xvals, y = ynew, degree)
}
repBoot = function(data, B = 1000){
  ### Run four bootstrap experiments of B replications each:
  ###   1. case resampling, linear fit
  ###   2. case resampling, quadratic fit
  ###   3. residual resampling, linear fit
  ###   4. residual resampling, quadratic fit
  ### Returns a list of length 4; element i is a matrix of bootstrap
  ### coefficients with B COLUMNS and 2 rows (line) or 3 rows (quadratic),
  ### i.e. coefficients in rows, one replication per column.
  ###
  ### Fixes over the original: (a) no more growing vectors with c() inside
  ### loops -- replicate() collects each coefficient vector as a column;
  ### (b) the return orientation now matches both this function's stated
  ### contract and what bootPlot() requires (it branches on nrow(coeff)
  ### being 2 or 3); the original returned B-row data frames, so runSim()
  ### silently drew no bootstrap lines at all.
  x = data[, 1]
  y = data[, 2]
  # Fitted values and residuals for the residual bootstrap, packed as the
  # two-column (fit, err) matrices that oneBoot() consumes.
  lin.fit = lm(y ~ x)
  quad.fit = lm(y ~ x + I(x^2))
  lin.mat = cbind(lin.fit$fitted.values, lin.fit$residuals)
  quad.mat = cbind(quad.fit$fitted.values, quad.fit$residuals)
  list(
    replicate(B, oneBoot(data, fit = NULL, degree = 1)),
    replicate(B, oneBoot(data, fit = NULL, degree = 2)),
    replicate(B, oneBoot(data, fit = lin.mat, degree = 1)),
    replicate(B, oneBoot(data, fit = quad.mat, degree = 2))
  )
}
bootPlot = function(x, y, coeff, trueCoeff){
### x and y are the original data
### coeff is a matrix from repBoot
### trueCoeff contains the true coefficients
### that generated the data
### Make a scatter plot of data
### Add lines or curves for each row in coeff
### Use transparency
### You should use mapply to construct all
### 1000 of the bootstrapped lines of best fit
### Have a look at ?mapply for details.
### This can be done in ggplot2 or base graphics.
### Use trueCoeff to add true line/curve -
### Make the true line/curve stand out
### NOTE(review): coeff must hold the COEFFICIENTS IN ROWS (2 rows for a
### line, 3 for a quadratic) with one bootstrap replication per column;
### if coeff arrives with B rows instead, both nrow() tests below fail
### and no bootstrap lines are drawn at all.
plot(x,y)
# Line fits: row 1 = intercepts, row 2 = slopes; one translucent abline each.
if (nrow(coeff) == 2){
mapply(abline, coeff[1,],coeff[2,],col=rgb(0,0.2,.4,0.1))
}
# Quadratic fits: rows 1-3 = intercept, linear and quadratic coefficients.
if (nrow(coeff) == 3){
mapply(function(a,b,c){curve(a+b*x + c*(x^2),col=rgb(0,0.2,.4,0.1), add=TRUE)}, a=coeff[1,],b=coeff[2,],c=coeff[3,])
}
# Overlay the true generating curve in bold red so it stands out.
curve(trueCoeff[1]+trueCoeff[2]*x + trueCoeff[3]*(x^2),col="red",add=TRUE,lwd=3)
}
### Run your simulation by calling this function
### This function doesn't need any changing
### It simulates one data set, runs the four bootstrap experiments via
### repBoot(), draws one diagnostic panel per experiment in a 2x2 grid via
### bootPlot(), and returns the list of bootstrap coefficients.
runSim = function() {
# design points and true (intercept, linear, quadratic) coefficients
xUnique = 1:5
trueCoeff = c(0, 1, 1)
myData = getData(coefs = trueCoeff, xs = xUnique)
expt = repBoot(data = myData)
# 2 x 2 grid: one panel per bootstrap experiment
par(mfrow = c(2, 2))
for (i in 1:4){
bootPlot(myData$x, myData$y,
coeff = expt[[i]], trueCoeff)
}
return(expt)
}
| /hw8/hw8.r | no_license | j170382276/stat133 | R | false | false | 4,970 | r | xUnique = 1:5
trueCoeff = c(0, 1, 1)
getData = function(coefs = c(0, 1, 1), xs = 1:5, dupl = 10,
sd = 5, seed=2222){
### This function creates the artificial data
set.seed(seed)
x = rep(xs, each = dupl)
y = coefs[1] + coefs[2]*x + coefs[3] * x^2 +
rnorm(length(x), 0, sd)
return(data.frame(x, y))
}
###
genBootY = function(x, y, rep = TRUE){
### For each unique x value, take a sample of the
### corresponding y values, with replacement.
### Return a vector of random y values the same length as y
### You can assume that the xs are sorted
### Hint use tapply here!
little.sample=tapply(y,x,function(hola) sample(hola,length(hola),replace=rep))
hola=unlist(little.sample,use.names=FALSE)
}
genBootR = function(fit, err, rep = TRUE){
### Sample the errors
### Add the errors to the fit to create a y vector
### Return a vector of y values the same length as fit
### HINT: It can be easier to sample the indices than the values
fit.with.errors=c(rep(0,length(fit)))
sample.errors=sample(err,length(err),rep=rep)
for (i in 1:length(fit)){
fit.with.errors[i]=fit[i]+sample.errors[i]
}
return(fit.with.errors)
}
fitModel = function(x, y, degree = 1){
### use the lm function to fit a line of a quadratic
### e.g. y ~ x or y ~ x + I(x^2)
### y and x are numeric vectors of the same length
### Return the coefficients as a vector
### HINT: Take a look at the repBoot function to see how to use lm()
if(degree==1){
hola=lm(y~x)
coeff=hola$coefficients
}
if(degree==2){
coeff=lm(y~x+I(x^2))$coefficients
}
return(coeff)
}
oneBoot = function(data, fit = NULL, degree = 1){
### data are either your data (from call to getData)
### OR fit and errors from fit of line to data
### OR fit and errors from fit of quadratic to data
if(is.null(fit)){
ynew=genBootY(data[,1],data[,2])
}else{ynew=genBootR(fit[,1],fit[,2])}
fitModel(data[,1],y=ynew,degree)
### Use fitModel to fit a model to this bootstrap Y
}
repBoot = function(data, B = 1000){
### Run the four bootstrap conditions B times each:
###   1) resample (x, y) pairs, fit a line
###   2) resample (x, y) pairs, fit a quadratic
###   3) resample residuals of the line fit
###   4) resample residuals of the quadratic fit
### Return a list of length 4; each element is a data frame with B rows
### and one column per coefficient (V1, V2[, V3]), matching the
### original's output format.
### Improvements: each lm() is fit once instead of twice, and results
### are collected with replicate() instead of growing vectors with c()
### inside a loop (which was O(B^2)).
x <- data[, 1]
y <- data[, 2]
line_fit <- lm(y ~ x)
quad_fit <- lm(y ~ x + I(x^2))
### column 1 = fitted values, column 2 = residuals (what oneBoot/genBootR expect)
l <- cbind(fitted(line_fit), resid(line_fit))
q <- cbind(fitted(quad_fit), resid(quad_fit))
### One bootstrap condition, repeated B times. unname() strips the
### coefficient names so as.data.frame() yields V1, V2, ... columns,
### exactly as the original matrix(..., byrow = TRUE) construction did.
boot_set <- function(fit, degree){
as.data.frame(t(replicate(B, unname(oneBoot(data, fit = fit, degree = degree)))))
}
list(boot_set(NULL, 1),
boot_set(NULL, 2),
boot_set(l, 1),
boot_set(q, 2))
}
bootPlot = function(x, y, coeff, trueCoeff){
### x and y are the original data.
### coeff holds one set of bootstrap coefficients per replicate, either
### as a matrix with coefficients in rows (2 or 3 rows, B columns) or --
### as actually produced by repBoot -- a data frame with B rows and the
### coefficients in columns. The original only handled the row-wise
### orientation, so repBoot output drew no bootstrap lines at all;
### normalize the orientation first (bug fix).
### trueCoeff contains the true coefficients that generated the data.
### Scatter-plots the data, overlays every bootstrap line/curve with a
### transparent color, then draws the true curve on top in bold red.
cf <- as.matrix(coeff)
if (!nrow(cf) %in% c(2, 3) && ncol(cf) %in% c(2, 3)) {
cf <- t(cf)
}
boot_col <- rgb(0, 0.2, 0.4, 0.1) # transparent so overlapping lines accumulate
plot(x, y)
if (nrow(cf) == 2){
mapply(abline, cf[1, ], cf[2, ], col = boot_col)
}
if (nrow(cf) == 3){
mapply(function(a, b, c){curve(a + b*x + c*(x^2), col = boot_col, add = TRUE)},
a = cf[1, ], b = cf[2, ], c = cf[3, ])
}
### the true relationship, drawn last so it sits on top
curve(trueCoeff[1] + trueCoeff[2]*x + trueCoeff[3]*(x^2),
col = "red", add = TRUE, lwd = 3)
}
### Run your simulation by calling this function
### This function doesn't need any changing
### Generates data from the true quadratic y = 0 + 1*x + 1*x^2 at
### x in 1:5, runs all four bootstrap conditions via repBoot, and
### draws one bootPlot panel per condition in a 2x2 grid.
### NOTE(review): relies on getData(), which is defined elsewhere --
### confirm it is sourced before calling.
runSim = function() {
xUnique = 1:5
trueCoeff = c(0, 1, 1)
myData = getData(coefs = trueCoeff, xs = xUnique)
expt = repBoot(data = myData)
par(mfrow = c(2, 2)) # 2x2 panel layout, one panel per bootstrap condition
for (i in 1:4){
bootPlot(myData$x, myData$y,
coeff = expt[[i]], trueCoeff)
}
return(expt)
}
|
# Analysis of flooding and hurricane evacuation risks for populations of concern in Rhode Island
library(tidyverse)
library(sf)
library(tmap)
library(tmaptools)
library(lwgeom)
library(tigris)
options(tigris_use_cache = TRUE, tigris_class = "sf")
load("DATA/ne_layers.rds")
# Download Census TIGERLine hydrography for RI
## First, extract list of county names to use with tigris::water
ri_counties <- counties("RI") %>%
pull(NAME)
# Next, download water features for each county and rbind to one layer,
# dissolved to a single geometry so it can be used for erase/overlay below
ri_awater_sf <- rbind_tigris(
lapply(
ri_counties, function(x) area_water(state = "RI", county = x)
)
) %>%
st_union() %>%
st_as_sf() %>%
st_transform(., crs = 2840) # EPSG:2840 = NAD83(HARN) / Rhode Island (meters)
# Read in NFHL for RI. Data comes from FEMA.
# List available layers in geodatabase
# st_layers("DATA/FEMA/RI/NFHL_44_20181118.gdb")
# Read in flood hazard areas, dropping open water and minimal-hazard zones
ri_fhza_2840 <- st_read(dsn = "DATA/FEMA/RI/NFHL_44_20181118.gdb",
layer = "S_Fld_Haz_Ar") %>%
filter(FLD_ZONE != "OPEN WATER" &
!ZONE_SUBTY %in% c("AREA OF MINIMAL FLOOD HAZARD",
"AREA WITH REDUCED FLOOD RISK DUE TO LEVEE")) %>%
mutate(FLD_ZONE = as.character(FLD_ZONE)) %>% # omit unused factor levels
st_transform(., crs = 2840) %>%
st_make_valid() %>%
group_by(FLD_ZONE) %>% # aggregate flood zone polygons
summarize(count = n()) %>%
mutate(Area = st_area(.),
# Map FEMA zone codes to recurrence-interval labels;
# any zone code not listed here becomes NA
Interval = case_when(
FLD_ZONE == "A" ~ "100-year",
FLD_ZONE == "AE" ~ "100-year",
FLD_ZONE == "AH" ~ "100-year",
FLD_ZONE == "AO" ~ "100-year",
FLD_ZONE == "VE" ~ "100-year",
FLD_ZONE == "X" ~ "500-year"))
# crop flood zones to land areas only
ri_state_sf <- ne_states_sf_cb %>%
filter(NAME == "Rhode Island")
ri_fhza_2840_land <- ri_fhza_2840 %>%
crop_shape(., ri_state_sf, polygon = TRUE) %>%
st_difference(., ri_awater_sf) %>% # erase inland/coastal water
mutate(Area = st_area(.)) # recompute area after the erase
# Percentage of RI land within flood zones
ri_area <- as.numeric(ri_state_sf$ALAND) # Census land area in square meters
# Total and percentage of land area of RI within flood zones
ri_fhza_2840_land %>%
as.data.frame() %>%
group_by(Interval) %>%
summarize(SqKm = round(as.numeric(sum(Area)/10^6),1),
SqMi = round(as.numeric(SqKm/2.59),1),
PctArea = paste0(as.character(round(as.numeric(sum(Area)/ri_area*100),1)),"%"))
# read in hurricane evacuation zone layer
# (zones A-C correspond to hurricane-category tiers; see map labels below)
ri_hea_sf <- st_read(dsn = "DATA/FEMA/RI",
layer = "Hurricane_Evacuation_Areas") %>%
mutate(EVAC = as.character(EVAC)) %>%
filter(EVAC %in% c("A","B","C")) %>%
st_transform(., crs = 2840) %>%
st_make_valid() %>%
mutate(Area = st_area(.))
# Total and percentage of land area with hurricane evacuation zones
ri_hea_sf %>%
as.data.frame() %>%
group_by(EVAC) %>%
summarize(SqKm = round(as.numeric(sum(Area)/10^6),1),
SqMi = round(as.numeric(SqKm/2.59),1),
PctArea = paste0(as.character(round(as.numeric(sum(Area)/ri_area*100),1)),"%"))
# Convert to projected local CRS EPSG:2840: NAD83(HARN) / Rhode Island
ri_blkgrp_2840 <- ne_blkgrp_sf %>%
filter(STATE == "Rhode Island") %>%
st_transform(., crs = 2840)
ri_tracts_2840 <- ne_tracts_sf %>%
filter(STATE == "Rhode Island") %>%
st_transform(., crs = 2840)
# Get rid of empty geometries (empty_geo is reused as a scratch variable)
empty_geo <- st_is_empty(ri_fhza_2840)
ri_fhza_2840 <- ri_fhza_2840[!empty_geo,]
empty_geo <- st_is_empty(ri_blkgrp_2840)
ri_blkgrp_2840 <- ri_blkgrp_2840[!empty_geo,]
empty_geo <- st_is_empty(ri_tracts_2840)
ri_tracts_2840 <- ri_tracts_2840[!empty_geo,]
# Write out block groups for processing in ArcGIS
ri_blkgrp_2840 %>%
dplyr::select(GEOID) %>%
st_write(., "DATA/FEMA/RI/ri_blkgrp_2840.shp", delete_layer = TRUE)
# repeat for tracts
ri_tracts_2840 %>%
dplyr::select(GEOID) %>%
st_write(., "DATA/FEMA/RI/ri_tracts_2840.shp", delete_layer = TRUE)
# Use dasymetric mapping to calculate populations within flood zones. Approach follows method used by Qiang (2019) to eliminate unpopulated areas of census polygons and then reallocate populations to developed areas as identified in National Land Cover Dataset (NLCD).
# Perform NLCD raster-to-vector conversion, vector erase/difference, and vector intersections in ArcMap because it takes too long in R.
# In ArcMap:
# Convert NLCD raster to shapefile. Isolate undeveloped areas.
# Erase areas of ri_blkgrp_2840 and ri_tracts_2840 that overlap with undeveloped areas in NLCD shapefiles. Compute OldArea of erased polygons in sqm to identify area of developed polygons remaining.
# Intersect erased ri_blkgrps and erased ri_tracts with NFHZA and Hurricane evacuation zones. Read back into R.
# read in processed ri_blkgrps and ri_tracts
# Each layer is re-joined to the full attribute table by GEOID; NewArea is
# the developed area of the polygon that falls inside the hazard zone,
# while OldArea (computed in ArcMap, per the notes above) is the total
# developed area of the polygon -- TODO confirm against the ArcMap outputs.
st_layers(dsn = "DATA/FEMA/RI")
ri_blkgrps_nfhza <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_blkgrps_nfhza") %>%
left_join(., as.data.frame(ri_blkgrp_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_blkgrps_hevac <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_blkgrps_hevac") %>%
left_join(., as.data.frame(ri_blkgrp_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_tracts_nfhza <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_tracts_nfhza") %>%
left_join(., as.data.frame(ri_tracts_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_tracts_hevac <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_tracts_hevac") %>%
left_join(., as.data.frame(ri_tracts_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
# Apportion populations based on geographic proportion of intersect.
# The identical area-weighting was previously copy-pasted for the
# flood-zone and hurricane-evacuation layers; define it once as a helper.
# Adds:
#   RI_LOWINC / RI_MINORITIES - total pop of block groups the state flags
#     as low income ("I") / minority ("M"); NA designations treated as 0
#   Proportion - share of the block group's developed area that falls
#     inside the hazard zone (NewArea / OldArea)
#   New* columns - area-weighted population estimates for each group
apportion_blkgrp_pops <- function(df) {
df %>%
mutate(RI_LOWINC = if_else(RI_INCOME == "I",totalpopE,0)) %>%
mutate(RI_LOWINC = replace_na(RI_LOWINC,0)) %>%
mutate(RI_MINORITIES = if_else(RI_MINORITY == "M", totalpopE,0)) %>%
mutate(RI_MINORITIES = replace_na(RI_MINORITIES,0)) %>%
mutate(Proportion = as.numeric(NewArea/OldArea),
NewPop = totalpopE*Proportion,
NewMinority = minorityE*Proportion,
NewUnder5 = under5E*Proportion,
NewOver64 = over64E*Proportion,
NewUnder18 = under18E*Proportion,
NewEng_limit = eng_limitE*Proportion,
NewPov = num2povE*Proportion,
NewLths = lthsE*Proportion,
NewRI_LOWINC = RI_LOWINC*Proportion,
NewRI_MINORITIES = RI_MINORITIES*Proportion)
}
ri_blkgrps_nfhza <- apportion_blkgrp_pops(ri_blkgrps_nfhza)
ri_blkgrps_hevac <- apportion_blkgrp_pops(ri_blkgrps_hevac)
# Tract-level variables (disability, car ownership) get the same
# area-weighting treatment.
apportion_tract_pops <- function(df) {
df %>%
mutate(Proportion = as.numeric(NewArea/OldArea),
NewDisabled = disabledOver18E*Proportion,
NewNoCar = HHnoCarE*Proportion)
}
ri_tracts_nfhza <- apportion_tract_pops(ri_tracts_nfhza)
ri_tracts_hevac <- apportion_tract_pops(ri_tracts_hevac)
# Compute total block group populations within flood zones
# (sums of the area-weighted New* estimates, truncated to integers)
ri_flood_blkgrp_df <- ri_blkgrps_nfhza %>%
as.data.frame() %>%
summarize(`Total Pop` = as.integer(sum(NewPop)),
Minority = as.integer(sum(NewMinority)),
`Under 5` = as.integer(sum(NewUnder5)),
`Over 64` = as.integer(sum(NewOver64)),
`Under 18` = as.integer(sum(NewUnder18)),
`Limited English HH` = as.integer(sum(NewEng_limit)),
`Low Income` = as.integer(sum(NewPov)),
`No HS Dip` = as.integer(sum(NewLths)),
`RI Low Income` = as.integer(sum(NewRI_LOWINC)),
`RI Minority` = as.integer(sum(NewRI_MINORITIES))) %>%
gather(key = Group, value = FloodPop)
# Compute total block group populations within hurricane evac zones
ri_hevac_blkgrp_df <- ri_blkgrps_hevac %>%
as.data.frame() %>%
summarize(`Total Pop` = as.integer(sum(NewPop)),
Minority = as.integer(sum(NewMinority)),
`Under 5` = as.integer(sum(NewUnder5)),
`Over 64` = as.integer(sum(NewOver64)),
`Under 18` = as.integer(sum(NewUnder18)),
`Limited English HH` = as.integer(sum(NewEng_limit)),
`Low Income` = as.integer(sum(NewPov)),
`No HS Dip` = as.integer(sum(NewLths)),
`RI Low Income` = as.integer(sum(NewRI_LOWINC)),
`RI Minority` = as.integer(sum(NewRI_MINORITIES))) %>%
gather(key = Group, value = HevacPop)
# Compute total tract populations within flood zones
# (disability and car ownership are only available at tract level)
ri_flood_tracts_df <- ri_tracts_nfhza %>%
as.data.frame() %>%
summarize(`Disabled` = as.integer(sum(NewDisabled)),
`No Car HH` = as.integer(sum(NewNoCar))) %>%
gather(key = Group, value = FloodPop)
ri_hevac_tracts_df <- ri_tracts_hevac %>%
as.data.frame() %>%
summarize(`Disabled` = as.integer(sum(NewDisabled)),
`No Car HH` = as.integer(sum(NewNoCar))) %>%
gather(key = Group, value = HevacPop)
# Compute total tract populations within the state for same groups
ri_tract_flood_pops_df <- ri_tracts_2840 %>%
as.data.frame() %>%
summarize(`Disabled` = sum(disabledOver18E),
`No Car HH` = sum(HHnoCarE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(.,ri_flood_tracts_df, by = "Group")
ri_tract_hevac_pops_df <- ri_tracts_2840 %>%
as.data.frame() %>%
summarize(`Disabled` = sum(disabledOver18E),
`No Car HH` = sum(HHnoCarE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(.,ri_hevac_tracts_df, by = "Group")
# Compute populations for state,and join with flood pops
# (statewide denominators, then stack block-group and tract groups)
ri_FloodPops_df <- ri_blkgrp_2840 %>%
as.data.frame() %>%
mutate(RI_LOWINC = if_else(RI_INCOME == "I",totalpopE,0)) %>%
mutate(RI_LOWINC = replace_na(RI_LOWINC,0)) %>%
mutate(RI_MINORITIES = if_else(RI_MINORITY == "M",totalpopE,0)) %>%
mutate(RI_MINORITIES = replace_na(RI_MINORITIES,0)) %>%
summarize(`Total Pop` = sum(totalpopE),
Minority = sum(minorityE),
`Under 5` = sum(under5E),
`Over 64` = sum(over64E),
`Under 18` = sum(under18E),
`Limited English HH` = sum(eng_limitE),
`Low Income` = sum(num2povE),
`No HS Dip` = sum(lthsE),
`RI Low Income` = sum(RI_LOWINC, na.rm = TRUE),
`RI Minority` = sum(RI_MINORITIES, na.rm = TRUE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(., ri_flood_blkgrp_df, by = "Group") %>%
rbind(.,ri_tract_flood_pops_df) %>%
mutate(PctFlood = FloodPop/RIPop*100)
# Compute populations for state, and join with hurricane evac pops
ri_HevacPops_df <- ri_blkgrp_2840 %>%
as.data.frame() %>%
mutate(RI_LOWINC = if_else(RI_INCOME == "I",totalpopE,0)) %>%
mutate(RI_LOWINC = replace_na(RI_LOWINC,0)) %>%
mutate(RI_MINORITIES = if_else(RI_MINORITY == "M",totalpopE,0)) %>%
mutate(RI_MINORITIES = replace_na(RI_MINORITIES,0)) %>%
summarize(`Total Pop` = sum(totalpopE),
Minority = sum(minorityE),
`Under 5` = sum(under5E),
`Over 64` = sum(over64E),
`Under 18` = sum(under18E),
`Limited English HH` = sum(eng_limitE),
`Low Income` = sum(num2povE),
`No HS Dip` = sum(lthsE),
`RI Low Income` = sum(RI_LOWINC, na.rm = TRUE),
`RI Minority` = sum(RI_MINORITIES, na.rm = TRUE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(., ri_hevac_blkgrp_df, by = "Group") %>%
rbind(.,ri_tract_hevac_pops_df) %>%
mutate(PctHevac = HevacPop/RIPop*100)
# Show table of pops within flood zones
ri_FloodPops_df %>%
arrange(-FloodPop)
# Create lollipop plot of pops within flood zones
# NOTE(review): ri_FloodPops_df[1,4] is row 1, column 4 (PctFlood) --
# presumably the statewide "Total Pop" percentage used as the baseline;
# verify the row order/column position if the data frame changes upstream.
ri_FloodPops_df %>%
ggplot(aes(x = reorder(Group,-PctFlood),
y = PctFlood)) +
geom_segment(aes(x = reorder(Group,-PctFlood),
xend = reorder(Group,-PctFlood),
y = ri_FloodPops_df[1,4], yend = PctFlood),
color = "skyblue") +
geom_point(color = "blue", size = 4, alpha = 0.8) +
coord_flip() + xlab("") + ylab("") +
ggtitle("Rhode Island Populations within Flood Zones") + theme_light() +
theme(panel.grid.major.y = element_blank(),
panel.border = element_blank(),
axis.ticks.y = element_blank()) +
geom_text(aes(x = Group, y = PctFlood + 0.2 * sign(PctFlood),
label = paste0(round(PctFlood,1),"%")),
hjust = 0.1, vjust = -0.5, size = 3,
color=rgb(100,100,100, maxColorValue=255)) +
scale_y_continuous(labels = function(x) paste0(x, "%")) +
geom_hline(yintercept = ri_FloodPops_df[1,4], linetype = "dashed") +
# annotation arrows/labels below use hard-coded y positions tuned to
# this data; adjust if the percentages change
geom_text(aes(x = "Disabled", y = 9.1, label = "Below state avg"),
color = "gray48") +
geom_segment(aes(x = "No Car HH", xend = "No Car HH", y = 10, yend = 8.2),
arrow = arrow(length = unit(0.3,"cm"))) +
geom_text(aes(x = "Low Income", y = 11.3, label = "Above state avg"),
color = "gray48") +
geom_segment(aes(x = "Under 18", xend = "Under 18", y = 10.4, yend = 12.2),
arrow = arrow(length = unit(0.3,"cm")))
# Create a dot density map of total populations and overlay on flood zones
# Create point layer of major cities for context
# NOTE(review): the original note claimed cb=FALSE was required to extract
# centroids from multipolygon towns, but the code uses cb=TRUE together
# with st_centroid(of_largest_polygon = TRUE), which takes the centroid of
# the largest part of a multipolygon -- confirm against the rendered maps.
ri_towns_sf_pts <- county_subdivisions(state = "RI", cb = TRUE) %>%
filter(NAME %in% c("Providence",
"Woonsocket",
"Pawtucket",
"Warwick",
"Bristol",
"Portsmouth",
"Newport",
"Narragansett",
"North Kingstown",
"Charlestown",
"Scituate", # fixed typo: "Situate" is not an RI town and silently matched nothing
"Glocester")) %>%
st_transform(., crs = 2840) %>%
st_centroid(of_largest_polygon = TRUE)
# Create road layer for context
ri_highways <- tigris::primary_roads() %>%
filter(FULLNAME %in% c("I- 95","I- 195","I- 295", "US Hwy 6")) %>%
tmaptools::crop_shape(., ne_states_sf_cb) %>%
st_transform(., crs = 2840)
# Extract highway segments for labeling
# (LINEARID values are hard-coded TIGER segment IDs chosen for label
# placement -- they will change if TIGER vintages change)
I95roadSegment <- ri_highways %>%
filter(LINEARID == "110468245978")
I95roadSegment2 <- ri_highways %>%
filter(LINEARID == "1107052605232")
I295roadSegment <- ri_highways %>%
filter(LINEARID == "1104755623349")
I195roadSegment <- ri_highways %>%
filter(LINEARID == "110448466166")
# Create custom icons of highway shields (fetched from Wikimedia at run time;
# requires network access)
I95 <- tmap_icons(file = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/61/I-95.svg/200px-I-95.svg.png")
I195 <- tmap_icons("https://upload.wikimedia.org/wikipedia/commons/thumb/f/f7/I-195.svg/200px-I-195.svg.png")
I295 <- tmap_icons("https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/I-295.svg/200px-I-295.svg.png")
# Create random points, with 1 point for every 100 people
# (block groups under 100 people are dropped so st_sample gets size >= 1)
ri_totalpop_pts <- ri_blkgrp_2840 %>%
select(totalpopE) %>%
filter(totalpopE >= 100) %>%
st_sample(., size = round(.$totalpopE/100)) %>% # create 1 random point for every 100 people
st_sf(.) %>%
mutate(Group = "Total Pop")
# Map totalpop and flood zones
# (layers are stacked bottom-to-top: basemap fills, water, dots, flood
# zones, borders, highways + shield icons, town labels, then legend items)
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_totalpop_pts) + tm_dots(col = "forestgreen",
labels = "1 dot = 100 persons") +
tm_shape(ri_fhza_2840_land) +
tm_fill(col = "Interval",
palette = c("gold", "goldenrod3"),
labels = c("1% AEP (100-year)", "0.2% AEP (500-year)"),
title = "FEMA Flood Zones",
alpha = 0.6,
border.alpha = 0) +
tm_shape(ri_fhza_2840_land) +
tm_borders(col = "goldenrod1",
lwd = 0.5,
alpha = 0.6) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
tm_add_legend(type = "fill", col = "forestgreen",
border.col = "white", border.alpha = 0,
labels = "1 dot = 100 persons",
title = "Total Population") +
tm_layout(title = "Population Distribution \nand Flood Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Create a dot density map of transit-dependent populations in flood zone
# Create random points, with 1 point for every 5 people
# (polygons with fewer than 5 estimated people are dropped so st_sample
# always gets a positive size)
Over64_nfhza_pts <- ri_blkgrps_nfhza %>%
dplyr::select(NewOver64) %>%
filter(NewOver64 >= 5) %>%
st_sample(., size = round(.$NewOver64/5)) %>% # create 1 random point for every 5 people
st_sf(.) %>%
mutate(Group = "Over 64")
Disabled_nfhza_pts <- ri_tracts_nfhza %>%
dplyr::select(NewDisabled) %>%
filter(NewDisabled >= 5) %>%
st_sample(., size = round(.$NewDisabled/5)) %>%
st_sf(.) %>%
mutate(Group = "Disabled")
NoCarHH_nfhza_pts <- ri_tracts_nfhza %>%
dplyr::select(NewNoCar) %>%
filter(NewNoCar >= 5) %>%
st_sample(., size = round(.$NewNoCar/5)) %>%
st_sf(.) %>%
mutate(Group = "No Car HH")
# Bring them together
ri_nfhza_vulnerable <- rbind(Over64_nfhza_pts,Disabled_nfhza_pts,
NoCarHH_nfhza_pts) %>%
slice(sample(1:n())) # randomise order to avoid bias in plotting order
# Map transit-dependent pops and flood zones
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ri_fhza_2840_land) +
tm_fill(col = "Interval",
palette = c("gold", "goldenrod3"),
labels = c("1% AEP (100-year)", "0.2% AEP (500-year)"),
title = "FEMA Flood Zones",
border.alpha = 0) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_nfhza_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
legend.show = FALSE) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
# manual legend because the dot layer's own legend is suppressed above;
# label order must match the alphabetical factor order of Group
tm_add_legend(type = "fill", col = c("green","red","blue"),
border.col = "white", border.alpha = 0,
labels = c("Disabled", "No Car HH", "Over 64"),
title = "Population\nGroup*") +
tm_layout(title = "Transit Dependent Populations \nwithin Flood Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Show table of pops within hurricane evacuation zones
ri_HevacPops_df %>%
arrange(-HevacPop)
# Create lollipop plot of pops within hurricane evac zones
# NOTE(review): as with the flood plot, ri_HevacPops_df[1,4] is row 1,
# column 4 (PctHevac) -- the statewide "Total Pop" percentage baseline.
ri_HevacPops_df %>%
ggplot(aes(x = reorder(Group,-PctHevac),
y = PctHevac)) +
geom_segment(aes(x = reorder(Group,-PctHevac),
xend = reorder(Group,-PctHevac),
y = ri_HevacPops_df[1,4], yend = PctHevac),
color = "skyblue") +
geom_point(color = "blue", size = 4, alpha = 0.8) +
coord_flip() + xlab("") + ylab("") +
ggtitle("Rhode Island Populations within Hurricane Evacuation Zones") +
theme_light() +
theme(panel.grid.major.y = element_blank(),
panel.border = element_blank(),
axis.ticks.y = element_blank()) +
geom_text(aes(x = Group, y = PctHevac + 0.2 * sign(PctHevac),
label = paste0(round(PctHevac,1),"%")),
hjust = 0.1, vjust = -0.5, size = 3,
color=rgb(100,100,100, maxColorValue=255)) +
scale_y_continuous(labels = function(x) paste0(x, "%")) +
geom_hline(yintercept = ri_HevacPops_df[1,4], linetype = "dashed") +
geom_text(aes(x = "Disabled", y = 10.3, label = "Below state avg"),
color = "gray48") +
geom_segment(aes(x = "No Car HH", xend = "No Car HH", y = 11.4, yend = 9.6),
arrow = arrow(length = unit(0.3,"cm"))) +
geom_text(aes(x = "Low Income", y = 13, label = "Above state avg"),
color = "gray48") +
geom_segment(aes(x = "Under 18", xend = "Under 18", y = 11.8, yend = 13.6),
arrow = arrow(length = unit(0.3,"cm")))
# create simplified version of hurricane evac polygons
# (dissolved to one polygon per EVAC class for faster drawing)
ri_hea_sf_agg <-
ri_hea_sf %>%
group_by(EVAC) %>%
summarize()
r_hea_cata <- ri_hea_sf_agg %>%
filter(EVAC == "A")
r_hea_catb <- ri_hea_sf_agg %>%
filter(EVAC == "B")
# Map totalpop and hurricane evacuation zones
# (same layer stack as the flood-zone map, with dissolved evacuation
# polygons and per-class borders; legend typo "Catgeory" fixed)
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_totalpop_pts) + tm_dots(col = "forestgreen",
labels = "1 dot = 100 persons") +
tm_shape(ri_hea_sf_agg) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4",
"C: Category 5"),
title = "Evacuation Zone and\nHurricane Category",
alpha = 0.6) +
tm_shape(r_hea_cata) +
tm_borders(col = "darkgoldenrod",
lwd = 0.5,
alpha = 0.6) +
tm_shape(r_hea_catb) +
tm_borders(col = "rosybrown3",
lwd = 0.5,
alpha = 0.6) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
tm_add_legend(type = "fill", col = "forestgreen",
border.col = "white", border.alpha = 0,
labels = "1 dot = 100 persons",
title = "Total Population") +
tm_layout(title = "Population Distribution \nand Hurricane\nEvacuation Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Create a dot density of transit-dependent populations within hurricane evacuation zones
# Create random points, with 1 point for every 5 people
# (polygons with fewer than 5 estimated people are dropped so st_sample
# always gets a positive size)
Over64_hevac_pts <- ri_blkgrps_hevac %>%
dplyr::select(NewOver64) %>%
filter(NewOver64 >= 5) %>%
st_sample(., size = round(.$NewOver64/5)) %>% # create 1 random point for every 5 people
st_sf(.) %>%
mutate(Group = "Over 64")
Disabled_hevac_pts <- ri_tracts_hevac %>%
dplyr::select(NewDisabled) %>%
filter(NewDisabled >= 5) %>%
st_sample(., size = round(.$NewDisabled/5)) %>%
st_sf(.) %>%
mutate(Group = "Disabled")
NoCarHH_hevac_pts <- ri_tracts_hevac %>%
dplyr::select(NewNoCar) %>%
filter(NewNoCar >= 5) %>%
st_sample(., size = round(.$NewNoCar/5)) %>%
st_sf(.) %>%
mutate(Group = "No Car HH")
# Bring them together
ri_hevac_vulnerable <- rbind(Over64_hevac_pts,Disabled_hevac_pts,
NoCarHH_hevac_pts) %>%
slice(sample(1:n())) # randomise order to avoid bias in plotting order
# Map them out: transit-dependent populations over hurricane evacuation
# zones (legend typo "Catgeory" fixed)
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ri_hea_sf) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4",
"C: Category 5"),
title = "Evacuation Zone and\nHurricane Category") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_text("STUSPS", size = 0.7, remove.overlap = TRUE, col = "gray") +
tm_shape(ri_hevac_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
legend.show = FALSE) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
# manual legend because the dot layer's own legend is suppressed above;
# label order must match the alphabetical factor order of Group
tm_add_legend(type = "fill", col = c("green","red","blue"),
border.col = "white", border.alpha = 0,
labels = c("Disabled", "No Car HH", "Over 64"),
title = "Population\nGroup*") +
tm_layout(title = "Transit Dependent Populations \nwithin Hurricane Evacuation\nZones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
##### TEST FOR EXPLORATION
# Quick check of evacuation zones against the vulnerable-population dots.
# Fixes: "Catgeory" typo, and the labels vector previously had only two
# entries for three EVAC classes / three palette colors -- added
# "C: Category 5" so labels line up with the legend classes.
tm_shape(ri_hea_sf) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4",
"C: Category 5"),
title = "Evacuation Zone and\nHurricane Category",
alpha = 0.6) +
tm_shape(ri_hevac_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
alpha = 0.6)
| /Evacuation_RhodeIsland.R | no_license | profLuna/Investing-for-Equity | R | false | false | 28,310 | r | # Analysis of flooding and hurricane evacuation risks for populations of concern in Rhode Island
library(tidyverse)
library(sf)
library(tmap)
library(tmaptools)
library(lwgeom)
library(tigris)
options(tigris_use_cache = TRUE, tigris_class = "sf")
load("DATA/ne_layers.rds")
# Download Census TIGERLine hydrography for RI
## First, extract list of county names to use with tigris::water
ri_counties <- counties("RI") %>%
pull(NAME)
# Next, download water features for each county and rbind to one layer
ri_awater_sf <- rbind_tigris(
lapply(
ri_counties, function(x) area_water(state = "RI", county = x)
)
) %>%
st_union() %>%
st_as_sf() %>%
st_transform(., crs = 2840)
# Read in NFHL for RI. Data comes from FEMA.
# List available layers in geodatabase
# st_layers("DATA/FEMA/RI/NFHL_44_20181118.gdb")
# Read in flood hazard areas
ri_fhza_2840 <- st_read(dsn = "DATA/FEMA/RI/NFHL_44_20181118.gdb",
layer = "S_Fld_Haz_Ar") %>%
filter(FLD_ZONE != "OPEN WATER" &
!ZONE_SUBTY %in% c("AREA OF MINIMAL FLOOD HAZARD",
"AREA WITH REDUCED FLOOD RISK DUE TO LEVEE")) %>%
mutate(FLD_ZONE = as.character(FLD_ZONE)) %>% # omit unused factor levels
st_transform(., crs = 2840) %>%
st_make_valid() %>%
group_by(FLD_ZONE) %>% # aggregate flood zone polygons
summarize(count = n()) %>%
mutate(Area = st_area(.),
Interval = case_when(
FLD_ZONE == "A" ~ "100-year",
FLD_ZONE == "AE" ~ "100-year",
FLD_ZONE == "AH" ~ "100-year",
FLD_ZONE == "AO" ~ "100-year",
FLD_ZONE == "VE" ~ "100-year",
FLD_ZONE == "X" ~ "500-year"))
# crop flood zones to land areas only
ri_state_sf <- ne_states_sf_cb %>%
filter(NAME == "Rhode Island")
ri_fhza_2840_land <- ri_fhza_2840 %>%
crop_shape(., ri_state_sf, polygon = TRUE) %>%
st_difference(., ri_awater_sf) %>%
mutate(Area = st_area(.))
# Percentage of RI land within flood zones
ri_area <- as.numeric(ri_state_sf$ALAND)
# Total and percentage of land area of RI within flood zones
ri_fhza_2840_land %>%
as.data.frame() %>%
group_by(Interval) %>%
summarize(SqKm = round(as.numeric(sum(Area)/10^6),1),
SqMi = round(as.numeric(SqKm/2.59),1),
PctArea = paste0(as.character(round(as.numeric(sum(Area)/ri_area*100),1)),"%"))
# read in hurricane evacuation zone layer
ri_hea_sf <- st_read(dsn = "DATA/FEMA/RI",
layer = "Hurricane_Evacuation_Areas") %>%
mutate(EVAC = as.character(EVAC)) %>%
filter(EVAC %in% c("A","B","C")) %>%
st_transform(., crs = 2840) %>%
st_make_valid() %>%
mutate(Area = st_area(.))
# Total and percentage of land area with hurricane evacuation zones
ri_hea_sf %>%
as.data.frame() %>%
group_by(EVAC) %>%
summarize(SqKm = round(as.numeric(sum(Area)/10^6),1),
SqMi = round(as.numeric(SqKm/2.59),1),
PctArea = paste0(as.character(round(as.numeric(sum(Area)/ri_area*100),1)),"%"))
# Convert to projected local CRS EPSG:2840: NAD83(HARN) / Rhode Island
ri_blkgrp_2840 <- ne_blkgrp_sf %>%
filter(STATE == "Rhode Island") %>%
st_transform(., crs = 2840)
ri_tracts_2840 <- ne_tracts_sf %>%
filter(STATE == "Rhode Island") %>%
st_transform(., crs = 2840)
# Get rid of empty geometries
empty_geo <- st_is_empty(ri_fhza_2840)
ri_fhza_2840 <- ri_fhza_2840[!empty_geo,]
empty_geo <- st_is_empty(ri_blkgrp_2840)
ri_blkgrp_2840 <- ri_blkgrp_2840[!empty_geo,]
empty_geo <- st_is_empty(ri_tracts_2840)
ri_tracts_2840 <- ri_tracts_2840[!empty_geo,]
# Write out block groups for processing in ArcGIS
ri_blkgrp_2840 %>%
dplyr::select(GEOID) %>%
st_write(., "DATA/FEMA/RI/ri_blkgrp_2840.shp", delete_layer = TRUE)
# repeat for tracts
ri_tracts_2840 %>%
dplyr::select(GEOID) %>%
st_write(., "DATA/FEMA/RI/ri_tracts_2840.shp", delete_layer = TRUE)
# Use dasymetric mapping to calculate populations within flood zones. Approach follows method used by Qiang (2019) to eliminate unpopulated areas of census polygons and then reallocate populations to developed areas as identified in National Land Cover Dataset (NLCD).
# Perform NLCD raster-to-vector conversion, vector erase/difference, and vector intersections in ArcMap because it takes too long in R.
# In ArcMap:
# Convert NLCD raster to shapefile. Isolate undeveloped areas.
# Erase areas of ri_blkgrp_2840 and ri_tracts_2840 that overlap with undeveloped areas in NLCD shapefiles. Compute OldArea of erased polygons in sqm to identify area of developed polygons remaining.
# Intersect erased ri_blkgrps and erased ri_tracts with NFHZA and Hurricane evacuation zones. Read back into R.
# read in processed ri_blkgrps and ri_tracts
st_layers(dsn = "DATA/FEMA/RI")
ri_blkgrps_nfhza <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_blkgrps_nfhza") %>%
left_join(., as.data.frame(ri_blkgrp_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_blkgrps_hevac <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_blkgrps_hevac") %>%
left_join(., as.data.frame(ri_blkgrp_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_tracts_nfhza <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_tracts_nfhza") %>%
left_join(., as.data.frame(ri_tracts_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
ri_tracts_hevac <- st_read(dsn = "DATA/FEMA/RI",
layer = "ri_tracts_hevac") %>%
left_join(., as.data.frame(ri_tracts_2840), by = "GEOID") %>%
st_transform(., crs = 2840) %>%
mutate(NewArea = st_area(.)) %>%
st_make_valid()
# Apportion populations based on geographic proportion of intersect.
# The same areal-weighting pipeline was originally repeated four times; it is
# factored into two helpers so the block-group and tract logic each live in
# one place.

# Apportion block-group population estimates to the clipped (developed,
# in-hazard-zone) polygons.
# RI_LOWINC / RI_MINORITIES: total population of block groups flagged by the
# state as low-income ("I") / minority ("M"); NAs from unflagged groups are
# zeroed before weighting.
# Proportion = NewArea/OldArea, the share of the developed block-group area
# that falls inside the hazard zone; each estimate column is scaled by it.
apportion_blkgrp_pops <- function(dat) {
dat %>%
mutate(RI_LOWINC = replace_na(if_else(RI_INCOME == "I", totalpopE, 0), 0),
RI_MINORITIES = replace_na(if_else(RI_MINORITY == "M", totalpopE, 0), 0)) %>%
mutate(Proportion = as.numeric(NewArea/OldArea),
NewPop = totalpopE*Proportion,
NewMinority = minorityE*Proportion,
NewUnder5 = under5E*Proportion,
NewOver64 = over64E*Proportion,
NewUnder18 = under18E*Proportion,
NewEng_limit = eng_limitE*Proportion,
NewPov = num2povE*Proportion,
NewLths = lthsE*Proportion,
NewRI_LOWINC = RI_LOWINC*Proportion,
NewRI_MINORITIES = RI_MINORITIES*Proportion)
}

# Apportion the two tract-level variables (disability and carless households)
# the same way.
apportion_tract_pops <- function(dat) {
dat %>%
mutate(Proportion = as.numeric(NewArea/OldArea),
NewDisabled = disabledOver18E*Proportion,
NewNoCar = HHnoCarE*Proportion)
}

ri_blkgrps_nfhza <- apportion_blkgrp_pops(ri_blkgrps_nfhza)
ri_blkgrps_hevac <- apportion_blkgrp_pops(ri_blkgrps_hevac)
ri_tracts_nfhza <- apportion_tract_pops(ri_tracts_nfhza)
ri_tracts_hevac <- apportion_tract_pops(ri_tracts_hevac)
# Compute total block group populations within flood zones
# (sums of the area-weighted "New*" columns, truncated to whole persons,
# then reshaped to long form: one row per population group)
ri_flood_blkgrp_df <- ri_blkgrps_nfhza %>%
as.data.frame() %>%
summarize(`Total Pop` = as.integer(sum(NewPop)),
Minority = as.integer(sum(NewMinority)),
`Under 5` = as.integer(sum(NewUnder5)),
`Over 64` = as.integer(sum(NewOver64)),
`Under 18` = as.integer(sum(NewUnder18)),
`Limited English HH` = as.integer(sum(NewEng_limit)),
`Low Income` = as.integer(sum(NewPov)),
`No HS Dip` = as.integer(sum(NewLths)),
`RI Low Income` = as.integer(sum(NewRI_LOWINC)),
`RI Minority` = as.integer(sum(NewRI_MINORITIES))) %>%
gather(key = Group, value = FloodPop)
# Compute total block group populations within hurricane evac zones
ri_hevac_blkgrp_df <- ri_blkgrps_hevac %>%
as.data.frame() %>%
summarize(`Total Pop` = as.integer(sum(NewPop)),
Minority = as.integer(sum(NewMinority)),
`Under 5` = as.integer(sum(NewUnder5)),
`Over 64` = as.integer(sum(NewOver64)),
`Under 18` = as.integer(sum(NewUnder18)),
`Limited English HH` = as.integer(sum(NewEng_limit)),
`Low Income` = as.integer(sum(NewPov)),
`No HS Dip` = as.integer(sum(NewLths)),
`RI Low Income` = as.integer(sum(NewRI_LOWINC)),
`RI Minority` = as.integer(sum(NewRI_MINORITIES))) %>%
gather(key = Group, value = HevacPop)
# Compute total tract populations within flood zones
# (disability and carless-household counts only exist at tract level)
ri_flood_tracts_df <- ri_tracts_nfhza %>%
as.data.frame() %>%
summarize(`Disabled` = as.integer(sum(NewDisabled)),
`No Car HH` = as.integer(sum(NewNoCar))) %>%
gather(key = Group, value = FloodPop)
ri_hevac_tracts_df <- ri_tracts_hevac %>%
as.data.frame() %>%
summarize(`Disabled` = as.integer(sum(NewDisabled)),
`No Car HH` = as.integer(sum(NewNoCar))) %>%
gather(key = Group, value = HevacPop)
# Compute total tract populations within the state for same groups
# (statewide denominators, joined to the in-zone numerators computed above)
ri_tract_flood_pops_df <- ri_tracts_2840 %>%
as.data.frame() %>%
summarize(`Disabled` = sum(disabledOver18E),
`No Car HH` = sum(HHnoCarE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(.,ri_flood_tracts_df, by = "Group")
ri_tract_hevac_pops_df <- ri_tracts_2840 %>%
as.data.frame() %>%
summarize(`Disabled` = sum(disabledOver18E),
`No Car HH` = sum(HHnoCarE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(.,ri_hevac_tracts_df, by = "Group")
# Compute populations for state,and join with flood pops
# PctFlood = share of each statewide group living inside a flood zone.
ri_FloodPops_df <- ri_blkgrp_2840 %>%
as.data.frame() %>%
mutate(RI_LOWINC = if_else(RI_INCOME == "I",totalpopE,0)) %>%
mutate(RI_LOWINC = replace_na(RI_LOWINC,0)) %>%
mutate(RI_MINORITIES = if_else(RI_MINORITY == "M",totalpopE,0)) %>%
mutate(RI_MINORITIES = replace_na(RI_MINORITIES,0)) %>%
summarize(`Total Pop` = sum(totalpopE),
Minority = sum(minorityE),
`Under 5` = sum(under5E),
`Over 64` = sum(over64E),
`Under 18` = sum(under18E),
`Limited English HH` = sum(eng_limitE),
`Low Income` = sum(num2povE),
`No HS Dip` = sum(lthsE),
`RI Low Income` = sum(RI_LOWINC, na.rm = TRUE),
`RI Minority` = sum(RI_MINORITIES, na.rm = TRUE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(., ri_flood_blkgrp_df, by = "Group") %>%
rbind(.,ri_tract_flood_pops_df) %>%
mutate(PctFlood = FloodPop/RIPop*100)
# Compute populations for state, and join with hurricane evac pops
ri_HevacPops_df <- ri_blkgrp_2840 %>%
as.data.frame() %>%
mutate(RI_LOWINC = if_else(RI_INCOME == "I",totalpopE,0)) %>%
mutate(RI_LOWINC = replace_na(RI_LOWINC,0)) %>%
mutate(RI_MINORITIES = if_else(RI_MINORITY == "M",totalpopE,0)) %>%
mutate(RI_MINORITIES = replace_na(RI_MINORITIES,0)) %>%
summarize(`Total Pop` = sum(totalpopE),
Minority = sum(minorityE),
`Under 5` = sum(under5E),
`Over 64` = sum(over64E),
`Under 18` = sum(under18E),
`Limited English HH` = sum(eng_limitE),
`Low Income` = sum(num2povE),
`No HS Dip` = sum(lthsE),
`RI Low Income` = sum(RI_LOWINC, na.rm = TRUE),
`RI Minority` = sum(RI_MINORITIES, na.rm = TRUE)) %>%
gather(key = Group, value = RIPop) %>%
left_join(., ri_hevac_blkgrp_df, by = "Group") %>%
rbind(.,ri_tract_hevac_pops_df) %>%
mutate(PctHevac = HevacPop/RIPop*100)
# Show table of pops within flood zones
ri_FloodPops_df %>%
arrange(-FloodPop)
# Create lollipop plot of pops within flood zones
# NOTE(review): ri_FloodPops_df[1,4] positionally grabs PctFlood for the first
# row (assumed to be "Total Pop") and is used as the statewide baseline for
# both the segments and the dashed reference line — fragile if row/column
# order changes; confirm row 1 is "Total Pop".
ri_FloodPops_df %>%
ggplot(aes(x = reorder(Group,-PctFlood),
y = PctFlood)) +
geom_segment(aes(x = reorder(Group,-PctFlood),
xend = reorder(Group,-PctFlood),
y = ri_FloodPops_df[1,4], yend = PctFlood),
color = "skyblue") +
geom_point(color = "blue", size = 4, alpha = 0.8) +
coord_flip() + xlab("") + ylab("") +
ggtitle("Rhode Island Populations within Flood Zones") + theme_light() +
theme(panel.grid.major.y = element_blank(),
panel.border = element_blank(),
axis.ticks.y = element_blank()) +
geom_text(aes(x = Group, y = PctFlood + 0.2 * sign(PctFlood),
label = paste0(round(PctFlood,1),"%")),
hjust = 0.1, vjust = -0.5, size = 3,
color=rgb(100,100,100, maxColorValue=255)) +
scale_y_continuous(labels = function(x) paste0(x, "%")) +
geom_hline(yintercept = ri_FloodPops_df[1,4], linetype = "dashed") +
# Hand-placed annotations; y positions were tuned to this dataset.
geom_text(aes(x = "Disabled", y = 9.1, label = "Below state avg"),
color = "gray48") +
geom_segment(aes(x = "No Car HH", xend = "No Car HH", y = 10, yend = 8.2),
arrow = arrow(length = unit(0.3,"cm"))) +
geom_text(aes(x = "Low Income", y = 11.3, label = "Above state avg"),
color = "gray48") +
geom_segment(aes(x = "Under 18", xend = "Under 18", y = 10.4, yend = 12.2),
arrow = arrow(length = unit(0.3,"cm")))
# Create a dot density map of total populations and overlay on flood zones
# Create point layer of major cities for context.
# NOTE(review): the original comment claimed cb=FALSE is necessary to extract
# centroids from town multipolygons, but the code uses cb = TRUE and
# st_centroid(of_largest_polygon = TRUE) — confirm which is intended.
# Fix: "Situate" corrected to "Scituate" (the actual RI town name); the
# misspelled filter value silently matched nothing.
ri_towns_sf_pts <- county_subdivisions(state = "RI", cb = TRUE) %>%
filter(NAME %in% c("Providence",
"Woonsocket",
"Pawtucket",
"Warwick",
"Bristol",
"Portsmouth",
"Newport",
"Narragansett",
"North Kingstown",
"Charlestown",
"Scituate",
"Glocester")) %>%
st_transform(., crs = 2840) %>%
st_centroid(of_largest_polygon = TRUE)
# Create road layer for context
# (national primary-roads file filtered to the interstates/US-6 that cross RI,
# then cropped to the New England states extent and reprojected)
ri_highways <- tigris::primary_roads() %>%
filter(FULLNAME %in% c("I- 95","I- 195","I- 295", "US Hwy 6")) %>%
tmaptools::crop_shape(., ne_states_sf_cb) %>%
st_transform(., crs = 2840)
# Extract highway segments for labeling
# (LINEARIDs picked by hand as anchor points for the shield icons)
I95roadSegment <- ri_highways %>%
filter(LINEARID == "110468245978")
I95roadSegment2 <- ri_highways %>%
filter(LINEARID == "1107052605232")
I295roadSegment <- ri_highways %>%
filter(LINEARID == "1104755623349")
I195roadSegment <- ri_highways %>%
filter(LINEARID == "110448466166")
# Create custom icons of highway shields (fetched from Wikimedia at run time;
# requires network access)
I95 <- tmap_icons(file = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/61/I-95.svg/200px-I-95.svg.png")
I195 <- tmap_icons("https://upload.wikimedia.org/wikipedia/commons/thumb/f/f7/I-195.svg/200px-I-195.svg.png")
I295 <- tmap_icons("https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/I-295.svg/200px-I-295.svg.png")
# Create random points, with 1 point for every 100 people
# NOTE(review): block groups with totalpopE < 100 are dropped entirely, so up
# to 99 people per small block group are not represented by any dot.
ri_totalpop_pts <- ri_blkgrp_2840 %>%
select(totalpopE) %>%
filter(totalpopE >= 100) %>%
st_sample(., size = round(.$totalpopE/100)) %>% # create 1 random point for every 100 people
st_sf(.) %>%
mutate(Group = "Total Pop")
# Map totalpop and flood zones
# Layer order (bottom to top): water-colored background, white land, water,
# state borders, population dots, flood-zone fills and outlines, borders
# again, highways + shield icons, town labels, scale bar, manual legend.
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_totalpop_pts) + tm_dots(col = "forestgreen",
labels = "1 dot = 100 persons") +
tm_shape(ri_fhza_2840_land) +
tm_fill(col = "Interval",
palette = c("gold", "goldenrod3"),
labels = c("1% AEP (100-year)", "0.2% AEP (500-year)"),
title = "FEMA Flood Zones",
alpha = 0.6,
border.alpha = 0) +
tm_shape(ri_fhza_2840_land) +
tm_borders(col = "goldenrod1",
lwd = 0.5,
alpha = 0.6) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
# Manual legend entry for the dot-density layer (tm_dots has no fill legend)
tm_add_legend(type = "fill", col = "forestgreen",
border.col = "white", border.alpha = 0,
labels = "1 dot = 100 persons",
title = "Total Population") +
tm_layout(title = "Population Distribution \nand Flood Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Create a dot density map of transit-dependent populations in flood zone.
# The same dot-density pipeline was originally copy-pasted three times; it is
# factored into one helper.

# Draw one random point per `per` people of the given column inside each
# polygon, returning an sf point layer tagged with `group`.
# Polygons with fewer than `per` people in the column are dropped, matching
# the original behavior (they would round to 0 points anyway).
make_dot_density <- function(dat, var, group, per = 5) {
dat %>%
dplyr::select(all_of(var)) %>%
filter(.data[[var]] >= per) %>%
st_sample(., size = round(.[[var]]/per)) %>%
st_sf(.) %>%
mutate(Group = group)
}

Over64_nfhza_pts <- make_dot_density(ri_blkgrps_nfhza, "NewOver64", "Over 64")
Disabled_nfhza_pts <- make_dot_density(ri_tracts_nfhza, "NewDisabled", "Disabled")
NoCarHH_nfhza_pts <- make_dot_density(ri_tracts_nfhza, "NewNoCar", "No Car HH")
# Bring them together
ri_nfhza_vulnerable <- rbind(Over64_nfhza_pts,Disabled_nfhza_pts,
NoCarHH_nfhza_pts) %>%
slice(sample(1:n())) # randomise order to avoid bias in plotting order
# Map transit-dependent pops and flood zones
# Same layer stack as the total-population flood map, but with the
# group-colored vulnerable-population dots instead of total-population dots.
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ri_fhza_2840_land) +
tm_fill(col = "Interval",
palette = c("gold", "goldenrod3"),
labels = c("1% AEP (100-year)", "0.2% AEP (500-year)"),
title = "FEMA Flood Zones",
border.alpha = 0) +
tm_shape(ne_states_sf_cb) + tm_borders() +
# Dot colors map alphabetically: Disabled=green, No Car HH=red, Over 64=blue;
# the built-in legend is suppressed in favor of the manual one below.
tm_shape(ri_nfhza_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
legend.show = FALSE) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
tm_add_legend(type = "fill", col = c("green","red","blue"),
border.col = "white", border.alpha = 0,
labels = c("Disabled", "No Car HH", "Over 64"),
title = "Population\nGroup*") +
tm_layout(title = "Transit Dependent Populations \nwithin Flood Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Show table of pops within hurricane evacuation zones
ri_HevacPops_df %>%
arrange(-HevacPop)
# Create lollipop plot of pops within hurricane evac zones
# NOTE(review): ri_HevacPops_df[1,4] positionally grabs PctHevac for the
# first row (assumed "Total Pop") as the statewide baseline — fragile if
# row/column order changes; confirm row 1 is "Total Pop".
ri_HevacPops_df %>%
ggplot(aes(x = reorder(Group,-PctHevac),
y = PctHevac)) +
geom_segment(aes(x = reorder(Group,-PctHevac),
xend = reorder(Group,-PctHevac),
y = ri_HevacPops_df[1,4], yend = PctHevac),
color = "skyblue") +
geom_point(color = "blue", size = 4, alpha = 0.8) +
coord_flip() + xlab("") + ylab("") +
ggtitle("Rhode Island Populations within Hurricane Evacuation Zones") +
theme_light() +
theme(panel.grid.major.y = element_blank(),
panel.border = element_blank(),
axis.ticks.y = element_blank()) +
geom_text(aes(x = Group, y = PctHevac + 0.2 * sign(PctHevac),
label = paste0(round(PctHevac,1),"%")),
hjust = 0.1, vjust = -0.5, size = 3,
color=rgb(100,100,100, maxColorValue=255)) +
scale_y_continuous(labels = function(x) paste0(x, "%")) +
geom_hline(yintercept = ri_HevacPops_df[1,4], linetype = "dashed") +
# Hand-placed annotations; y positions were tuned to this dataset.
geom_text(aes(x = "Disabled", y = 10.3, label = "Below state avg"),
color = "gray48") +
geom_segment(aes(x = "No Car HH", xend = "No Car HH", y = 11.4, yend = 9.6),
arrow = arrow(length = unit(0.3,"cm"))) +
geom_text(aes(x = "Low Income", y = 13, label = "Above state avg"),
color = "gray48") +
geom_segment(aes(x = "Under 18", xend = "Under 18", y = 11.8, yend = 13.6),
arrow = arrow(length = unit(0.3,"cm")))
# Dissolve the hurricane-evacuation polygons into one (multi)polygon per
# evacuation zone letter; summarize() on a grouped sf unions the geometry.
ri_hea_sf_agg <- summarize(group_by(ri_hea_sf, EVAC))
# Single-zone subsets, used later to draw each zone's outline separately.
r_hea_cata <- ri_hea_sf_agg[ri_hea_sf_agg$EVAC == "A", ]
r_hea_catb <- ri_hea_sf_agg[ri_hea_sf_agg$EVAC == "B", ]
# Map totalpop and hurricane evacuation zones.
# Same layer stack as the flood-zone total-population map, with the dissolved
# evacuation zones (A/B/C) as the hazard overlay and per-zone outline layers.
# Fix: legend label typo "Catgeory" corrected to "Category".
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_totalpop_pts) + tm_dots(col = "forestgreen",
labels = "1 dot = 100 persons") +
tm_shape(ri_hea_sf_agg) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4",
"C: Category 5"),
title = "Evacuation Zone and\nHurricane Category",
alpha = 0.6) +
tm_shape(r_hea_cata) +
tm_borders(col = "darkgoldenrod",
lwd = 0.5,
alpha = 0.6) +
tm_shape(r_hea_catb) +
tm_borders(col = "rosybrown3",
lwd = 0.5,
alpha = 0.6) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
# Manual legend entry for the dot-density layer
tm_add_legend(type = "fill", col = "forestgreen",
border.col = "white", border.alpha = 0,
labels = "1 dot = 100 persons",
title = "Total Population") +
tm_layout(title = "Population Distribution \nand Hurricane\nEvacuation Zones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
# Create a dot density of transit-dependent populations within hurricane evacuation zones
# Create random points, with 1 point for every 5 people
# (same pipeline as the flood-zone version, applied to the hevac layers)
Over64_hevac_pts <- ri_blkgrps_hevac %>%
dplyr::select(NewOver64) %>%
filter(NewOver64 >= 5) %>%
st_sample(., size = round(.$NewOver64/5)) %>% # create 1 random point for every 5 people
st_sf(.) %>%
mutate(Group = "Over 64")
Disabled_hevac_pts <- ri_tracts_hevac %>%
dplyr::select(NewDisabled) %>%
filter(NewDisabled >= 5) %>%
st_sample(., size = round(.$NewDisabled/5)) %>%
st_sf(.) %>%
mutate(Group = "Disabled")
NoCarHH_hevac_pts <- ri_tracts_hevac %>%
dplyr::select(NewNoCar) %>%
filter(NewNoCar >= 5) %>%
st_sample(., size = round(.$NewNoCar/5)) %>%
st_sf(.) %>%
mutate(Group = "No Car HH")
# Bring them together
ri_hevac_vulnerable <- rbind(Over64_hevac_pts,Disabled_hevac_pts,
NoCarHH_hevac_pts) %>%
slice(sample(1:n())) # randomise order to avoid bias in plotting order
# Map them out: transit-dependent dots over the raw (non-dissolved)
# hurricane-evacuation polygons, with state abbreviations for context.
# Fix: legend label typo "Catgeory" corrected to "Category".
tm_layout(bg.color = "#e6f3f7") +
tm_shape(ri_blkgrp_2840, unit = "mi") + tm_fill(col = "white") +
tm_shape(ne_states_sf_cb) + tm_fill(col="white") +
tm_shape(ri_awater_sf) + tm_fill(col = "#e6f3f7") +
tm_shape(ri_hea_sf) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4",
"C: Category 5"),
title = "Evacuation Zone and\nHurricane Category") +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_text("STUSPS", size = 0.7, remove.overlap = TRUE, col = "gray") +
# Dot colors map alphabetically: Disabled=green, No Car HH=red, Over 64=blue
tm_shape(ri_hevac_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
legend.show = FALSE) +
tm_shape(ne_states_sf_cb) + tm_borders() +
tm_shape(ri_highways) + tm_lines(col = "seashell4", lwd = 2) +
tm_shape(I95roadSegment) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I95roadSegment2) +
tm_symbols(shape = I95, border.lwd = NA, size = 0.25) +
tm_shape(I195roadSegment) +
tm_symbols(shape = I195, border.lwd = NA, size = 0.25) +
tm_shape(I295roadSegment) +
tm_symbols(shape = I295, border.lwd = NA, size = 0.25) +
tm_shape(ri_towns_sf_pts) + tm_dots() +
tm_text("NAME", size = 0.5, col = "black",
xmod = 0.7, ymod = 0.2, shadow = TRUE) +
tm_scale_bar(breaks = c(0, 5, 10), text.size = 0.5,
position = c(0.6,0.005)) +
tm_add_legend(type = "fill", col = c("green","red","blue"),
border.col = "white", border.alpha = 0,
labels = c("Disabled", "No Car HH", "Over 64"),
title = "Population\nGroup*") +
tm_layout(title = "Transit Dependent Populations \nwithin Hurricane Evacuation\nZones",
frame = TRUE, main.title.size = 0.8,
legend.outside = TRUE,
legend.title.size = 0.8,
legend.outside.position = c("right", "top"))
##### TEST FOR EXPLORATION
# Quick interactive look at the evacuation zones with the vulnerable dots.
# Fix: label typo "Catgeory" corrected to "Category".
# NOTE(review): the palette has 3 colors but only 2 labels are supplied —
# confirm whether a "C: Category 5" label is missing here.
tm_shape(ri_hea_sf) +
tm_fill(col = "EVAC",
palette = c("darkkhaki", "rosybrown1", "khaki"),
labels = c("A: Category 1 - 2",
"B: Category 3 - 4"),
title = "Evacuation Zone and\nHurricane Category",
alpha = 0.6) +
tm_shape(ri_hevac_vulnerable) + tm_dots(col = "Group",
palette = c("green","red","blue"),
alpha = 0.6)
|
# Build the response to a textDocument/hover request.
# Returns an empty Response when the cursor is outside hoverable scope or no
# help content is found; otherwise a Response whose result carries the help
# contents for the symbol (optionally qualified as pkg::name / pkg:::name).
hover_reply <- function(id, uri, workspace, document, position) {
  row <- position$line
  col <- position$character

  # Bail out early when the position is not in a scope we can hover on.
  if (!check_scope(uri, document, row)) {
    return(Response$new(id))
  }

  token <- detect_hover(document, row, col)
  logger$info("hover: ", token)

  # Split "pkg::name" / "pkg:::name" into its package and symbol parts;
  # match[2] is the package (or NA), match[4] the bare symbol.
  parts <- stringr::str_match(
    token, "(?:([a-zA-Z][a-zA-Z0-9]+)(:::?))?([a-zA-Z0-9_.]*)$")

  # On lookup failure fall back to an empty list (still a non-NULL result).
  help_contents <- tryCatch(
    workspace$get_help(parts[4], parts[2]),
    error = function(e) list())

  if (is.null(help_contents)) {
    return(Response$new(id))
  }
  Response$new(
    id,
    result = list(
      contents = help_contents
    )
  )
}
| /R/hover.R | no_license | renkun-ken/languageserver | R | false | false | 723 | r | hover_reply <- function(id, uri, workspace, document, position) {
line <- position$line
character <- position$character
if (!check_scope(uri, document, line)) {
return(Response$new(id))
}
hover <- detect_hover(document, line, character)
logger$info("hover: ", hover)
matches <- stringr::str_match(
hover, "(?:([a-zA-Z][a-zA-Z0-9]+)(:::?))?([a-zA-Z0-9_.]*)$")
contents <- tryCatch(
workspace$get_help(matches[4], matches[2]),
error = function(e) list())
if (is.null(contents)) {
Response$new(id)
} else {
Response$new(
id,
result = list(
contents = contents
)
)
}
}
|
# Experiment setup: benchmark LGT-style exponential smoothing against ets on
# the M3 competition data (yearly subset; other subsets left commented out).
library("forecast")
library("Mcomp")
library("Rlgt")
library("RlgtLik")
#M3.data <- append(subset(M3,"yearly"), append(subset(M3,"quarterly"), subset(M3,"monthly")))
M3.data <- subset(M3,"yearly")
#M3.data <- subset(M3,"monthly")
nseries <- length(M3.data)
#M3.data[[2]]
#M3[["N0001"]]
#str(M3)
# mySgeApply is a pluggable apply; swap in mclapply (commented below) to
# parallelise across cores.
mySgeApply <- lapply
#library(parallel)
#mySgeApply <- function(...) { mclapply(..., mc.cores=4)}
#set.seed(8)
set.seed(5)
#stanModLGT <- init.lgt()
#nseries <- 5
# curr_series is only used for the single-series post-hoc inspection below.
curr_series <- 250
#options(error=recover)
#1:nseries
#
# For each series: take the official M3 test horizon, fit every candidate
# model on the training part, and collect point forecasts over that horizon.
# Returns a list (one element per series) of named forecast vectors.
# Fix: removed a stray empty argument in the etsLGT(..., , bounds=...) call
# (it passed a missing positional argument; the parameter was unused anyway).
forecasts <- mySgeApply(1:nseries, function(curr_series) {
cat(curr_series, "\n")
sizeTestSet <- length(M3.data[[curr_series]]$xx)
data.train <- M3.data[[curr_series]]$x
mod <- list()
forecasts <- list()
#benchmarks
mod[["etsAAN"]] <- ets(data.train, model="AAN")
forecasts[["etsAAN"]] <- forecast(mod[["etsAAN"]], PI=FALSE, h=sizeTestSet)$mean
mod[["ets"]] <- ets(data.train)
forecasts[["ets"]] <- forecast(mod[["ets"]], PI=FALSE, h=sizeTestSet)$mean
mod[["etsLGT"]] <- etsLGT(data.train, bounds="usual")
forecasts[["etsLGT"]] <- forecast(mod[["etsLGT"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# Same model but optimised with MA-LS-Chains restricted to CMA-ES local search
mod[["etsLGTcmaes"]] <- etsLGT(data.train, bounds="usual", solver="malschains_c",
control=malschains.control(ls="cmaes", lsOnly=TRUE))
forecasts[["etsLGTcmaes"]] <- forecast(mod[["etsLGTcmaes"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# mod[["etsDMalsCh"]] <- etsLGT(data.train, model="AAN", damped=TRUE, solver="malschains_c",
# control=malschains.control(popsize = 5000, ls="cmaes"), maxit=50000)
# forecasts[["etsDMalsCh"]] <- forecast(mod[["etsDMalsCh"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# set.seed(curr_series)
# mod[["baggedETS"]] <- baggedETS(data.train)
# forecasts[["baggedETS"]] <- forecast(mod[["baggedETS"]], h=sizeTestSet)$mean
#
# set.seed(curr_series)
# mod[["ets"]] <- ets(data.train)
# forecasts[["ets"]] <- forecast(mod[["ets"]], h=sizeTestSet)$mean
#
#--------------------------------
#Fit LGT model
# mod[["lgt"]] <- fit.lgt(data.train, stanModel=stanModLGT, ncores=4)
# forecasts[["lgt"]] <- forecast(mod[["lgt"]], h = sizeTestSet)
#
#
# set.seed(curr_series)
# forecasts[["baggedETSold"]] <- forecast:::forecast.baggedETSold(data.train, h=sizeTestSet)$mean
#
forecasts
})
print("finished")
# Post-hoc inspection of a single series (curr_series, set above).
# NOTE(review): everything below references `mod`, which was local to the
# lapply closure above and does not exist at top level — this section only
# works when the loop body has been run manually in the global environment.
data.test <- M3.data[[curr_series]]$xx
#names(mod[["etsDcmaes"]])
#
#mod[["etsDcmaes"]]$par
#
#mod[["lgt"]]
#
#par(mfrow=c(2,1))
#plot(forecast(mod[["etsDcmaes"]], h=6))
#lines(data.test)
#
#plot(forecast(mod[["lgt"]], h=6))
#lines(data.test)
##now run evalExperiments
#names(res.claw)
# Copy the Stan posterior means from the MCMC LGT fit into the likelihood-based
# model object so both can be forecast through the same code path.
stanVec <- mod[["lgt"]]$paramMeans
mod[["etsD"]]$par
mod[["lgt"]]$paramMeans
oldPar <- mod[["etsDcmaes"]]$par
oldState <- mod[["etsDcmaes"]]$state
oldParams <- mod[["lgt"]]$params
#obj$state <- oldState
newPar <- c(alpha=stanVec$levSm, beta=stanVec$bSm,
phi=stanVec$locTrendFract, lambda=stanVec$coefTrend,
rho=stanVec$powTrend, l=stanVec$l[1], b=stanVec$b[1])
newState <- t(rbind(stanVec$l, stanVec$b))
#TODO: why are vectors l and b one value shorter?? this could be a problem
#currently, I just duplicate the first value
newState <- rbind(newState[1,], newState)
colnames(newState) <- c("l", "b")
newState <- ts(newState)
tspx <- tsp(mod[["etsDcmaes"]]$state)
#tspx[1] <- tspx[1]+1
tsp(newState) <- tspx
mod[["etsDcmaes"]]$state <- newState
mod[["etsDcmaes"]]$initstate <- mod[["etsDcmaes"]]$state[1,]
mod[["etsDcmaes"]]$par <- newPar
# Keep the untouched MCMC fit around, then transpose l/b so that the
# paramMeans can be plugged in as if they were posterior draws.
mod[["lgt_orig"]] <- mod[["lgt"]]
mod[["lgt"]]$params <- mod[["lgt"]]$paramMeans
mod[["lgt"]]$params[["l"]] <- t(mod[["lgt"]]$params[["l"]])
mod[["lgt"]]$params[["b"]] <- t(mod[["lgt"]]$params[["b"]])
# Compare the four variants side by side on the held-out data.
#pdf(file="/home/bergmeir/20161217_series250_forecasts_LGT.pdf")
par(mfrow=c(2,2))
plot(forecast(mod[["etsDcmaes"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="paramMeans, my forecast func")
#plot(forecast(mod[["etsDMalsCh"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="1", ylim=c(3000,6000))
lines(data.test)
plot(forecast(mod[["etsD"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="my implementation of LGT", ylim=c(3000,6000))
lines(data.test)
plot(forecast(mod[["lgt"]], h=sizeTestSet), main="paramMeans used for forecasting")
lines(data.test)
plot(forecast(mod[["lgt_orig"]], h=sizeTestSet), main="Original LGT")
lines(data.test)
#dev.off()
# Print fitted parameters and forecasts for manual comparison.
mod[["etsDMalsCh"]]$par
mod[["lgt_orig"]]$paramMeans
# Fix: the original call passed `h=` twice (h=sizeTestSet and h=6), which is
# an error in R ("formal argument matched by multiple actual arguments");
# the test-set horizon is kept.
forecast(mod[["etsDcmaes"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)
forecast(mod[["etsDMalsCh"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)
names(mod[["etsDcmaes"]])
#TODO: forecasts should be more or less the same, but they are not
#obj <- mod[["etsDcmaes"]]
#obj$state[length(obj$x)+1,]
#obj$state
#
#
#obj$x
#
#
#
| /scripts/runExperimentsLGT.R | no_license | cbergmeir/Rlgt | R | false | false | 4,945 | r | library("forecast")
library("Mcomp")
library("Rlgt")
library("RlgtLik")
#M3.data <- append(subset(M3,"yearly"), append(subset(M3,"quarterly"), subset(M3,"monthly")))
M3.data <- subset(M3,"yearly")
#M3.data <- subset(M3,"monthly")
nseries <- length(M3.data)
#M3.data[[2]]
#M3[["N0001"]]
#str(M3)
mySgeApply <- lapply
#library(parallel)
#mySgeApply <- function(...) { mclapply(..., mc.cores=4)}
#set.seed(8)
set.seed(5)
#stanModLGT <- init.lgt()
#nseries <- 5
curr_series <- 250
#options(error=recover)
#1:nseries
#
# For each series: take the official M3 test horizon, fit every candidate
# model on the training part, and collect point forecasts over that horizon.
# Returns a list (one element per series) of named forecast vectors.
# Fix: removed a stray empty argument in the etsLGT(..., , bounds=...) call
# (it passed a missing positional argument; the parameter was unused anyway).
forecasts <- mySgeApply(1:nseries, function(curr_series) {
cat(curr_series, "\n")
sizeTestSet <- length(M3.data[[curr_series]]$xx)
data.train <- M3.data[[curr_series]]$x
mod <- list()
forecasts <- list()
#benchmarks
mod[["etsAAN"]] <- ets(data.train, model="AAN")
forecasts[["etsAAN"]] <- forecast(mod[["etsAAN"]], PI=FALSE, h=sizeTestSet)$mean
mod[["ets"]] <- ets(data.train)
forecasts[["ets"]] <- forecast(mod[["ets"]], PI=FALSE, h=sizeTestSet)$mean
mod[["etsLGT"]] <- etsLGT(data.train, bounds="usual")
forecasts[["etsLGT"]] <- forecast(mod[["etsLGT"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# Same model but optimised with MA-LS-Chains restricted to CMA-ES local search
mod[["etsLGTcmaes"]] <- etsLGT(data.train, bounds="usual", solver="malschains_c",
control=malschains.control(ls="cmaes", lsOnly=TRUE))
forecasts[["etsLGTcmaes"]] <- forecast(mod[["etsLGTcmaes"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# mod[["etsDMalsCh"]] <- etsLGT(data.train, model="AAN", damped=TRUE, solver="malschains_c",
# control=malschains.control(popsize = 5000, ls="cmaes"), maxit=50000)
# forecasts[["etsDMalsCh"]] <- forecast(mod[["etsDMalsCh"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)$mean
# set.seed(curr_series)
# mod[["baggedETS"]] <- baggedETS(data.train)
# forecasts[["baggedETS"]] <- forecast(mod[["baggedETS"]], h=sizeTestSet)$mean
#
# set.seed(curr_series)
# mod[["ets"]] <- ets(data.train)
# forecasts[["ets"]] <- forecast(mod[["ets"]], h=sizeTestSet)$mean
#
#--------------------------------
#Fit LGT model
# mod[["lgt"]] <- fit.lgt(data.train, stanModel=stanModLGT, ncores=4)
# forecasts[["lgt"]] <- forecast(mod[["lgt"]], h = sizeTestSet)
#
#
# set.seed(curr_series)
# forecasts[["baggedETSold"]] <- forecast:::forecast.baggedETSold(data.train, h=sizeTestSet)$mean
#
forecasts
})
print("finished")
# Post-hoc inspection of a single series (curr_series, set above).
# NOTE(review): everything below references `mod`, which was local to the
# lapply closure above and does not exist at top level — this section only
# works when the loop body has been run manually in the global environment.
data.test <- M3.data[[curr_series]]$xx
#names(mod[["etsDcmaes"]])
#
#mod[["etsDcmaes"]]$par
#
#mod[["lgt"]]
#
#par(mfrow=c(2,1))
#plot(forecast(mod[["etsDcmaes"]], h=6))
#lines(data.test)
#
#plot(forecast(mod[["lgt"]], h=6))
#lines(data.test)
##now run evalExperiments
#names(res.claw)
# Copy the Stan posterior means from the MCMC LGT fit into the likelihood-based
# model object so both can be forecast through the same code path.
stanVec <- mod[["lgt"]]$paramMeans
mod[["etsD"]]$par
mod[["lgt"]]$paramMeans
oldPar <- mod[["etsDcmaes"]]$par
oldState <- mod[["etsDcmaes"]]$state
oldParams <- mod[["lgt"]]$params
#obj$state <- oldState
newPar <- c(alpha=stanVec$levSm, beta=stanVec$bSm,
phi=stanVec$locTrendFract, lambda=stanVec$coefTrend,
rho=stanVec$powTrend, l=stanVec$l[1], b=stanVec$b[1])
newState <- t(rbind(stanVec$l, stanVec$b))
#TODO: why are vectors l and b one value shorter?? this could be a problem
#currently, I just duplicate the first value
newState <- rbind(newState[1,], newState)
colnames(newState) <- c("l", "b")
newState <- ts(newState)
tspx <- tsp(mod[["etsDcmaes"]]$state)
#tspx[1] <- tspx[1]+1
tsp(newState) <- tspx
mod[["etsDcmaes"]]$state <- newState
mod[["etsDcmaes"]]$initstate <- mod[["etsDcmaes"]]$state[1,]
mod[["etsDcmaes"]]$par <- newPar
# Keep the untouched MCMC fit around, then transpose l/b so that the
# paramMeans can be plugged in as if they were posterior draws.
mod[["lgt_orig"]] <- mod[["lgt"]]
mod[["lgt"]]$params <- mod[["lgt"]]$paramMeans
mod[["lgt"]]$params[["l"]] <- t(mod[["lgt"]]$params[["l"]])
mod[["lgt"]]$params[["b"]] <- t(mod[["lgt"]]$params[["b"]])
# Compare the four variants side by side on the held-out data.
#pdf(file="/home/bergmeir/20161217_series250_forecasts_LGT.pdf")
par(mfrow=c(2,2))
plot(forecast(mod[["etsDcmaes"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="paramMeans, my forecast func")
#plot(forecast(mod[["etsDMalsCh"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="1", ylim=c(3000,6000))
lines(data.test)
plot(forecast(mod[["etsD"]], simulate=TRUE, PI=FALSE, h=sizeTestSet), main="my implementation of LGT", ylim=c(3000,6000))
lines(data.test)
plot(forecast(mod[["lgt"]], h=sizeTestSet), main="paramMeans used for forecasting")
lines(data.test)
plot(forecast(mod[["lgt_orig"]], h=sizeTestSet), main="Original LGT")
lines(data.test)
#dev.off()
# Print fitted parameters and forecasts for manual comparison.
mod[["etsDMalsCh"]]$par
mod[["lgt_orig"]]$paramMeans
# Fix: the original call passed `h=` twice (h=sizeTestSet and h=6), which is
# an error in R ("formal argument matched by multiple actual arguments");
# the test-set horizon is kept.
forecast(mod[["etsDcmaes"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)
forecast(mod[["etsDMalsCh"]], PI=FALSE, h=sizeTestSet, simulate=TRUE)
names(mod[["etsDcmaes"]])
#TODO: forecasts should be more or less the same, but they are not
#obj <- mod[["etsDcmaes"]]
#obj$state[length(obj$x)+1,]
#obj$state
#
#
#obj$x
#
#
|
#' Convert Rasch difficulties of many dichotomous items to Partial Credit Model (PCM) thresholds
#'
#' Given a set of Rasch difficulties from n dichotomous items, calculate the equivalent thresholds for a single polytomous item
#' (with n+1 categories) that fits the partial credit model (PCM).
#'
#' @param dich_diffs A vector of Rasch difficulties. The length of the vector represents the number of dichotomous items considered.
#'
#' @return A vector of thresholds for the partial credit model (PCM).
#'
#' @examples
#' \dontrun{
#' dichotomous.to.pcm(c(0,0))
#' pcm.to.dichotomous(c(-0.6931472,0.6931472))
#'
#' dichotomous.to.pcm(c(-2,-4,1))
#' pcm.to.dichotomous(c(-4.132845,-1.922140,1.054985))
#'
#' dichotomous.to.pcm(c(-1,-1,1,1))
#' pcm.to.dichotomous(c(-1.8200752,-0.6243906,0.6243906,1.8200752))
#' }
#' @export
dichotomous.to.pcm <- function (dich_diffs)
{
    nite <- length(dich_diffs)
    # Cs[i] = -log( sum over all size-i subsets S of exp(-sum(difficulties in S)) ).
    # Combine over item INDICES rather than the difficulty values themselves:
    # utils::combn(x, m) treats a length-1 numeric x as seq_len(x), so the
    # original combn(dich_diffs, i) silently produced wrong results for a
    # single dichotomous item.
    Cs <- vapply(seq_len(nite), function(i)
        -log(sum(exp(-utils::combn(seq_len(nite), i,
                                   function(s) sum(dich_diffs[s]))))),
        numeric(1))
    # Thresholds are the successive differences of the cumulative constants:
    # tau_1 = C_1 and tau_k = C_k - C_{k-1}, i.e. cumsum(taus) == Cs.
    diff(c(0, Cs))
}
#' Convert Partial Credit Model (PCM) thresholds to equivalent set of n Rasch difficulties from dichotomous items
#'
#' Given a single polytomous item with n thresholds, calculate an equivalent set of Rasch difficulties from n dichotomous items.
#' The aim is to provide a different way of interpreting the thresholds from a partial credit model by seeing
#' what set of difficulties from dichotomous items would lead to the same result.
#'
#' Note that, in practice, this function rarely works in the way we might hope.
#' Except in special circumstances (effectively when thresholds are correctly ordered and widely spaced),
#' it is likely that some (or all) of the identified Rasch difficulties will be imaginary numbers.
#' If a mix of imaginary and real numbers is returned, it may be that the
#' imaginary ones can be combined to create PCM thresholds for an item with fewer categories than started with.
#' I have not investigated this fully.
#'
#' Method here is based on numerically solving the equation for Fn(x) on page 115 of
#' Huynh, H. (1994). On equivalence between a partial credit item and a set of independent Rasch binary items. Psychometrika, 59(1), 111-119.
#' (see https://doi.org/10.1007/BF02294270). We use the expressions directly in terms of Si (above equation 6
#' in the paper) rather than those in terms of ai.
#'
#' If the solution contains any non-zero imaginary parts it implies that an exactly equivalent
#' set of dichotomous difficulties cannot be found.
#' In other words, the polytomous item cannot be considered as being the sum of multiple independent dichotomous items.
#'
#' @param pcm_thresh A vector of thresholds from the partial credit model.
#'
#' @return A data.frame with the real and imaginary parts of the equivalent dichotomous Rasch difficulties,
#' sorted by real part. Very small imaginary numbers are rounded down to zero as they are considered to likely be part of estimation error.
#'
#' @examples
#' \dontrun{
#'
#' dichotomous.to.pcm(c(0,0))
#' pcm.to.dichotomous(c(-0.6931472,0.6931472))
#'
#' dichotomous.to.pcm(c(-2,-4,1))
#' pcm.to.dichotomous(c(-4.132845,-1.922140,1.054985))
#'
#' dichotomous.to.pcm(c(-1,-1,1,1))
#' pcm.to.dichotomous(c(-1.8200752,-0.6243906,0.6243906,1.8200752))
#'
#' dich_diffs=pcm.to.dichotomous(c(-2,0,2,3.4))
#' dich_diffs
#' dichotomous.to.pcm(dich_diffs$real_part)
#'
#' #and one that doesn't work (disordered thresholds)
#' pcm.to.dichotomous(c(1,0,3))
#' #investigate PCM thresholds for the imaginary bit
#' dichotomous.to.pcm(c(0.5228831-1.301217i,0.5228831+1.301217i))
#' #so equivalent to a 2-category PCM with disordered thresholds and a dichotomous item
#'
#' }
#' @export
pcm.to.dichotomous <- function(pcm_thresh) {
  # S_i terms from Huynh (1994, above equation 6): exponentials of the
  # negative cumulative thresholds.
  Ss <- exp(-cumsum(pcm_thresh))
  n <- length(pcm_thresh)
  # Polynomial coefficients with alternating signs; the constant term is 1.
  polycoefs <- ((-1)^seq_len(n)) * Ss
  # Solutions found numerically: the candidate difficulties are the logs of
  # the (possibly complex) polynomial roots.
  roots <- log(polyroot(c(1, polycoefs)))
  real_roots <- Re(roots)
  im_roots <- Im(roots)
  # Imaginary parts below the tolerance are numerical noise from polyroot();
  # snap them to exactly zero.
  is_real <- abs(im_roots) < 0.00001
  im_roots[is_real] <- 0
  dich_difficulties <- data.frame(real_part = real_roots, imaginary_part = im_roots)
  # Present the difficulties in increasing order of their real part.
  dich_difficulties[order(dich_difficulties$real_part), ]
}
| /R/DichotomousAndPolytomous.R | permissive | CambridgeAssessmentResearch/unimirt | R | false | false | 4,567 | r | #' Convert Rasch difficulties of many dichotomous items to Partial Credit Model (PCM) thresholds
#'
#' Given a set of Rasch difficulties from n dichotomous items, calculate the equivalent thresholds for a single polytomous item
#' (with n+1 categories) that fits the partial credit model (PCM).
#'
#' @param dich_diffs A vector of Rasch difficulties. The length of the vector represents the number of dichotomous items considered.
#'
#' @return A vector of thresholds for the partial credit model (PCM).
#'
#' @examples
#' \dontrun{
#' dichotomous.to.pcm(c(0,0))
#' pcm.to.dichotomous(c(-0.6931472,0.6931472))
#'
#' dichotomous.to.pcm(c(-2,-4,1))
#' pcm.to.dichotomous(c(-4.132845,-1.922140,1.054985))
#'
#' dichotomous.to.pcm(c(-1,-1,1,1))
#' pcm.to.dichotomous(c(-1.8200752,-0.6243906,0.6243906,1.8200752))
#' }
#' @export
dichotomous.to.pcm <- function(dich_diffs) {
  # Number of dichotomous items being combined into one polytomous item.
  n_items <- length(dich_diffs)
  # For each subset size k, accumulate the log-sum term over every k-item
  # combination of difficulties (Rasch exponent form).
  cum_terms <- vapply(seq_len(n_items), function(k) {
    subset_sums <- colSums(-utils::combn(dich_diffs, k))
    -log(sum(exp(subset_sums)))
  }, numeric(1))
  # Peel the thresholds off the cumulative terms one at a time: each threshold
  # is the current term minus everything already assigned.
  thresholds <- numeric(n_items)
  assigned <- 0
  for (k in seq_len(n_items)) {
    thresholds[k] <- cum_terms[k] - assigned
    assigned <- assigned + thresholds[k]
  }
  return(thresholds)
}
#' Convert Partial Credit Model (PCM) thresholds to equivalent set of n Rasch difficulties from dichotomous items
#'
#' Given a single polytomous item with n thresholds, calculate an equivalent set of Rasch difficulties from n dichotomous items.
#' The aim is to provide a different way of interpreting the thresholds from a partial credit model by seeing
#' what set of difficulties from dichotomous items would lead to the same result.
#'
#' Note that, in practice, this function rarely works in the way we might hope.
#' Except in special circumstances (effectively when thresholds are correctly ordered and widely spaced),
#' it is likely that some (or all) of the identified Rasch difficulties will be imaginary numbers.
#' If a mix of imaginary and real numbers is returned, it may be that the
#' imaginary ones can be combined to create PCM thresholds for an item with fewer categories than started with.
#' I have not investigated this fully.
#'
#' Method here is based on numerically solving the equation for Fn(x) on page 115 of
#' Huynh, H. (1994). On equivalence between a partial credit item and a set of independent Rasch binary items. Psychometrika, 59(1), 111-119.
#' (see https://doi.org/10.1007/BF02294270). We use the expressions directly in terms of Si (above equation 6
#' in the paper) rather than those in terms of ai.
#'
#' If the solution contains any non-zero imaginary parts it implies that an exactly equivalent
#' set of dichotomous difficulties cannot be found.
#' In other words, the polytomous item cannot be considered as being the sum of multiple independent dichotomous items.
#'
#' @param pcm_thresh A vector of thresholds from the partial credit model.
#'
#' @return A data.frame with the real and imaginary parts of the equivalent dichotomous Rasch difficulties.
#' Very small imaginary numbers are rounded down to zero as they are considered to likely be part of estimation error.
#'
#' @examples
#' \dontrun{
#'
#' dichotomous.to.pcm(c(0,0))
#' pcm.to.dichotomous(c(-0.6931472,0.6931472))
#'
#' dichotomous.to.pcm(c(-2,-4,1))
#' pcm.to.dichotomous(c(-4.132845,-1.922140,1.054985))
#'
#' dichotomous.to.pcm(c(-1,-1,1,1))
#' pcm.to.dichotomous(c(-1.8200752,-0.6243906,0.6243906,1.8200752))
#'
#' dich_diffs=pcm.to.dichotomous(c(-2,0,2,3.4))
#' dich_diffs
#' dichotomous.to.pcm(dich_diffs$real_part)
#'
#' #and one that doesn't work (disordered thresholds)
#' pcm.to.dichotomous(c(1,0,3))
#' #investigate PCM thresholds for the imaginary bit
#' dichotomous.to.pcm(c(0.5228831-1.301217i,0.5228831+1.301217i))
#' #so equivalent to a 2-category PCM with disordered thresholds and a dichotomous item
#'
#' }
#' @export
pcm.to.dichotomous <- function(pcm_thresh) {
  n_thresh <- length(pcm_thresh)
  # S_i terms (Huynh, 1994): exponentials of the negative cumulative thresholds.
  S_terms <- exp(-cumsum(pcm_thresh))
  # Full coefficient vector of the polynomial, alternating signs, constant 1.
  coefs <- c(1, ((-1)^seq_len(n_thresh)) * S_terms)
  # Difficulties are the logs of the numerically-found (complex) roots.
  log_roots <- log(polyroot(coefs))
  re_part <- Re(log_roots)
  im_part <- Im(log_roots)
  # Numerically tiny imaginary parts are estimation noise; zero them out.
  im_part[abs(im_part) < 0.00001] <- 0
  out <- data.frame(real_part = re_part, imaginary_part = im_part)
  # Return sorted by the real part of each difficulty.
  return(out[order(out$real_part), ])
}
|
# Unfortunately, this cannot be run non-interactively (or at least
# I have not thought of a way to do so)
library(gridGraphics)
# Interactive-only test for identify(): it blocks waiting for a user click, so
# the test is wrapped in a function that is never invoked automatically (hence
# the name "notrun"); see the comment above about non-interactive execution.
notrun <- function() {
plot(1)
# identify() waits for the user to click near the plotted point -- cannot run headless.
identify(1)
# Snapshot the display list after the interaction, close the device, then
# compare the replayed snapshot against the stored "identify" reference output.
# NOTE(review): plotdiff() is presumably a gridGraphics test-script helper
# sourced elsewhere -- confirm before relying on it here.
dl <- recordPlot()
dev.off()
plotdiff(expression(replayPlot(dl)), "identify")
}
| /gridGraphics/test-scripts/test-identify.R | permissive | solgenomics/R_libs | R | false | false | 305 | r |
# Unfortunately, this cannot be run non-interactively (or at least
# I have not thought of a way to do so)
library(gridGraphics)
# Wrapper for an identify() regression check that must be run by hand:
# identify() requires an interactive click, so this function is defined but
# deliberately never called in automated runs.
notrun <- function() {
plot(1)
identify(1)  # blocks until the user clicks; interactive sessions only
# Record the resulting display list, close the device, and diff the replay
# against the reference plot named "identify".
dl <- recordPlot()
dev.off()
plotdiff(expression(replayPlot(dl)), "identify")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlapping_measure.R
\name{DOL_f}
\alias{DOL_f}
\title{Degree of overlapping (DOL) function for mixture model}
\usage{
DOL_f(x, alpha, beta, w)
}
\arguments{
\item{x}{Cauchy mixture data}
\item{alpha}{location parameters}
\item{beta}{scale parameters}
\item{w}{weight parameters}
}
\value{
DOL function
}
\description{
Degree of overlapping (DOL) function for mixture model
}
| /MMt/man/DOL_f.Rd | permissive | Likelyt/Mixture-Model-tools | R | false | true | 458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlapping_measure.R
\name{DOL_f}
\alias{DOL_f}
\title{Degree of overlapping (DOL) function for mixture model}
\usage{
DOL_f(x, alpha, beta, w)
}
\arguments{
\item{x}{Cauchy mixture data}
\item{alpha}{location parameters}
\item{beta}{scale parameters}
\item{w}{weight parameters}
}
\value{
DOL function
}
\description{
Degree of overlapping (DOL) function for mixture model
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{punting}
\alias{punting}
\title{American football punting}
\format{A data frame with 13 observations on the following 7 variables.
\describe{ \item{distance}{ mean distance for 10 punts (feet) }
\item{hang}{ mean hang time (seconds) }
\item{rStrength}{ right leg strength (pounds)}
\item{lStrength}{ left leg strength (pounds)}
\item{rFlexibility}{ right leg flexibility (degrees)}
\item{lFlexibility}{ left leg flexibility (degrees)}
\item{oStrength}{ overall leg strength (foot-pounds)} }}
\source{
These data are also available at OzDASL
(\url{http://www.statsci.org/data/}).
}
\description{
Investigators studied physical characteristics and ability in 13 football
punters. Each volunteer punted a football ten times. The investigators
recorded the average distance for the ten punts, in feet. They also recorded
the average hang time (time the ball is in the air before the receiver
catches it), and a number of measures of leg strength and flexibility.
}
\examples{
data(punting)
xyplot(hang ~ distance, data=punting)
}
\references{
"The relationship between selected physical performance
variables and football punting ability" by the Department of Health,
Physical Education and Recreation at the Virginia Polytechnic Institute and
State University, 1983.
}
\keyword{datasets}
| /man/punting.Rd | no_license | cran/fastR | R | false | true | 1,396 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{punting}
\alias{punting}
\title{American football punting}
\format{A data frame with 13 observations on the following 7 variables.
\describe{ \item{distance}{ mean distance for 10 punts (feet) }
\item{hang}{ mean hang time (seconds) }
\item{rStrength}{ right leg strength (pounds)}
\item{lStrength}{ left leg strength (pounds)}
\item{rFlexibility}{ right leg flexibility (degrees)}
\item{lFlexibility}{ left leg flexibility (degrees)}
\item{oStrength}{ overall leg strength (foot-pounds)} }}
\source{
These data are also available at OzDASL
(\url{http://www.statsci.org/data/}).
}
\description{
Investigators studied physical characteristics and ability in 13 football
punters. Each volunteer punted a football ten times. The investigators
recorded the average distance for the ten punts, in feet. They also recorded
the average hang time (time the ball is in the air before the receiver
catches it), and a number of measures of leg strength and flexibility.
}
\examples{
data(punting)
xyplot(hang ~ distance, data=punting)
}
\references{
"The relationship between selected physical performance
variables and football punting ability" by the Department of Health,
Physical Education and Recreation at the Virginia Polytechnic Institute and
State University, 1983.
}
\keyword{datasets}
|
#' Get the pending streams for a dataset
#'
#' Retrieves the number of pending messages. Use [appendStream()] to
#' append all pending streamed rows to the dataset.
#'
#' @param ds a CrunchDataset
#' @return number of pending messages in the stream for the dataset
#' @export
pendingStream <- function(ds) {
stopifnot(is.dataset(ds))
stream_cat <- ShojiEntity(crGET(shojiURL(ds, "fragments", "stream")))
stream_cat$pending_messages
}
#' Stream data to a Crunch dataset
#'
#' @param ds a CrunchDataset
#' @param data a data.frame with data to send as a stream, The given data values
#' must be in the Crunch I/O format (for example, category ids instead of
#' names or numeric_values)
#' @keywords internal
streamRows <- function(ds, data) {
if (nrow(data)) {
payload <- by(data, seq_len(nrow(data)), function(row) toJSON(row))
payload <- paste0(payload, collapse = "\n")
crPOST(shojiURL(ds, "fragments", "stream"), body = payload)
}
invisible(refresh(ds))
}
#' Manually trigger a pending append to a dataset
#'
#' Crunch allows you to stream data to a dataset. Streaming data is useful for
#' datasets which have frequent updates (see the
#' [Crunch API documentation](https://docs.crunch.io/#streaming-rows) for more
#' information). Crunch automatically appends streamed data periodically;
#' however, if you would like to trigger appending pending streamed data to a
#' dataset, you can call `appendStream()`.
#'
#' @param ds a CrunchDataset
#' @return the dataset with pending stream data appended.
#' @export
appendStream <- function(ds) {
stopifnot(is.dataset(ds))
if (pendingStream(ds) < 1) {
message("There's no pending stream data to be appended.")
return(ds)
}
ds <- addBatch(ds, type = "ldjson", stream = NULL)
return(ds)
}
#' Set the streaming property of a dataset
#'
#' Only datasets that have their streaming property set to "streaming" can
#' have rows streamed to them. Before attempting to streaming rows (with
#' [streamRows] for example), the dataset has to be set up to stream rows. Use
#' `streaming(ds)` to get the streaming status, and `streaming(ds) <-
#' "streaming"` to set the streaming status.
#'
#' @param x a CrunchDataset
#' @param value for setting only (values can be: `"no"`, `"streaming"`, or
#' `"finished"`)
#'
#' @return the streaming status
#' @rdname streaming
#' @export
streaming <- function(x) {
stopifnot(is.dataset(x))
return(x@body$streaming)
}
#' @rdname streaming
#' @export
`streaming<-` <- function(x, value = c("no", "streaming", "finished")) {
stopifnot(is.dataset(x))
value <- match.arg(value)
return(setEntitySlot(x, "streaming", value))
}
| /R/dataset-stream.R | no_license | nealrichardson/rcrunch | R | false | false | 2,708 | r | #' Get the pending streams for a dataset
#'
#' Retrieves the number of pending messages. Use [appendStream()] to
#' append all pending streamed rows to the dataset.
#'
#' @param ds a CrunchDataset
#' @return number of pending messages in the stream for the dataset
#' @export
pendingStream <- function(ds) {
stopifnot(is.dataset(ds))
stream_cat <- ShojiEntity(crGET(shojiURL(ds, "fragments", "stream")))
stream_cat$pending_messages
}
#' Stream data to a Crunch dataset
#'
#' @param ds a CrunchDataset
#' @param data a data.frame with data to send as a stream, The given data values
#' must be in the Crunch I/O format (for example, category ids instead of
#' names or numeric_values)
#' @keywords internal
streamRows <- function(ds, data) {
if (nrow(data)) {
payload <- by(data, seq_len(nrow(data)), function(row) toJSON(row))
payload <- paste0(payload, collapse = "\n")
crPOST(shojiURL(ds, "fragments", "stream"), body = payload)
}
invisible(refresh(ds))
}
#' Manually trigger a pending append to a dataset
#'
#' Crunch allows you to stream data to a dataset. Streaming data is useful for
#' datasets which have frequent updates (see the
#' [Crunch API documentation](https://docs.crunch.io/#streaming-rows) for more
#' information). Crunch automatically appends streamed data periodically;
#' however, if you would like to trigger appending pending streamed data to a
#' dataset, you can call `appendStream()`.
#'
#' @param ds a CrunchDataset
#' @return the dataset with pending stream data appended.
#' @export
appendStream <- function(ds) {
stopifnot(is.dataset(ds))
if (pendingStream(ds) < 1) {
message("There's no pending stream data to be appended.")
return(ds)
}
ds <- addBatch(ds, type = "ldjson", stream = NULL)
return(ds)
}
#' Set the streaming property of a dataset
#'
#' Only datasets that have their streaming property set to "streaming" can
#' have rows streamed to them. Before attempting to streaming rows (with
#' [streamRows] for example), the dataset has to be set up to stream rows. Use
#' `streaming(ds)` to get the streaming status, and `streaming(ds) <-
#' "streaming"` to set the streaming status.
#'
#' @param x a CrunchDataset
#' @param value for setting only (values can be: `"no"`, `"streaming"`, or
#' `"finished"`)
#'
#' @return the streaming status
#' @rdname streaming
#' @export
streaming <- function(x) {
stopifnot(is.dataset(x))
return(x@body$streaming)
}
#' @rdname streaming
#' @export
`streaming<-` <- function(x, value = c("no", "streaming", "finished")) {
stopifnot(is.dataset(x))
value <- match.arg(value)
return(setEntitySlot(x, "streaming", value))
}
|
#' 'imageId'
#'
#' Read the SOPInstanceUID metadata tag of every DICOM header produced by the
#' DICOMHeaderList function.
#'
#' @param DICOMList A list of DICOM headers, e.g. DICOMList<-DICOMHeaderList(DICOMFolderPath)
#' @import digest
#' @import dplyr
#' @importFrom magrittr "%>%"
#'
#' @return A one-column data.frame ('imageId') with the anonymized
#' SOPInstanceUID of each DICOM file; files without the tag get the literal
#' string 'NA'.
#' @examples
#' DICOMList<-DICOMHeaderList(DICOMFolderPath)
#' imageId(DICOMList)
#' @export
imageId <- function(DICOMList) {
  ids <- lapply(DICOMList, function(x) {
    id <- as.character(x[[1]] %>% dplyr::filter(name == 'SOPInstanceUID') %>% dplyr::select(value))
    # as.character() deparses the selected one-column data frame, so a missing
    # tag shows up as the literal string "character(0)" (or "integer(0)" for an
    # integer column). %in% performs the sentinel test in one NA-safe scalar
    # check instead of chaining vectorised `|` comparisons inside `if`.
    if (id %in% c("character(0)", "", "integer(0)")) {
      id <- 'NA'
    }
    id
  })
  # Stack the per-file strings into a single-column data.frame.
  ids <- as.data.frame(do.call(rbind, ids))
  colnames(ids) <- 'imageId'
  return(ids)
}
| /RadETL/R/imageId_ImageTable.R | no_license | emad0525/Radiology-CDM | R | false | false | 902 | r | #' 'imageId'
#'
#' imageId function will read SOPInstanceUID metadata of all DICOM files read by DICOMHeaderList function
#'
#'
#' @param DICOMList you can put it like this and then run the function : DICOMList<-DICOMHeaderList(DICOMFolderPath)
#' @import digest
#' @import dplyr
#' @importFrom magrittr "%>%"
#'
#'
#' @return A list containing anonymized SOPInstanceUID of DICOM
#' @examples
#' DICOMList<-DICOMHeaderList(DICOMFolderPath)
#' imageId(DICOMList)
#' @export
# Extract the SOPInstanceUID tag from each DICOM header in DICOMList and
# return the values as a one-column data.frame named 'imageId'.
imageId<-function(DICOMList){
imageId<-lapply(DICOMList, function(x){
# Deparse the one-column filter/select result to a string; an absent tag
# deparses to "character(0)"/"integer(0)" rather than an empty string.
imageId<-as.character(x[[1]] %>% dplyr::filter(name=='SOPInstanceUID') %>% dplyr::select(value))
# NOTE(review): scalar `if` combined with vectorised `|`; this works because
# the deparsed value has length 1, but `||` would be the idiomatic form.
if(imageId=="character(0)" | imageId=="" | imageId=="integer(0)"){
imageId='NA'
}
return(imageId)
})
# Stack the per-file strings into a single-column data.frame.
imageId<-as.data.frame(do.call(rbind, imageId))
colnames(imageId)<-'imageId'
return(imageId)
}
|
# ---HEADER-------------------------------------------------------------------------------------------------------------
# Purpose: Create proportions to split ISIC based on concordance tables
# Map the flows between major groups across ISIC versions in order to crosswalk between versions
#********************************************************************************************************************************
# ---CONFIG----------------------------------------------------------------------------------------------------------------------
# clear memory
# NOTE(review): rm(list=ls()) only clears the global environment and is unsafe
# if this script is ever sourced from another session; kept as-is for now.
rm(list=ls())
# disable scientific notation
options(scipen = 999)
# load packages
library(Cairo, lib.loc = "FILEPATH")
pacman::p_load(data.table, gridExtra, ggplot2, lme4, magrittr, parallel, stringr)
library(xlsx, lib.loc = "FILEPATH")
# set working directories
home.dir <- "FILEPATH"
setwd(home.dir)
# set values for project (Cairo bitmap device for headless plotting)
options(bitmapType="cairo")
## in ## -- input locations (actual paths redacted as FILEPATH)
data.dir <- file.path(home.dir, "FILEPATH")
doc.dir <- file.path(home.dir, "FILEPATH")
isic.map <- file.path(doc.dir, "FILEPATH")
## out ## -- output locations
out.dir <- file.path(home.dir, "FILEPATH")
graph.dir <- file.path(home.dir, "FILEPATH")
#***********************************************************************************************************************
# ---FUNCTIONS----------------------------------------------------------------------------------------------------------
## function lib ## -- shared helper scripts (names redacted as FUNCTION)
# general functions #
central.function.dir <- "FILEPATH"
ubcov.function.dir <- "FILEPATH"
# this pulls the general misc helper functions
file.path(central.function.dir, "FUNCTION") %>% source
# other tools created by covs team for querying db (personal version)
file.path(central.function.dir, "FUNCTION") %>% source
# other tools created by covs team for querying db
file.path(ubcov.function.dir, "FUNCTION") %>% source
# central functions
file.path('FUNCTION') %>% source
# custom fx
"%ni%" <- Negate("%in%") # create a reverse %in% operator: x %ni% y is !(x %in% y)
#riverplot
# makeRivPlot: build a three-column river (Sankey-style) diagram describing how
# observations flow between major groups across ISIC versions.
#   data1 -- data.table with columns named by var1 and var2
#   data2 -- data.table with columns named by var2 and var3
#   var1/var2/var3 -- character column names of the major-group factors
# Returns a riverplot object (from makeRiver) for later plotting/manipulation.
# NOTE(review): node colours are drawn with sample(), so the palette differs
# between runs unless set.seed() is called beforehand.
makeRivPlot <- function(data1, data2, var1, var2, var3) {
require(dplyr) # Needed for the count function
require(riverplot) # Does all the real work
require(RColorBrewer) # To assign nice colours
#browser()
#pull out all your node IDs (unique levels of each variable, column by column)
names <- mapply(function(dt, var) unique(dt[, var, with=F]),
dt=list(data1, data1, data2),
var=c(var1, var2, var3))
# human-readable labels: swap the "major_" prefix for the short-label column
labels <- mapply(function(dt, var) unique(dt[, str_replace(var,
"major_",
"major_label_short_isic_"), with=F]),
dt=list(data1, data1, data2),
var=c(var1, var2, var3))
# prefix each label with its node ID, then insert line breaks for readability
labels <- mapply(function(list1, list2) paste0(list1, ":\n", list2), names, labels)
labels <- lapply(labels, function(var) str_replace(var, pattern=";", replacement=";\n"))
labels <- lapply(labels, function(var) str_replace(var, pattern="and", replacement="\nand"))
#create the edge object by subsetting your dts to the relevant variable combos and then collapsing with count
dt1 <- data1[, c(var1, var2), with=F]
dt2 <- data2[, c(var2, var3), with=F]
edges <- rbind(dt1, dt2, use.names=F) %>% count
colnames(edges) <- c("N1", "N2", "Value")
#function to make a color palette with lots of colors from ColorBrewer
manyColors <- function(var1) {
n <- length(var1)
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
sample(col_vector, n) %>% return
}
#create node object; x places each node in column 1/2/3 by ISIC version
nodes <- data.frame(ID = names %>% unlist,
x = c(rep(1, times = length(names[[1]])),
rep(2, times = length(names[[2]])),
rep(3, times = length(names[[3]]))), #x coord defined as 1/2/3 based on ISIC version
col = paste0(lapply(names, manyColors) %>% unlist, 75), # "75" suffix: presumably hex alpha -- confirm
labels = labels %>% unlist, #labels and IDs are equal in this usecase
stringsAsFactors=FALSE)
#make riverplot object
river <- makeRiver(nodes, edges) %>% return #return river object for manipulation
}
#***********************************************************************************************************************
#***********************************************************************************************************************
# ---ISICv2 to ISICv3---------------------------------------------------------------------------------------------------
#read in the ISICv2->3 concordance table and analyze the flows between major groups
dt <- file.path(data.dir, 'FILEPATH') %>% fread
setnames(dt, c('ISIC2', 'partialISIC2', 'ISIC3', 'partialISIC3'), c('code_isic_2', 'partial_2', 'code_isic_3', 'partial_3'))
#read in the map to major groups (sheet3=rev3, sheet4=rev2)
isic_3_map <- read.xlsx(isic.map, sheetIndex = 3) %>% as.data.table
isic_2_map <- read.xlsx(isic.map, sheetIndex = 4) %>% as.data.table
#***********************************************************************************************************************
# ---ISICv3.1 to ISICv4-------------------------------------------------------------------------------------------------
#read in the ISICv3.1->4 concordance table and analyze the flows between major groups
dt <- file.path(data.dir, 'FILEPATH') %>% fread
setnames(dt, c('ISIC31code', 'partialISIC31', 'ISIC4code', 'partialISIC4'), c('code_isic_3', 'partial_3', 'code_isic_4', 'partial_4'))
#read in the map to major groups (sheet2=rev3.1, sheet1=rev4)
isic_3_map <- read.xlsx(isic.map, sheetIndex = 2) %>% as.data.table
setnames(isic_3_map, names(isic_3_map), paste0(names(isic_3_map), "_isic_3"))
isic_4_map <- read.xlsx(isic.map, sheetIndex = 1) %>% as.data.table
setnames(isic_4_map, names(isic_4_map), paste0(names(isic_4_map), "_isic_4"))
#extract minor groups as the first 2 digits to merge on major groups
dt[, minor_isic_3 := substr(code_isic_3, start = 1, stop = 2)]
dt[, minor_isic_4 := substr(code_isic_4, start = 1, stop = 2)]
all_3_4 <- merge(dt, isic_3_map, by='minor_isic_3')
all_3_4 <- merge(all_3_4, isic_4_map, by='minor_isic_4')
all_3_4[, major_3 := as.factor(paste0("3_", major_isic_3))] #factor for riverplot
all_3_4[, major_4 := as.factor(paste0("4_", major_isic_4))] #factor for riverplot
#create proportions to split v 4 major groups into v 3 major groups
collapse <- all_3_4[, list(code_isic_3, code_isic_4, major_isic_3, major_label_short_isic_3,
major_isic_4, major_label_short_isic_4)]
collapse <- collapse[, .N, by=c("major_isic_3", "major_label_short_isic_3", "major_isic_4", "major_label_short_isic_4")]
collapse[, denom_wt := sum(N), by='major_isic_3']
collapse[, denom_prop := sum(N), by='major_isic_4']
collapse[, weight := N/denom_wt]
collapse[, prop := N/denom_prop]
write.csv(collapse, file.path(out.dir, "FILEPATH"),row.names=F)
| /gbd_2019/risk_factors_code/occ/employment/isic_crosswalk.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 8,210 | r | # ---HEADER-------------------------------------------------------------------------------------------------------------
# Purpose: Create proportions to split ISIC based on concordance tables
# Map the flows between major groups across ISIC versions in order to crossawlk between versions
#********************************************************************************************************************************
# ---CONFIG----------------------------------------------------------------------------------------------------------------------
# clear memory
rm(list=ls())
# disable scientific notation
options(scipen = 999)
# load packages
library(Cairo, lib.loc = "FILEPATH")
pacman::p_load(data.table, gridExtra, ggplot2, lme4, magrittr, parallel, stringr)
library(xlsx, lib.loc = "FILEPATH")
# set working directories
home.dir <- "FILEPATH"
setwd(home.dir)
#set values for project
options(bitmapType="cairo")
##in##
data.dir <- file.path(home.dir, "FILEPATH")
doc.dir <- file.path(home.dir, "FILEPATH")
isic.map <- file.path(doc.dir, "FILEPATH")
##out##
out.dir <- file.path(home.dir, "FILEPATH")
graph.dir <- file.path(home.dir, "FILEPATH")
#***********************************************************************************************************************
# ---FUNCTIONS----------------------------------------------------------------------------------------------------------
##function lib##
#general functions#
central.function.dir <- "FILEPATH"
ubcov.function.dir <- "FILEPATH"
# this pulls the general misc helper functions
file.path(central.function.dir, "FUNCTION") %>% source
# other tools created by covs team for querying db (personal version)
file.path(central.function.dir, "FUNCTION") %>% source
# other tools created by covs team for querying db
file.path(ubcov.function.dir, "FUNCTION") %>% source
# central functions
file.path('FUNCTION') %>% source
#custom fx
"%ni%" <- Negate("%in%") # create a reverse %in% operator
#riverplot
makeRivPlot <- function(data1, data2, var1, var2, var3) {
require(dplyr) # Needed for the count function
require(riverplot) # Does all the real work
require(RColorBrewer) # To assign nice colours
#browser()
#pull out all your node IDs
names <- mapply(function(dt, var) unique(dt[, var, with=F]),
dt=list(data1, data1, data2),
var=c(var1, var2, var3))
labels <- mapply(function(dt, var) unique(dt[, str_replace(var,
"major_",
"major_label_short_isic_"), with=F]),
dt=list(data1, data1, data2),
var=c(var1, var2, var3))
labels <- mapply(function(list1, list2) paste0(list1, ":\n", list2), names, labels)
labels <- lapply(labels, function(var) str_replace(var, pattern=";", replacement=";\n"))
labels <- lapply(labels, function(var) str_replace(var, pattern="and", replacement="\nand"))
#create the edge object by subsetting your dts to the relevant variable combos and then collapsing with count
dt1 <- data1[, c(var1, var2), with=F]
dt2 <- data2[, c(var2, var3), with=F]
edges <- rbind(dt1, dt2, use.names=F) %>% count
colnames(edges) <- c("N1", "N2", "Value")
#function to make a color palette with lots of colors from ColorBrewer
manyColors <- function(var1) {
n <- length(var1)
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
sample(col_vector, n) %>% return
}
#create node object
nodes <- data.frame(ID = names %>% unlist,
x = c(rep(1, times = length(names[[1]])),
rep(2, times = length(names[[2]])),
rep(3, times = length(names[[3]]))), #x coord defined as 1/2/3 based on ISIC version
col = paste0(lapply(names, manyColors) %>% unlist, 75),
labels = labels %>% unlist, #labels and IDs are equal in this usecase
stringsAsFactors=FALSE)
#make riverplot object
river <- makeRiver(nodes, edges) %>% return #return river object for manipulation
}
#***********************************************************************************************************************
#***********************************************************************************************************************
# ---ISICv2 to ISICv3---------------------------------------------------------------------------------------------------
#read in the ISICv2->3 concordance table and analyze the flows between major groups
# Pull the rev2 -> rev3 code-level concordance and standardise column names.
dt <- file.path(data.dir, 'FILEPATH') %>% fread
setnames(dt, c('ISIC2', 'partialISIC2', 'ISIC3', 'partialISIC3'), c('code_isic_2', 'partial_2', 'code_isic_3', 'partial_3'))
#read in the map to major groups (sheet3=rev3, sheet4=rev2)
isic_3_map <- read.xlsx(isic.map, sheetIndex = 3) %>% as.data.table
# Suffix every column so the two maps can be merged without name clashes.
setnames(isic_3_map, names(isic_3_map), paste0(names(isic_3_map), "_isic_3"))
isic_2_map <- read.xlsx(isic.map, sheetIndex = 4) %>% as.data.table
setnames(isic_2_map, names(isic_2_map), paste0(names(isic_2_map), "_isic_2"))
#extract minor groups as the first 2 digits to merge on major groups
dt[, minor_isic_3 := substr(code_isic_3, start = 1, stop = 2)]
# NOTE(review): only the rev2 minor group is coerced to numeric here --
# presumably the rev2 map sheet stores it as a number; confirm against isic.map.
dt[, minor_isic_2 := substr(code_isic_2, start = 1, stop = 2) %>% as.numeric]
all_2_3 <- merge(dt, isic_3_map, by='minor_isic_3')
all_2_3 <- merge(all_2_3, isic_2_map, by='minor_isic_2')
all_2_3[, major_2 := as.factor(paste0("2_", major_isic_2))] #factor for riverplot
all_2_3[, major_3 := as.factor(paste0("3_", major_isic_3))]
#create proportions to split v 2 major groups into v 3 major groups
collapse <- all_2_3[, list(code_isic_2, code_isic_3, major_isic_3, major_label_short_isic_3,
major_isic_2, major_label_short_isic_2)]
# One row per (rev3 major, rev2 major) pair; N counts the code-level links.
collapse <- collapse[, .N, by=c("major_isic_3", "major_label_short_isic_3", "major_isic_2", "major_label_short_isic_2")]
# weight: share of each rev3 major mapping to a given rev2 major;
# prop: share of each rev2 major mapping to a given rev3 major.
collapse[, denom_wt := sum(N), by='major_isic_3']
collapse[, denom_prop := sum(N), by='major_isic_2']
collapse[, weight := N/denom_wt]
collapse[, prop := N/denom_prop]
write.csv(collapse, file.path(out.dir, "FILEPATH"),row.names=F)
#***********************************************************************************************************************
# ---ISICv3.1 to ISICv4-------------------------------------------------------------------------------------------------
#read in the ISICv3.1->4 concordance table and analyze the flows between major groups
# Pull the rev3.1 -> rev4 code-level concordance and standardise column names.
dt <- file.path(data.dir, 'FILEPATH') %>% fread
setnames(dt, c('ISIC31code', 'partialISIC31', 'ISIC4code', 'partialISIC4'), c('code_isic_3', 'partial_3', 'code_isic_4', 'partial_4'))
#read in the map to major groups (sheet2=rev3.1, sheet1=rev4)
isic_3_map <- read.xlsx(isic.map, sheetIndex = 2) %>% as.data.table
# Suffix every column so the two maps can be merged without name clashes.
setnames(isic_3_map, names(isic_3_map), paste0(names(isic_3_map), "_isic_3"))
isic_4_map <- read.xlsx(isic.map, sheetIndex = 1) %>% as.data.table
setnames(isic_4_map, names(isic_4_map), paste0(names(isic_4_map), "_isic_4"))
#extract minor groups as the first 2 digits to merge on major groups
# (note: unlike the rev2->3 block earlier in this file, neither minor group is
# coerced to numeric here -- confirm that the merge keys have matching types)
dt[, minor_isic_3 := substr(code_isic_3, start = 1, stop = 2)]
dt[, minor_isic_4 := substr(code_isic_4, start = 1, stop = 2)]
all_3_4 <- merge(dt, isic_3_map, by='minor_isic_3')
all_3_4 <- merge(all_3_4, isic_4_map, by='minor_isic_4')
all_3_4[, major_3 := as.factor(paste0("3_", major_isic_3))] #factor for riverplot
all_3_4[, major_4 := as.factor(paste0("4_", major_isic_4))] #factor for riverplot
#create proportions to split v 4 major groups into v 3 major groups
collapse <- all_3_4[, list(code_isic_3, code_isic_4, major_isic_3, major_label_short_isic_3,
major_isic_4, major_label_short_isic_4)]
# One row per (rev3 major, rev4 major) pair; N counts the code-level links.
collapse <- collapse[, .N, by=c("major_isic_3", "major_label_short_isic_3", "major_isic_4", "major_label_short_isic_4")]
# weight: share of each rev3 major mapping to a rev4 major; prop: the reverse.
collapse[, denom_wt := sum(N), by='major_isic_3']
collapse[, denom_prop := sum(N), by='major_isic_4']
collapse[, weight := N/denom_wt]
collapse[, prop := N/denom_prop]
write.csv(collapse, file.path(out.dir, "FILEPATH"),row.names=F)
|
# Simulation Study Code for:
# No Measurement Error
# 2n
# 10
# Missing Not at Random
# GLMNET
# Last Modified: 3/7/2020
Sys.setenv(JAVA_HOME='')
library(earth)
library(randomForest)
library(DMwR)
library(caret)
library(caretEnsemble)
library(pROC)
library(glmnet)
library(plotROC)
library(tictoc)
library(mice)
library(gtools)
library(data.table)
library(readxl)
library(openxlsx)
set.seed(6) # Random seed used for all 500 iterations
auc_list <- c() # Vector accumulating one test-set AUC per iteration
mod <- c() # Accumulates the chosen glmnet tuning parameters (one row per iteration)
# Name of the file that will output the AUC values. Its name consists
# of the four data mining properties and the method from the caret package
of="NoError_2n_10_MNAR_GLMNET.csv"
# The execution time will also be recorded
tic("timer")
# 500 iterations of this program will be run
for (i in 1:500){
  n <- 1500 # Size of the training + testing corpus
  # Generate 12 predictors from a standard normal distribution (mean 0, var 1);
  # only x1-x6 enter the outcome model below, x7-x12 are noise predictors.
  x1 <- rnorm(n,mean = 0,sd = 1)
  x2 <- rnorm(n,mean = 0,sd = 1)
  x3 <- rnorm(n,mean = 0,sd = 1)
  x4 <- rnorm(n,mean = 0,sd = 1)
  x5 <- rnorm(n,mean = 0,sd = 1)
  x6 <- rnorm(n,mean = 0,sd = 1)
  x7 <- rnorm(n,mean = 0,sd = 1)
  x8 <- rnorm(n,mean = 0,sd = 1)
  x9 <- rnorm(n,mean = 0,sd = 1)
  x10 <- rnorm(n,mean = 0,sd = 1)
  x11 <- rnorm(n,mean = 0,sd = 1)
  x12 <- rnorm(n,mean = 0,sd = 1)
  # Logistic equation: linear predictor plus a tiny scalar jitter on the intercept
  z <- -3 + .75*x1 + .75*x2 + .75*x3 + .75*x4 + .75*x5 + .75*x6 + rnorm(1,0,0.0001)
  pr <- 1/(1+exp(z)) # equals plogis(-z); with intercept -3 most pr are near 1, so y == 1 dominates
  y <- rbinom(n,1,pr) # Bernoulli response variable
  # Create a dataframe with the independent variables and response variable
  data_mat <- as.data.frame(cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,y))
  # Test set with fixed class imbalance: 50 rows with y == 0, 450 with y == 1
  test_fail <- data_mat[ sample( which(data_mat$y==0), 50), ]
  test_pass <- data_mat[ sample( which(data_mat$y==1), 450), ]
  testing_data <- rbind(test_fail,test_pass)
  # Training set = everything not sampled into the test set.
  # (FIX: the original repeated the next two statements plus the rbind above a
  # second time; the duplicates recomputed byte-identical values and were removed.)
  training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
  train_dep <- training_data$y
  # Data amputation: Missing Not at Random on the predictor columns only.
  # (FIX: `1:ncol(training_data)-1` parsed as 0:(ncol-1); the stray 0 index is
  # silently dropped by R, so the explicit 1:(ncol-1) below selects the same
  # columns without relying on that quirk.)
  # In mice::ampute, a 0 in `patterns` marks a variable that receives
  # missingness, so x3, x6, x9 and x12 stay fully observed.
  data_mat_final <- ampute(data = training_data[, 1:(ncol(training_data) - 1)], prop = 0.6, mech = 'MNAR', type = 'RIGHT', patterns = c(0, 0, 1,0,0,1,0,0,1,0,0,1), weights = c(0, 0, 1,0,0,1,0,0,1,0,0,1))$amp
  # After applying amputation, restore the original row order
  data_mat_final$index <- as.numeric(row.names(data_mat_final))
  data_mat_final <- data_mat_final[order(data_mat_final$index), ]
  data_mat_final <- subset(data_mat_final, select = -c(index))
  data_original <- data_mat_final
  # Re-attach the fully observed response to the amputed predictors
  eve_data <- cbind(data_original,train_dep)
  names(eve_data)[names(eve_data) == 'train_dep'] <- 'y'
  training_data <- eve_data
  # Apply MICE (single imputation, predictive mean matching, 50 iterations)
  # to fill in the missing entries of the training data
  mice_training <- mice(training_data,m=1,maxit=50,meth='pmm',seed=500)
  training_data <- complete(mice_training,1)
  # Convert the dependent variable to pass ("P") / fail ("F") labels
  training_data$y[training_data$y == "0"] <- "F"
  training_data$y[training_data$y == "1"] <- "P"
  testing_data$y[testing_data$y == "0"] <- "F"
  testing_data$y[testing_data$y == "1"] <- "P"
  # Convert the dependent variable to a factor (required by caret)
  training_data$y <- factor(training_data$y)
  testing_data$y <- factor(testing_data$y)
  # Apply SMOTE to re-balance the training classes
  training_data <- SMOTE(y ~ ., data = training_data)
  # 10-fold cross-validation (one repeat) on the training data
  ctrl <- trainControl(method = "repeatedcv", repeats = 1, classProbs = TRUE, savePredictions = TRUE, summaryFunction = twoClassSummary)
  mymethods <- c("glmnet") # Data mining method
  out <- caretList(y~., data = training_data, methodList = mymethods, trControl = ctrl, tuneLength = 6) # Train the model
  # Apply the model to the testing data and calculate the AUC on the testing corpus
  model_preds_tst <- lapply(out, predict, newdata = testing_data[, 1:(dim(testing_data)[2] - 1)], type = "prob")
  model_preds_tst <- lapply(model_preds_tst, function(x)x[,"F"])
  # NOTE(review): `[,-4]` targets a fourth column that does not exist with a
  # single method; out-of-range negative indices are ignored, so with one
  # column this just collapses the frame to a vector -- revisit if more
  # methods are ever added to `mymethods`.
  model_preds_tst <- as.data.frame(model_preds_tst)[,-4]
  auc_test <- caTools::colAUC(model_preds_tst, testing_data$y == "F", plotROC = TRUE)
  auc_list[i] <- auc_test
  # Append this iteration's best tuning parameters
  if (i > 1){
    mod <- rbind(mod,out$glmnet$bestTune)
  }else{
    mod <- data.frame(out$glmnet$bestTune)
  }
  print(i) # progress indicator
  rm(data_mat,testing_data)
}
write.csv(mod,'NoError_2n_10_MNAR_GLMNET_OUT.csv') # CSV file with the per-iteration tuning parameters
print('') # blank line separating the iteration log from the timer output
toc(log=TRUE) # Record the execution time
boxplot(auc_list) # Generate a boxplot of the AUC values
write.csv(auc_list,file=paste('AUC',paste(mymethods,sep="_"),of)) # AUC spreadsheet | /NoMeasureError_2n_10_MNAR_GLMNET.R | no_license | robertobertolini/Binary_Classification_Simulation_Study | R | false | false | 5,240 | r | # Simulation Study Code for:
# No Measurement Error
# 2n
# 10
# Missing Not at Random
# GLMNET
# Last Modified: 3/7/2020
Sys.setenv(JAVA_HOME='')
library(earth)
library(randomForest)
library(DMwR)
library(caret)
library(caretEnsemble)
library(pROC)
library(glmnet)
library(plotROC)
library(tictoc)
library(mice)
library(gtools)
library(data.table)
library(readxl)
library(openxlsx)
set.seed(6) # Random seed used for all 500 iterations
auc_list <- c() # List to store the AUC values
mod <- c() # List to store the tuning parameters at each iteration for the method
# Name of the file that will output the AUC values. Its name consists
# of the four data mining properties and the method from the caret package
of="NoError_2n_10_MNAR_GLMNET.csv"
# Th execution time will also be recorded
tic("timer")
# 500 iterations of this program will be run
for (i in 1:500){
n = 1500 # Size of the training + testing corpus
# Generate 12 predictors from a standard normal distribution with mean 0 & var 1
x1 = rnorm(n,mean = 0,sd = 1)
x2 = rnorm(n,mean = 0,sd = 1)
x3 = rnorm(n,mean = 0,sd = 1)
x4 = rnorm(n,mean = 0,sd = 1)
x5 = rnorm(n,mean = 0,sd = 1)
x6 = rnorm(n,mean = 0,sd = 1)
x7 = rnorm(n,mean = 0,sd = 1)
x8 = rnorm(n,mean = 0,sd = 1)
x9 = rnorm(n,mean = 0,sd = 1)
x10 = rnorm(n,mean = 0,sd = 1)
x11 = rnorm(n,mean = 0,sd = 1)
x12 = rnorm(n,mean = 0,sd = 1)
# Logistic Equation
z = -3 + .75*x1 + .75*x2 + .75*x3 + .75*x4 + .75*x5 + .75*x6+rnorm(1,0,0.0001) # linear combination with a bias
pr = 1/(1+exp(z)) # Inverted logit function for the majority class
y = rbinom(n,1,pr) # Bernoulli response variable
# Create a dataframe with the independent variables and response variable
data_mat <- as.data.frame(cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,y))
# Class imbalance: 10% minority class and 90% majority outcome
test_fail <- data_mat[ sample( which(data_mat$y==0), 50), ]
test_pass <- data_mat[ sample( which(data_mat$y==1), 450), ]
testing_data <- rbind(test_fail,test_pass)
# Divide the data into training and testing sets
training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
train_dep <- training_data$y
testing_data <- rbind(test_fail,test_pass)
training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
train_dep <- training_data$y
# Data Amputation: Missing Not at Random
data_mat_final <- ampute(data = training_data[,1:ncol(training_data)-1], prop = 0.6, mech = 'MNAR', type = 'RIGHT', patterns = c(0, 0, 1,0,0,1,0,0,1,0,0,1), weights = c(0, 0, 1,0,0,1,0,0,1,0,0,1))$amp
# After applying amputation, we reorganize the corpus
data_mat_final$index <- as.numeric(row.names(data_mat_final))
data_mat_final <- data_mat_final[order(data_mat_final$index), ]
data_mat_final <- subset(data_mat_final, select = -c(index))
data_original <- data_mat_final
eve_data <- cbind(data_original,train_dep)
names(eve_data)[names(eve_data) == 'train_dep'] <- 'y'
training_data <- eve_data
# Apply MICE to fill in the missing entries of the training data
mice_training <- mice(training_data,m=1,maxit=50,meth='pmm',seed=500)
training_data <- complete(mice_training,1)
# Convert the dependent variable to pass and fail
training_data$y[training_data$y == "0"] <- "F"
training_data$y[training_data$y == "1"] <- "P"
testing_data$y[testing_data$y == "0"] <- "F"
testing_data$y[testing_data$y == "1"] <- "P"
# Convert the dependent variable to a factor
training_data$y <- factor(training_data$y)
testing_data$y <- factor(testing_data$y)
# Apply SMOTE to the training data
training_data <- SMOTE(y ~ ., data = training_data)
# 10-fold cross-validation will be applied to the training data
ctrl = trainControl(method = "repeatedcv", repeats = 1, classProbs = T, savePredictions = T, summaryFunction = twoClassSummary)
mymethods = c("glmnet") # Data mining method
out = caretList(y~., data = training_data, methodList = mymethods, trControl = ctrl, tuneLength = 6) # Train the model
# Apply the model to the testing data and calculate the AUC on the testing corpus
model_preds_tst = lapply(out, predict, newdata = testing_data[, 1:(dim(testing_data)[2] - 1)], type = "prob")
model_preds_tst = lapply(model_preds_tst, function(x)x[,"F"])
model_preds_tst = as.data.frame(model_preds_tst)[,-4]
auc_test = caTools::colAUC(model_preds_tst, testing_data$y == "F", plotROC = T)
auc_list[i] <- auc_test
# Store the tuning parameters for each iteration in a csv spreadsheet
if (i > 1){
mod <- rbind(mod,out$glmnet$bestTune)
}else{
mod <- data.frame(out$glmnet$bestTune)
}
print(i)
rm(data_mat,testing_data)
}
write.csv(mod,'NoError_2n_10_MNAR_GLMNET_OUT.csv') # CSV file with parameters
print('')
toc(log=TRUE) # Record the execution time
boxplot(auc_list) # Generate a boxplot of the AUC values
write.csv(auc_list,file=paste('AUC',paste(mymethods,sep="_"),of)) # AUC spreadsheet |
library(tidyverse)
library(patchwork)
# Fit a cubic polynomial in x to a handful of hand-picked
# (distance, accuracy) points.
df <- tibble(x = c(0, 2, 3, 4, 5, 7), y = c(1, 0.8, 0.7, 0.69, 0.65, 0)) %>%
  mutate(x2 = x^2, x3 = x^3)
m <- lm(data = df, y ~ x + x2 + x3)
# Quick base-graphics check of the fitted curve on a fine grid.
# BUG FIX: the cubic column in the prediction frame was built as x^2 instead
# of x^3, so predict() was evaluated on the wrong design matrix.
plot(seq(0, 10, 0.1), predict(m, tibble(x = seq(0, 10, 0.1), x2 = x^2, x3 = x^3)))
# Same cubic fit visualised with ggplot2's built-in lm smoother.
ggplot(df, aes(x, y)) + geom_point() + stat_smooth(method="lm", se=TRUE, fill=NA,
formula=y ~ poly(x, 3, raw=TRUE),colour="red")
# Probability of a correct response given distance d to the target:
# a cubic polynomial in d, clamped to [0, 1].
#
# Generalised: pmin/pmax keep the clamping elementwise, so the function now
# also accepts vectors (the scalar behaviour is unchanged; the old max()/min()
# collapsed vector input to a single value).
p_given_distance <- function(d) {
  e <- 1 - 0.25 * d + 0.08 * d^2 - 0.01 * d^3
  pmin(1, pmax(0, e))
}
# generate simulated psychometric curve (assuming standing in the center,
# i.e. phi = 0, so the effective distance to either target is delta)
phi = 0
deltas = seq(0, 8, 0.01)
tibble(
delta = deltas,
p = map_dbl(abs(phi*deltas - deltas), p_given_distance)) %>%
ggplot(aes(x = delta, y = p)) + geom_path(colour = "white", size = 2) +
see::theme_blackboard() +
ggtitle("Psychometric Curve") -> plt1
# Candidate strategies: phi is the relative standing position, delta the
# separation between the two targets.
phi <- seq(0, 1, 0.01)
deltas <- seq(1, 6, 0.2)
# Accuracy surface over (delta, phi): mean of the hit probabilities for the
# near and far target. map_df builds the frame in one pass instead of growing
# it with bind_rows() inside a loop (which copies the frame every iteration).
df <- map_df(deltas, function(delta) {
  p1 <- map_dbl(abs(phi * delta - delta), p_given_distance)
  p2 <- map_dbl(abs(phi * delta + delta), p_given_distance)
  tibble(delta = delta, phi = phi, acc = (p1 + p2) / 2)
})
df$delta <- as.factor(df$delta) # one discrete curve per separation
ggplot(df, aes(x = phi, y = acc, colour = delta, group = delta)) +
  geom_path() +
  see::theme_blackboard() +
  ggtitle("different standing positions") -> plt2
# For one target separation d, return the standing position(s) phi with the
# highest accuracy in the global `df` surface (ties yield multiple rows).
get_best_phi <- function(d) {
  rows <- df[df$delta == d, ]
  best_acc <- max(rows$acc)
  hits <- which(rows$acc == best_acc)
  tibble(delta = d, phi = rows$phi[hits], acc = best_acc)
}
# Leftover interactive sanity check: auto-prints the optimum for delta = 4.
get_best_phi(4)
# Optimal standing position for every separation in `deltas`.
df_opt <- map_df(deltas, get_best_phi)
ggplot(df_opt, aes(x = delta, y = phi)) +
geom_path(size = 2, colour = "skyblue") +
see::theme_blackboard() +
ggtitle("optimal strategy") -> plt3
# Combine the three panels side by side (patchwork `+` operator).
plt <- plt1 + plt2 + plt3
ggsave("breaking_assumptions.pdf", width = 10, height = 4) | /Analyses/some_simulation_difficulty_rule.R | no_license | warren-james/Breaking_symmetry | R | false | false | 1,835 | r | library(tidyverse)
library(patchwork)
# Function 1
# linear
tibble(x = c(0, 2, 3, 4,5, 7), y = c(1, 0.8, 0.7, 0.69, 0.65, 0)) %>%
mutate(x2 = x^2, x3 = x^3) -> df
m <- lm(data = df, y ~ x +x2 + x3)
plot(seq(0, 10, 0.1), predict(m, tibble(x = seq(0, 10, 0.1), x2 = x^2, x3 = x^2)))
ggplot(df, aes(x, y)) + geom_point() + stat_smooth(method="lm", se=TRUE, fill=NA,
formula=y ~ poly(x, 3, raw=TRUE),colour="red")
p_given_distance <- function(d) {
e <- max(0, 1 - 0.25*d + 0.08*d^2 - 0.01*d^3)
e <- min(1, e)
return(e)
}
# generate simulted psychometric curve (assuming standing in the center)
phi = 0
deltas = seq(0, 8, 0.01)
tibble(
delta = deltas,
p = map_dbl(abs(phi*deltas - deltas), p_given_distance)) %>%
ggplot(aes(x = delta, y = p)) + geom_path(colour = "white", size = 2) +
see::theme_blackboard() +
ggtitle("Psychometric Curve") -> plt1
phi = seq(0, 1, 0.01)
deltas <- seq(1, 6, 0.2)
df = tibble()
for (delta in deltas) {
p1 <- map_dbl(abs(phi*delta - delta), p_given_distance)
p2 <- map_dbl(abs(phi*delta + delta), p_given_distance)
df <- bind_rows(df, tibble(delta = delta, phi = phi, acc= (p1+p2)/2))
}
df$delta <- as.factor(df$delta)
ggplot(df, aes(x = phi, y = acc, colour = delta, group = delta)) +
geom_path() +
see::theme_blackboard() +
ggtitle("different standing positions") -> plt2
get_best_phi <- function(d) {
df_d <-filter(df, delta == d)
idx <- which(df_d$acc == max(df_d$acc))
return(tibble(delta = d, phi = df_d$phi[idx], acc = max(df_d$acc)))
}
get_best_phi(4)
df_opt <- map_df(deltas, get_best_phi)
ggplot(df_opt, aes(x = delta, y = phi)) +
geom_path(size = 2, colour = "skyblue") +
see::theme_blackboard() +
ggtitle("optimal strategy") -> plt3
plt <- plt1 + plt2 + plt3
ggsave("breaking_assumptions.pdf", width = 10, height = 4) |
library(tidyverse)
# Annual mean CO2 from the NOAA Mauna Loa record; the first 56 lines are header.
annual <- read_table("ftp://aftp.cmdl.noaa.gov/products/trends/co2/co2_annmean_mlo.txt", skip = 56)
library(ggplot2)
# NOTE(review): installing packages from inside an analysis script re-installs
# on every run; this belongs in a one-time setup step.
install.packages("ggthemes")
library(ggthemes)
plot <- ggplot(annual, aes(year, mean)) + geom_line() +
labs(x = "Year", y = "Annual", title = "Annual Mean Carbon dioxide 1958 - 2019") +
theme_bw()
library(dplyr)
# NOTE(review): top_n() without `wt` ranks by the last column of the frame --
# TODO confirm that is the intended ranking variable here.
new<- annual %>%
arrange(desc(year)) %>%
top_n(n = 10)
library(kableExtra)
# NOTE(review): slice() expects row positions, but desc(year) yields negative
# reals -- this looks unintended; the frame is already sorted by year above.
kable(head(slice(new, desc(year))), format = "simple",
align = "c", caption = 'Annual Means for new decade')
plot | /week_08/case_study_08.R | no_license | geo511-2020/geo511-2020-tasks-shruti8297 | R | false | false | 566 | r |
library(tidyverse)
annual <- read_table("ftp://aftp.cmdl.noaa.gov/products/trends/co2/co2_annmean_mlo.txt", skip = 56)
library(ggplot2)
install.packages("ggthemes")
library(ggthemes)
plot <- ggplot(annual, aes(year, mean)) + geom_line() +
labs(x = "Year", y = "Annual", title = "Annual Mean Carbon dioxide 1958 - 2019") +
theme_bw()
library(dplyr)
new<- annual %>%
arrange(desc(year)) %>%
top_n(n = 10)
library(kableExtra)
kable(head(slice(new, desc(year))), format = "simple",
align = "c", caption = 'Annual Means for new decade')
plot |
# This code will be used to producing a map of locations for the
# methods section of the morph chapter.
workwd <- 'C:/Users/z5188231/Desktop/Coding/Scripts/AdditionalWork/AdamPaper/Maps'
setwd(workwd)
# Now we can bring in the data for analysis. The data we will be
# looking at
data <- read.table('key-file_morph_env_allplates.txt', sep = '\t',
header = TRUE)
# First we will get the mean location for each of our collection
# localities.
dflon <- do.call(data.frame, aggregate(x ~ location,
data = data,
function(x) c(mean(x),
length(x)),
na.action = na.omit))
dflat <- do.call(data.frame, aggregate(y ~ location,
data = data,
function(x) c(mean(x),
length(x)),
na.action = na.omit))
location.dat <- data.frame(dflat[,1:2], dflon[,2:3])
location.dat
colnames(location.dat) <- c("loc", "lat", "lon", "n")
loc.dat <- location.dat[order(location.dat$lat),]
ord <- read.table("pop_ordered_across_aust.txt", header = FALSE, sep = '\t')
loc.dat <- merge(loc.dat, ord, by.x = 'loc', by.y = 'V1')
colnames(loc.dat)[5] <- 'mapNo' # the ordering file supplies the map label number
# Median genetic-group value (genpopW.B.BS) per collection locality.
genGrp <- data[, c('location', 'genpopW.B.BS')]
genGrp <- do.call(data.frame, aggregate(genpopW.B.BS ~ location,
data = data,
function(x) median(x),
na.action = na.omit))
loc.dat <- merge(loc.dat, genGrp, by.x = 'loc', by.y = 'location', all.y = FALSE)
# Recode map number into a sampling group code and a plotting colour.
loc.dat$samp <- ifelse(loc.dat$mapNo < 2, 0,
ifelse(loc.dat$mapNo < 14, 1,
ifelse(loc.dat$mapNo < 16, 2,
ifelse(loc.dat$mapNo == 18, 2, 3))))
loc.dat$Col <- ifelse(loc.dat$mapNo < 2, 'plum2',
ifelse(loc.dat$mapNo < 14, 'khaki',
ifelse(loc.dat$mapNo < 16, 'sandybrown',
ifelse(loc.dat$mapNo == 18, 'sandybrown', 'powderblue'))))
# Env: values 21/22/23 are used as pch plotting symbols later.
# NOTE(review): mapNo == 7 matches none of the explicit branches and falls
# through to the `mapNo < 14` case (22) -- confirm that is intended.
loc.dat$Env <- ifelse(loc.dat$mapNo < 7, 21,
ifelse(loc.dat$mapNo == 8, 22,
ifelse(loc.dat$mapNo < 14 | loc.dat$mapNo == 16 | loc.dat$mapNo == 17 | loc.dat$mapNo == 20 | loc.dat$mapNo == 23, 22, 23)))
#install.packages("sp")
#install.packages("proj4")
#install.packages("raster")
#install.packages("rgeos")
#install.packages("rgdal")
#install.packages("maptools")
#install.packages("maps")
#install.packages("mapdata")
#install.packages("scales")
#install.packages("plotrix")
#install.packages("mapproj")
#install.packages("multichull")
#install.packages("rgeos")
library(multichull)
library(sp)
library(proj4)
library(raster)
library(rgeos)
library(rgdal)
library(maptools)
library(maps)
library(mapdata)
library(scales)
library(plotrix)
library(mapproj)
library(rgeos)
coordinates(loc.dat) <- ~ lon + lat
projection(loc.dat) <- '+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs'
loc.dat$cex <- loc.dat$n/10
# Now that we have the locations imported and the mean values
# we will bring in the australian map.
#map('worldHires', 'Australia', xlim = c(110, 155),
# ylim = c(-44, -10), col = 'cornsilk', fill = TRUE)
# Read the Australian state boundaries shapefile.
# NOTE(review): maptools::readShapePoly has long been deprecated in favour of
# sf/rgdal readers -- left in place here to avoid changing dependencies.
aus <- readShapePoly("ausMap-states/AUS_adm1.shp")
# Continent-scale overview: one numbered square per sampling locality.
plot(NA, xlim = c(110, 155), ylim = c(-44, -10), xlab = "", ylab = "", xaxt = "n", yaxt = "n", bty = "n")
plot(aus, add = TRUE, col = "grey85", border = "grey10", lwd = 0.25)  # TRUE, not the reassignable shorthand T
points(loc.dat, pch = 22, cex = 2.75, lwd = 1,
       col = 'black', bg = loc.dat$Col)
text(loc.dat, labels = loc.dat$mapNo, cex = 0.75, col = 'black')
axis(1, las = 1)
axis(2, las = 1)
box()
compassRose(115, -40, cex = 0.6)
map.scale(120, -41, metric = TRUE, ratio = FALSE)
# Now we want to create a map that describes the genetic and environmental
# clustering.
# Plot window padded 1.5 degrees beyond the sampled coordinate range.
minx <- min(data$x) - 1.5
maxx <- max(data$x) + 1.5
miny <- min(data$y) - 1.5
maxy <- max(data$y) + 1.5
#Environmental groupings
# NOTE(review): this binds columns of `location.dat` with mapNo pulled from
# `loc.dat`; the two objects were built and ordered separately, so row
# alignment between them should be double-checked.
location.dat3 <- data.frame(location.dat$lon,location.dat$lat,loc.dat@data[["mapNo"]])
location.dat3 # auto-prints for a quick visual check when run interactively
#Lon/Lat for great dividing range
GDR <- read.csv("GreatDivRange.txt", sep="\t", quote = "")
GDR <- data.frame(GDR) # read.csv already returns a data.frame; kept as-is
#More detailed map, written to pdf
pdf("rplot.pdf")
plot(NA, xlim = c(minx, maxx), ylim = c(miny, maxy), xlab = "", ylab = "", xaxt = "n", yaxt = "n", bty = "n")
plot(aus, add = TRUE, col = "grey80", border = "grey10", lwd = 0.25)
points(loc.dat, pch = loc.dat$Env, cex = 2.75, lwd = 1,
       col = 'black', bg = loc.dat$Col)
lines(x = GDR$Lon, y = GDR$Lat, col = "blue", lwd = 2)
text(loc.dat, labels = loc.dat$mapNo, cex = 0.75, col = 'black')
# FIX: removed `plot(hull, add = T, border = "green")` -- `hull` is never
# defined anywhere in this script, so that call errored at runtime and left
# the pdf device open (dev.off() below was never reached).
axis(1, las = 1)
axis(2, las = 1)
box()
compassRose(122, -42, cex = 0.8)
map.scale(127, -42, metric = TRUE, ratio = FALSE)
dev.off()
#Unused Chull code (labelled unused by the author; kept for reference)
# NOTE(review): chull() expects two coordinate columns, but location.dat3 has
# three (lon, lat, mapNo) -- these calls likely need location.dat3[, 1:2].
# The hard-coded row indices also silently break if the locality order changes.
arid <- location.dat3[c(4,5,13,15,20,22),]
semiarid <- location.dat3[c(2,6,9,11,12,16,18,19,23,24),]
nonarid <- location.dat3[c(1,3,7,8,10,14,17,21),]
# Each hull is closed by repeating its first vertex, then traced on the map.
hpts <- chull(arid)
hpts2 <- c(hpts, hpts[1])
hpts2
lines(arid[hpts2, ], col = 'blue')
hpts <- chull(semiarid)
hpts
hpts2 <- c(hpts, hpts[1])
hpts2
lines(semiarid[hpts2, ], col = 'green')
hpts <- chull(nonarid)
hpts
hpts2 <- c(hpts, hpts[1])
hpts2
lines(nonarid[hpts2, ], col = 'red')
lines(nonarid[hpts2, ], col = 'red')
| /Scripts/Sample_distribution_map.R | no_license | schnappi-wkl/Sv1_StarlingGBS | R | false | false | 5,585 | r | # This code will be used to producing a map of locations for the
# methods section of the morph chapter.
workwd <- 'C:/Users/z5188231/Desktop/Coding/Scripts/AdditionalWork/AdamPaper/Maps'
setwd(workwd)
# Now we can bring in the data for analysis. The data we will be
# looking at
data <- read.table('key-file_morph_env_allplates.txt', sep = '\t',
header = TRUE)
# First we will get the mean location for each of our collection
# localities.
dflon <- do.call(data.frame, aggregate(x ~ location,
data = data,
function(x) c(mean(x),
length(x)),
na.action = na.omit))
dflat <- do.call(data.frame, aggregate(y ~ location,
data = data,
function(x) c(mean(x),
length(x)),
na.action = na.omit))
location.dat <- data.frame(dflat[,1:2], dflon[,2:3])
location.dat
colnames(location.dat) <- c("loc", "lat", "lon", "n")
loc.dat <- location.dat[order(location.dat$lat),]
ord <- read.table("pop_ordered_across_aust.txt", header = FALSE, sep = '\t')
loc.dat <- merge(loc.dat, ord, by.x = 'loc', by.y = 'V1')
colnames(loc.dat)[5] <- 'mapNo'
genGrp <- data[, c('location', 'genpopW.B.BS')]
genGrp <- do.call(data.frame, aggregate(genpopW.B.BS ~ location,
data = data,
function(x) median(x),
na.action = na.omit))
loc.dat <- merge(loc.dat, genGrp, by.x = 'loc', by.y = 'location', all.y = FALSE)
loc.dat$samp <- ifelse(loc.dat$mapNo < 2, 0,
ifelse(loc.dat$mapNo < 14, 1,
ifelse(loc.dat$mapNo < 16, 2,
ifelse(loc.dat$mapNo == 18, 2, 3))))
loc.dat$Col <- ifelse(loc.dat$mapNo < 2, 'plum2',
ifelse(loc.dat$mapNo < 14, 'khaki',
ifelse(loc.dat$mapNo < 16, 'sandybrown',
ifelse(loc.dat$mapNo == 18, 'sandybrown', 'powderblue'))))
loc.dat$Env <- ifelse(loc.dat$mapNo < 7, 21,
ifelse(loc.dat$mapNo == 8, 22,
ifelse(loc.dat$mapNo < 14 | loc.dat$mapNo == 16 | loc.dat$mapNo == 17 | loc.dat$mapNo == 20 | loc.dat$mapNo == 23, 22, 23)))
#install.packages("sp")
#install.packages("proj4")
#install.packages("raster")
#install.packages("rgeos")
#install.packages("rgdal")
#install.packages("maptools")
#install.packages("maps")
#install.packages("mapdata")
#install.packages("scales")
#install.packages("plotrix")
#install.packages("mapproj")
#install.packages("multichull")
#install.packages("rgeos")
library(multichull)
library(sp)
library(proj4)
library(raster)
library(rgeos)
library(rgdal)
library(maptools)
library(maps)
library(mapdata)
library(scales)
library(plotrix)
library(mapproj)
library(rgeos)
coordinates(loc.dat) <- ~ lon + lat
projection(loc.dat) <- '+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs'
loc.dat$cex <- loc.dat$n/10
# Now that we have the locations imported and the mean values
# we will bring in the australian map.
#map('worldHires', 'Australia', xlim = c(110, 155),
# ylim = c(-44, -10), col = 'cornsilk', fill = TRUE)
aus <- readShapePoly("ausMap-states/AUS_adm1.shp")
plot(NA, xlim = c(110, 155), ylim = c(-44, -10), xlab="", ylab="", xaxt="n", yaxt="n", bty="n")
plot(aus, add = T, col = "grey85", border = "grey10", lwd=0.25)
points(loc.dat, pch = 22, cex = 2.75, lwd = 1,
col = 'black', bg = loc.dat$Col)
text(loc.dat, labels = loc.dat$mapNo, cex = 0.75, col = 'black')
axis(1,las=1)
axis(2,las=1)
box()
compassRose(115, -40, cex = 0.6)
map.scale(120, -41, metric = TRUE, ratio = FALSE)
# Now we want to create a map that describes the genetic and environmental
# clustering.
minx <- min(data$x) - 1.5
maxx <- max(data$x) + 1.5
miny <- min(data$y) - 1.5
maxy <- max(data$y) + 1.5
#Environmental groupings
location.dat3 <- data.frame(location.dat$lon,location.dat$lat,loc.dat@data[["mapNo"]])
location.dat3
#Lon/Lat for great dividing range
GDR <- read.csv("GreatDivRange.txt", sep="\t", quote = "")
GDR <- data.frame(GDR)
#More detailed map
pdf("rplot.pdf")
plot(NA, xlim = c(minx, maxx), ylim = c(miny, maxy), xlab="", ylab="", xaxt="n", yaxt="n", bty="n")
plot(aus, add = T, col = "grey80", border = "grey10", lwd=0.25)
points(loc.dat, pch = loc.dat$Env, cex = 2.75, lwd = 1,
col = 'black', bg = loc.dat$Col)
lines(x = GDR$Lon, y = GDR$Lat, col = "blue", lwd = 2)
text(loc.dat, labels = loc.dat$mapNo, cex = 0.75, col = 'black')
plot(hull,add=T,border="green")
axis(1,las=1)
axis(2,las=1)
box()
compassRose(122, -42, cex = 0.8)
map.scale(127, -42, metric = TRUE, ratio = FALSE)
dev.off()
#Unused Chull code
arid <- location.dat3[c(4,5,13,15,20,22),]
semiarid <- location.dat3[c(2,6,9,11,12,16,18,19,23,24),]
nonarid <- location.dat3[c(1,3,7,8,10,14,17,21),]
hpts <- chull(arid)
hpts2 <- c(hpts, hpts[1])
hpts2
lines(arid[hpts2, ], col = 'blue')
hpts <- chull(semiarid)
hpts
hpts2 <- c(hpts, hpts[1])
hpts2
lines(semiarid[hpts2, ], col = 'green')
hpts <- chull(nonarid)
hpts
hpts2 <- c(hpts, hpts[1])
hpts2
lines(nonarid[hpts2, ], col = 'red')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/syntheticMultipletsFromCounts.R
\name{syntheticSinglets}
\alias{syntheticSinglets}
\title{syntheticSinglets}
\arguments{
\item{nGenes}{Number of genes in generated synthetic data.}
\item{nCells}{Number of cells per cell type in generated synthetic data.}
\item{nCellTypes}{Number of cell types in generated synthetic data.}
\item{...}{additional arguments to pass on}
}
\value{
A matrix with synthetic counts.
}
\description{
This unit uses the negative binomial distribution to synthesize the
singlets.
}
\examples{
synth <- syntheticSinglets(10, 10, 10)
}
\author{
Jason T. Serviss
}
\keyword{internal}
| /man/syntheticSinglets.Rd | no_license | jasonserviss/CIMseq.testing | R | false | true | 688 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/syntheticMultipletsFromCounts.R
\name{syntheticSinglets}
\alias{syntheticSinglets}
\title{syntheticSinglets}
\arguments{
\item{nGenes}{Number of genes in generated synthetic data.}
\item{nCells}{Number of cells per cell type in generated synthetic data.}
\item{nCellTypes}{Number of cell types in generated synthetic data.}
\item{...}{additional arguments to pass on}
}
\value{
A matrix with synthetic counts.
}
\description{
This unit uses the negative binomial distribution to synthesize the
singlets.
}
\examples{
synth <- syntheticSinglets(10, 10, 10)
}
\author{
Jason T. Serviss
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise_constraints_clustering.R
\name{ckmeansSSLR}
\alias{ckmeansSSLR}
\title{General Interface COP K-Means Algorithm}
\usage{
ckmeansSSLR(n_clusters = NULL, mustLink = NULL, cantLink = NULL, max_iter = 10)
}
\arguments{
\item{n_clusters}{A number of clusters to be considered. Default is NULL (num classes)}
\item{mustLink}{A list of must-link constraints. Default is NULL: constraints are built from samples sharing the same label}
\item{cantLink}{A list of cannot-link constraints. Default is NULL: constraints are built from samples with different labels}
\item{max_iter}{maximum iterations in KMeans. Default is 10}
}
\description{
Model from conclust \cr
This function takes an unlabeled dataset and two lists of must-link and cannot-link constraints
as input and produce a clustering as output.
}
\note{
This models only returns labels, not centers
}
\examples{
library(tidyverse)
library(caret)
library(SSLR)
library(tidymodels)
data <- iris
set.seed(1)
#\% LABELED
cls <- which(colnames(iris) == "Species")
labeled.index <- createDataPartition(data$Species, p = .2, list = FALSE)
data[-labeled.index,cls] <- NA
m <- ckmeansSSLR() \%>\% fit(Species ~ ., data)
#Get labels (assign clusters); type = "raw" returns a factor
labels <- m \%>\% cluster_labels()
print(labels)
}
\references{
Wagstaff, Cardie, Rogers, Schrodl\cr
\emph{Constrained K-means Clustering with Background Knowledge}\cr
2001
}
| /man/ckmeansSSLR.Rd | no_license | cran/SSLR | R | false | true | 1,426 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwise_constraints_clustering.R
\name{ckmeansSSLR}
\alias{ckmeansSSLR}
\title{General Interface COP K-Means Algorithm}
\usage{
ckmeansSSLR(n_clusters = NULL, mustLink = NULL, cantLink = NULL, max_iter = 10)
}
\arguments{
\item{n_clusters}{A number of clusters to be considered. Default is NULL (num classes)}
\item{mustLink}{A list of must-link constraints. NULL Default, constrints same label}
\item{cantLink}{A list of cannot-link constraints. NULL Default, constrints with different label}
\item{max_iter}{maximum iterations in KMeans. Default is 10}
}
\description{
Model from conclust \cr
This function takes an unlabeled dataset and two lists of must-link and cannot-link constraints
as input and produce a clustering as output.
}
\note{
This models only returns labels, not centers
}
\examples{
library(tidyverse)
library(caret)
library(SSLR)
library(tidymodels)
data <- iris
set.seed(1)
#\% LABELED
cls <- which(colnames(iris) == "Species")
labeled.index <- createDataPartition(data$Species, p = .2, list = FALSE)
data[-labeled.index,cls] <- NA
m <- ckmeansSSLR() \%>\% fit(Species ~ ., data)
#Get labels (assing clusters), type = "raw" return factor
labels <- m \%>\% cluster_labels()
print(labels)
}
\references{
Wagstaff, Cardie, Rogers, Schrodl\cr
\emph{Constrained K-means Clustering with Background Knowledge}\cr
2001
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prioritization.R
\name{get_neighbor}
\alias{get_neighbor}
\title{Get the neighbors of a node}
\usage{
get_neighbor(node, net)
}
\arguments{
\item{node}{a gene}
\item{net}{a network}
}
\value{
a vector of gene
}
\description{
Get the neighbors of a node in a network.
}
| /man/get_neighbor.Rd | no_license | cran/prioGene | R | false | true | 337 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prioritization.R
\name{get_neighbor}
\alias{get_neighbor}
\title{Title get neighbor of a node}
\usage{
get_neighbor(node, net)
}
\arguments{
\item{node}{a gene}
\item{net}{a network}
}
\value{
a vector of gene
}
\description{
Title get neighbor of a node
}
|
#' Score version for smooth direct designs
#'
#' \code{score_direct_smooth} gives the version of the score that is needed for \link{stage_two}.
#'
#' The expected sample size is the first-stage size plus the integral of
#' \code{n2(w) * dnorm(w - sqrt(n1) * mu)} over the continuation region,
#' approximated with the trapezoidal rule on the nodes in \code{w}.
#'
#' @param parameters Parameters specifying the design; only
#'   \code{parameters$mu} is used here
#' @param n1 First stage sample size
#' @param n2 n_2-values, one per node in \code{w}
#' @param h Distance between two nodes
#' @param N N+1 gives the number of nodes (the earlier "4N+1" claim was a
#'   leftover from a previous higher-order quadrature; the weights below use
#'   N+1 trapezoidal nodes)
#' @param w nodes inside the interval (cf,ce)
#'
score_direct_smooth <- function(parameters, n1, n2, h, N, w){
  # Trapezoidal weights: endpoints count once, interior nodes twice.
  o <- c(1, rep(2, (N - 1)), 1)
  # Vectorised integrand; replaces a needless row-wise apply() over
  # cbind(n2, w, o) that computed exactly the same products.
  y <- n2 * dnorm(w - sqrt(n1) * parameters$mu) * o
  # (h/2) * sum(weighted integrand) + fixed first-stage sample size.
  (h / 2) * sum(y) + n1
}
#' Smooth score version
#'
#' \code{score_smooth} evaluates the smooth score of a design: it first
#' computes the optimal second stage via \link{stage_two} and then plugs the
#' resulting n_2-values into \link{score_direct_smooth}.
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param ce Boundary for stopping for efficacy
#' @param n1 Stage one sample size
#'
#' @export
score_smooth <- function(parameters, cf, ce, n1) {
  # Same grid as used inside stage_two(): N + 1 equidistant nodes on [cf, ce].
  N <- 12
  h <- (ce - cf) / N
  w <- seq(cf, ce, h)
  # stage_two() returns c(c2, n2); keep only the n_2-values (second half).
  s2 <- stage_two(parameters, cf, ce, n1)
  n2 <- s2[(length(s2) / 2 + 1):length(s2)]
  score_direct_smooth(parameters, n1, n2, h, N, w)
}
#' Type I version for smooth direct designs
#'
#' \code{type_one_smooth} gives the version of the type I error that is
#' needed for \link{stage_two}. It approximates
#' 1 - pnorm(cf) - integral over (cf, ce) of pnorm(c2(w)) * dnorm(w)
#' by a trapezoid-type quadrature on the nodes w.
#'
#' @param parameters Parameters specifying the design (currently unused here)
#' @param cf Boundary for stopping for futility
#' @param c2 c_2-values
#' @param h Distance between two nodes
#' @param N 4N+1 gives the number of nodes
#' @param w nodes inside the interval (cf,ce)
#'
type_one_smooth <- function(parameters, cf, c2, h, N, w) {
  # Quadrature weights: endpoints get weight 1, interior nodes weight 2.
  wts <- c(1, rep(2, N - 1), 1)
  continue_mass <- (h / 2) * sum(pnorm(c2) * dnorm(w) * wts)
  1 - pnorm(cf) - continue_mass
}
#' Type II version for smooth direct designs
#'
#' \code{type_two_smooth} gives the version of the type II error that is
#' needed for \link{stage_two}. It adds the futility-stop mass
#' pnorm(cf - sqrt(n1) * mu) to a trapezoid-type quadrature of
#' pnorm(c2(w) - sqrt(n2(w)) * mu) * dnorm(w - sqrt(n1) * mu).
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param c2 c_2-values
#' @param n1 First stage sample size
#' @param n2 n_2-values
#' @param h Distance between two nodes
#' @param N 4N+1 gives the number of nodes
#' @param w nodes inside the interval (cf,ce)
#'
type_two_smooth <- function(parameters, cf, c2, n1, n2, h, N, w) {
  # Quadrature weights: endpoints get weight 1, interior nodes weight 2.
  wts <- c(1, rep(2, N - 1), 1)
  mu <- parameters$mu
  # abs() guards against slightly negative n2 from the optimizer.
  integrand <- wts * pnorm(c2 - sqrt(abs(n2)) * mu) * dnorm(w - sqrt(n1) * mu)
  pnorm(cf - sqrt(n1) * mu) + (h / 2) * sum(integrand)
}
#' Compute optimal stage two values for given first stage
#'
#' \code{stage_two} computes the functions c_2 and n_2 that hold the error
#' constraints and are optimal w.r.t. expected sample size under the
#' alternative for a prespecified first stage.
#'
#' \code{stage_two} is the base of \link{direct_design_smooth}.
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param ce Boundary for stopping for efficacy
#' @param n1 First stage sample size
#'
#' @return A vector. The first half give the c_2, and the second the
#' n_2-values, on an equidistant grid inside the interval (cf,ce). If the
#' optimizer leaves either error constraint clearly inactive, the n_2-values
#' are replaced by the sentinel 99999 to flag failure.
#'
#' @export
stage_two <- function(parameters, cf, ce, n1) {
  N <- 12
  h <- (ce - cf) / N
  w <- seq(cf, ce, h)
  nw <- length(w)
  # Starting values taken from the optimal group-sequential design.
  k <- optimal_gsd(parameters)
  start_n2 <- rep(ceiling(k$n2(k$cf + (k$ce - k$cf) / 2)), nw)
  start_c2 <- rep(k$c2(k$cf / 2 + k$ce / 2), nw)
  # Objective and constraint wrappers; x = c(c2-values, n2-values).
  score_min <- function(n2) score_direct_smooth(parameters, n1, n2, h, N, w)
  t_1 <- function(c2) type_one_smooth(parameters, cf, c2, h, N, w)
  t_2 <- function(c2, n2) type_two_smooth(parameters, cf, c2, n1, n2, h, N, w)
  optimum <- nloptr::nloptr(
    x0 = c(start_c2, start_n2),
    eval_f = function(x) score_min(x[(nw + 1):(2 * nw)]),
    eval_g_ineq = function(x) c(
      t_1(x[1:nw]) - parameters$alpha,
      t_2(x[1:nw], x[(nw + 1):(2 * nw)]) - parameters$beta
    ),
    lb = c(rep(-1, nw), rep(1, nw)),
    ub = c(rep(4, nw), rep(Inf, nw)),
    opts = list(
      algorithm = "NLOPT_LN_COBYLA",
      xtol_rel = 0.0001,
      maxeval = 99920000
    )
  )
  c2 <- optimum$solution[1:nw]
  n2_opt <- optimum$solution[(nw + 1):(2 * nw)]
  # Relative slack in the two error constraints.
  r1 <- t_1(c2) / parameters$alpha
  r2 <- t_2(c2, n2_opt) / parameters$beta
  # Accept the solution only when both constraints are (nearly) active;
  # otherwise return a sentinel so callers can detect failure.
  if (abs(1 - r1) < 0.05 && abs(1 - r2) < 0.05) {
    n2 <- n2_opt
  } else {
    n2 <- rep(99999, nw)
  }
  c(c2, n2)
}
| /R/StageTwo.R | no_license | MatheMax/OptReSample | R | false | false | 5,084 | r | #' Score version for smooth direct designs
#' Score version for smooth direct designs
#'
#' \code{score_direct_smooth} gives the version of the score that is needed
#' for \link{stage_two}. The expected sample size is approximated by a
#' trapezoid-type quadrature over the continuation region: n1 plus h/2 times
#' the weighted sum of n2(w) * dnorm(w - sqrt(n1) * mu) over the nodes w.
#'
#' @param parameters Parameters specifying the design
#' @param n1 First stage sample size
#' @param n2 n_2-values
#' @param h Distance between two nodes
#' @param N 4N+1 gives the number of nodes
#' @param w nodes inside the interval (cf,ce)
#'
score_direct_smooth <- function(parameters, n1, n2, h, N, w) {
  # Quadrature weights: endpoints get weight 1, interior nodes weight 2.
  wts <- c(1, rep(2, N - 1), 1)
  # Vectorized integrand; dnorm() is vectorized over the nodes w.
  integrand <- n2 * dnorm(w - sqrt(n1) * parameters$mu) * wts
  n1 + (h / 2) * sum(integrand)
}
#' Smooth score version
#'
#' \code{score_smooth} evaluates the smooth score of a design: it first
#' computes the optimal second stage via \link{stage_two} and then plugs the
#' resulting n_2-values into \link{score_direct_smooth}.
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param ce Boundary for stopping for efficacy
#' @param n1 Stage one sample size
#'
#' @export
score_smooth <- function(parameters, cf, ce, n1) {
  # Same grid as used inside stage_two(): N + 1 equidistant nodes on [cf, ce].
  N <- 12
  h <- (ce - cf) / N
  w <- seq(cf, ce, h)
  # stage_two() returns c(c2, n2); keep only the n_2-values (second half).
  s2 <- stage_two(parameters, cf, ce, n1)
  n2 <- s2[(length(s2) / 2 + 1):length(s2)]
  score_direct_smooth(parameters, n1, n2, h, N, w)
}
#' Type I version for smooth direct designs
#'
#' \code{type_one_smooth} gives the version of the type I error that is
#' needed for \link{stage_two}. It approximates
#' 1 - pnorm(cf) - integral over (cf, ce) of pnorm(c2(w)) * dnorm(w)
#' by a trapezoid-type quadrature on the nodes w.
#'
#' @param parameters Parameters specifying the design (currently unused here)
#' @param cf Boundary for stopping for futility
#' @param c2 c_2-values
#' @param h Distance between two nodes
#' @param N 4N+1 gives the number of nodes
#' @param w nodes inside the interval (cf,ce)
#'
type_one_smooth <- function(parameters, cf, c2, h, N, w) {
  # Quadrature weights: endpoints get weight 1, interior nodes weight 2.
  wts <- c(1, rep(2, N - 1), 1)
  continue_mass <- (h / 2) * sum(pnorm(c2) * dnorm(w) * wts)
  1 - pnorm(cf) - continue_mass
}
#' Type II version for smooth direct designs
#'
#' \code{type_two_smooth} gives the version of the type II error that is
#' needed for \link{stage_two}. It adds the futility-stop mass
#' pnorm(cf - sqrt(n1) * mu) to a trapezoid-type quadrature of
#' pnorm(c2(w) - sqrt(n2(w)) * mu) * dnorm(w - sqrt(n1) * mu).
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param c2 c_2-values
#' @param n1 First stage sample size
#' @param n2 n_2-values
#' @param h Distance between two nodes
#' @param N 4N+1 gives the number of nodes
#' @param w nodes inside the interval (cf,ce)
#'
type_two_smooth <- function(parameters, cf, c2, n1, n2, h, N, w) {
  # Quadrature weights: endpoints get weight 1, interior nodes weight 2.
  wts <- c(1, rep(2, N - 1), 1)
  mu <- parameters$mu
  # abs() guards against slightly negative n2 from the optimizer.
  integrand <- wts * pnorm(c2 - sqrt(abs(n2)) * mu) * dnorm(w - sqrt(n1) * mu)
  pnorm(cf - sqrt(n1) * mu) + (h / 2) * sum(integrand)
}
#' Compute optimal stage two values for given first stage
#'
#' \code{stage_two} computes the functions c_2 and n_2 that hold the error
#' constraints and are optimal w.r.t. expected sample size under the
#' alternative for a prespecified first stage.
#'
#' \code{stage_two} is the base of \link{direct_design_smooth}.
#'
#' @param parameters Parameters specifying the design
#' @param cf Boundary for stopping for futility
#' @param ce Boundary for stopping for efficacy
#' @param n1 First stage sample size
#'
#' @return A vector. The first half give the c_2, and the second the
#' n_2-values, on an equidistant grid inside the interval (cf,ce). If the
#' optimizer leaves either error constraint clearly inactive, the n_2-values
#' are replaced by the sentinel 99999 to flag failure.
#'
#' @export
stage_two <- function(parameters, cf, ce, n1) {
  N <- 12
  h <- (ce - cf) / N
  w <- seq(cf, ce, h)
  nw <- length(w)
  # Starting values taken from the optimal group-sequential design.
  k <- optimal_gsd(parameters)
  start_n2 <- rep(ceiling(k$n2(k$cf + (k$ce - k$cf) / 2)), nw)
  start_c2 <- rep(k$c2(k$cf / 2 + k$ce / 2), nw)
  # Objective and constraint wrappers; x = c(c2-values, n2-values).
  score_min <- function(n2) score_direct_smooth(parameters, n1, n2, h, N, w)
  t_1 <- function(c2) type_one_smooth(parameters, cf, c2, h, N, w)
  t_2 <- function(c2, n2) type_two_smooth(parameters, cf, c2, n1, n2, h, N, w)
  optimum <- nloptr::nloptr(
    x0 = c(start_c2, start_n2),
    eval_f = function(x) score_min(x[(nw + 1):(2 * nw)]),
    eval_g_ineq = function(x) c(
      t_1(x[1:nw]) - parameters$alpha,
      t_2(x[1:nw], x[(nw + 1):(2 * nw)]) - parameters$beta
    ),
    lb = c(rep(-1, nw), rep(1, nw)),
    ub = c(rep(4, nw), rep(Inf, nw)),
    opts = list(
      algorithm = "NLOPT_LN_COBYLA",
      xtol_rel = 0.0001,
      maxeval = 99920000
    )
  )
  c2 <- optimum$solution[1:nw]
  n2_opt <- optimum$solution[(nw + 1):(2 * nw)]
  # Relative slack in the two error constraints.
  r1 <- t_1(c2) / parameters$alpha
  r2 <- t_2(c2, n2_opt) / parameters$beta
  # Accept the solution only when both constraints are (nearly) active;
  # otherwise return a sentinel so callers can detect failure.
  if (abs(1 - r1) < 0.05 && abs(1 - r2) < 0.05) {
    n2 <- n2_opt
  } else {
    n2 <- rep(99999, nw)
  }
  c(c2, n2)
}
|
################################################################################
### Heranalyse Toetsanalyse meerdere versies stap 2.R
################################################################################
### R code voor Tentamenanalyse Vrije Universiteit Amsterdam
###
### Bestandsnaam: Heranalyse Toetsanalyse meerdere versies stap 2.R
### Doel: Analyseren van teleform tentamendata voor
### tentamen met meerdere versies
###
### Afhankelijkheden: geen
###
### Gebruikte datasets: Teleform .DEL bestand
###
### Opmerkingen:
###
################################################################################
### TODO:
### 1) Testen
###
################################################################################
### Geschiedenis:
### 09-08-2018: DD: Aanmaken bestand
################################################################################
## Replace empty cells with NA so that they are scored correctly
data[] <- lapply(data, str_trim)
is.na(data) <- data==''
## Transform raw letter data into score data + basic analysis
scored_data <- score_mc(data, sleutel, multiKeySep = ",",
                        output.scored = TRUE, rel = TRUE)
## Build a distractor analysis from the given answers and the answer key ('sleutel')
rar_analyse <- distractorAnalysis(data, sleutel, multiKeySep = ",", nGroups=3) %>% bind_rows(.id = "id") %>%
  dplyr:: select(id, key, pBis) %>% spread(key = key, value = pBis)
# Transpose the answer key so it can be joined onto the distractor table
tsleutel <- t(sleutel) %>% as.data.frame() %>%
  rownames_to_column(var = "id") %>%
  dplyr:: select(id, sleutel = V1)
rar_analyse <- rar_analyse %>%
  left_join(tsleutel) %>%
  mutate(vraagnummer = readr:: parse_number(id)) %>%
  arrange(vraagnummer) %>%
  dplyr:: select(vraagnummer,
                 sleutel,
                 everything()) %>%
  dplyr:: select(-id)
studentnummers_namen <- teleformdata_correct[1:2]
## Add student numbers and names to the score data
scored_datax <- cbind(studentnummers_namen, scored_data$scored)
## Add student numbers to each student's total score
total_score <- cbind(studentnummers_namen, scored_data[1])
## Transform scores into grades (linear scale through the cut score 'cesuur' = grade 5.5)
total_score <- mutate(total_score, cijfer = (10-(nrq-total_score$score)/(nrq-cesuur)*(10-5.5)))
# Grades are floored at 1
total_score <- total_score %>% mutate(cijfer = replace(cijfer, cijfer<1, 1))
total_score <- dplyr:: rename(total_score, studentnamen = stud_naam, studentnummers = stud_nr) %>%
  mutate(studentnummers = as.integer(studentnummers))
## Extract Cronbach's alpha (KR-20)
KR20 <- purrr:: pluck(scored_data, 2, "alpha")
# KR20 <- scored_data$reliability$alpha
## Compute KR-20 (75): reliability extrapolated to a 75-item test
ifactor <- 75/nrq
KR20_75 <- round(CTT:: spearman.brown(KR20, input = ifactor, n.or.r = "n")$r.new, digits = 2)
## Item characteristic curves (ICC) for all items on one page
## (remove the first 2 lines of this snippet to create separate plots)
# par(mfrow=c(4,5))
# par(cex = 0.4)
# for ( i in 1:nrq ) cttICC(scored_data$score, scored_data$scored[,i],
#                           colTheme="spartans", cex=1.5, ylab=names(sleutel[i]))
## Create the item analysis
itemanalyse <- itemAnalysis(as.data.frame(scored_data$scored), NA.Delete=FALSE)$itemReport %>%
  dplyr:: select(-bis) %>%
  dplyr::rename(P_waarde = itemMean,
                rir = pBis,
                "New Alpha" = alphaIfDeleted)
## Replace NA with zeros
itemanalyse[is.na(itemanalyse)] <- 0
## Compute the relative p-value (corrects for the guessing chance 'gk')
itemanalyse <- itemanalyse %>%
  mutate(Rel_P = ((-1/(gk-1))*P_waarde+1-(-1/(gk-1))))
## Write out test-level statistics
geslaagd <- filter(total_score, cijfer >= 5.5) %>% nrow()
toets <- tbl_df(scored_data$reliability[1:5]) %>% round(digits = 2)
toets <- mutate(toets, KR20_75 = KR20_75) %>%
  dplyr:: select(nItem,
                 nPerson,
                 alpha,
                 KR20_75,
                 scaleMean,
                 scaleSD) %>%
  dplyr:: mutate(meanRelP = round(summarise(itemanalyse, mean(Rel_P))$`mean(Rel_P)`, digits = 2),
                 meanP = round(summarise(itemanalyse, mean(P_waarde))$`mean(P_waarde)`, digits = 2),
                 perc_geslaagd = paste0(round(geslaagd/nrow(total_score)*100),"%"),
                 cesuur = cesuur)
## Compute kappa (decision consistency at the cut score)
kappa <- round(((KR20)*(toets$scaleSD^2)+(toets$scaleMean-cesuur)^2)/((toets$scaleSD^2) + (toets$scaleMean-cesuur)^2), digits = 2)
toets <- mutate(toets, kappa = as.numeric(kappa))
## Determine the number of students
nrst <- toets$nPerson
## Replace NA in the data with an empty cell
data[is.na(data)] <- " "
## Add per-option proportion columns (A..F, depending on the number of
## answer options 'nra') to the item analysis
itemanalyse["A"] <- NA
itemanalyse["B"] <- NA
if (nra >= 3) {
  itemanalyse["C"] <- NA
}
if (nra >= 4 ) {
  itemanalyse["D"] <- NA
}
if (nra >= 5) {
  itemanalyse["E"] <- NA
}
if (nra >= 6) {
  itemanalyse["F"] <- NA
}
# Proportion of students choosing each option, per question
for ( i in 1:nrq ) itemanalyse$A[i] <- (sum(str_count(data[,i], "A"))/nrst)
for ( i in 1:nrq ) itemanalyse$B[i] <- (sum(str_count(data[,i], "B"))/nrst)
if (nra >= 3) {
  for ( i in 1:nrq ) itemanalyse$C[i] <- (sum(str_count(data[,i], "C"))/nrst)
}
if (nra >= 4) {
  for ( i in 1:nrq ) itemanalyse$D[i] <- (sum(str_count(data[,i], "D"))/nrst)
}
if (nra >= 5) {
  for ( i in 1:nrq ) itemanalyse$E[i] <- (sum(str_count(data[,i], "E"))/nrst)
}
if (nra >= 6) {
  for ( i in 1:nrq ) itemanalyse$'F'[i] <- (sum(str_count(data[,i], "F"))/nrst)
}
## Generate review-advice flags based on P- and rir-values
itemanalyse <- itemanalyse %>%
  mutate(.A = if_else(Rel_P < 0.4 & rir <= 0.10, "A", ""),
         .B = if_else(Rel_P < 0.7 & rir < -0.10, "B", ""),
         .C = if_else(P_waarde < 0.3 & rir <= 0.05 & rir >= -0.05, "C", ""),
         .D = if_else(P_waarde < (gk+0.04) & rir > 0.05, "D", ""),
         .E = if_else(Rel_P + rir < 0.4, "E", ""))
## Reorder the item-analysis columns depending on the number of answer options
if (nra == 2) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 3) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 4) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 5) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 6) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, 'F', P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
## Add the answer key used to the item analysis
tsleutel <- as.data.frame(t(sleutel))
itemanalyse <- cbind(tsleutel, itemanalyse) %>%
  dplyr:: rename(Key = V1)
itemanalyse <- dplyr:: mutate(itemanalyse, itemName = colnames(sleutel))
itemanalyse <- dplyr:: rename(itemanalyse, Item = itemName, P = P_waarde, 'P\''= Rel_P )
## Compute the mean score and sd per test version
versie_score <- inner_join(total_score, student_versies, by = "studentnummers") %>% group_by(Toetsversie) %>%
  summarise(mean=mean(score), sd=sd(score), n=n())
## Two-sample t-tests on summary statistics: compare version A (row 1)
## against each other version and flag significant score differences
ttest <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                   mean.y=versie_score$mean[2], s.y=versie_score$sd[2], n.y=versie_score$n[2])
if(ttest$p.value < 0.05) {
  write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
  print("Gemiddelde score versie B en A verschillen significant")
  profvis::pause(20)
}
if (nrv == 3) {
  ttest2 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[3], s.y=versie_score$sd[3], n.y=versie_score$n[3])
  if(ttest2$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie C en A verschillen significant")
    profvis::pause(20)
  }
}
if (nrv == 4) {
  ttest2 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[3], s.y=versie_score$sd[3], n.y=versie_score$n[3])
  if(ttest2$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie C en A verschillen significant")
    profvis::pause(20)
  }
  ttest3 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[4], s.y=versie_score$sd[4], n.y=versie_score$n[4])
  if(ttest3$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie D en A verschillen significant")
    profvis::pause(20)
  }
}
| /MC_toetsen/Analysescripts/Heranalyse Toetsanalyse meerdere versies stap 2.R | permissive | Dritty/toetsanalyse | R | false | false | 9,101 | r | ################################################################################
### Heranalyse Toetsanalyse meerdere versies stap 2.R
################################################################################
### R code voor Tentamenanalyse Vrije Universiteit Amsterdam
###
### Bestandsnaam: Heranalyse Toetsanalyse meerdere versies stap 2.R
### Doel: Analyseren van teleform tentamendata voor
### tentamen met meerdere versies
###
### Afhankelijkheden: geen
###
### Gebruikte datasets: Teleform .DEL bestand
###
### Opmerkingen:
###
################################################################################
### TODO:
### 1) Testen
###
################################################################################
### Geschiedenis:
### 09-08-2018: DD: Aanmaken bestand
################################################################################
## Replace empty cells with NA so that they are scored correctly
data[] <- lapply(data, str_trim)
is.na(data) <- data==''
## Transform raw letter data into score data + basic analysis
scored_data <- score_mc(data, sleutel, multiKeySep = ",",
                        output.scored = TRUE, rel = TRUE)
## Build a distractor analysis from the given answers and the answer key ('sleutel')
rar_analyse <- distractorAnalysis(data, sleutel, multiKeySep = ",", nGroups=3) %>% bind_rows(.id = "id") %>%
  dplyr:: select(id, key, pBis) %>% spread(key = key, value = pBis)
# Transpose the answer key so it can be joined onto the distractor table
tsleutel <- t(sleutel) %>% as.data.frame() %>%
  rownames_to_column(var = "id") %>%
  dplyr:: select(id, sleutel = V1)
rar_analyse <- rar_analyse %>%
  left_join(tsleutel) %>%
  mutate(vraagnummer = readr:: parse_number(id)) %>%
  arrange(vraagnummer) %>%
  dplyr:: select(vraagnummer,
                 sleutel,
                 everything()) %>%
  dplyr:: select(-id)
studentnummers_namen <- teleformdata_correct[1:2]
## Add student numbers and names to the score data
scored_datax <- cbind(studentnummers_namen, scored_data$scored)
## Add student numbers to each student's total score
total_score <- cbind(studentnummers_namen, scored_data[1])
## Transform scores into grades (linear scale through the cut score 'cesuur' = grade 5.5)
total_score <- mutate(total_score, cijfer = (10-(nrq-total_score$score)/(nrq-cesuur)*(10-5.5)))
# Grades are floored at 1
total_score <- total_score %>% mutate(cijfer = replace(cijfer, cijfer<1, 1))
total_score <- dplyr:: rename(total_score, studentnamen = stud_naam, studentnummers = stud_nr) %>%
  mutate(studentnummers = as.integer(studentnummers))
## Extract Cronbach's alpha (KR-20)
KR20 <- purrr:: pluck(scored_data, 2, "alpha")
# KR20 <- scored_data$reliability$alpha
## Compute KR-20 (75): reliability extrapolated to a 75-item test
ifactor <- 75/nrq
KR20_75 <- round(CTT:: spearman.brown(KR20, input = ifactor, n.or.r = "n")$r.new, digits = 2)
## Item characteristic curves (ICC) for all items on one page
## (remove the first 2 lines of this snippet to create separate plots)
# par(mfrow=c(4,5))
# par(cex = 0.4)
# for ( i in 1:nrq ) cttICC(scored_data$score, scored_data$scored[,i],
#                           colTheme="spartans", cex=1.5, ylab=names(sleutel[i]))
## Create the item analysis
itemanalyse <- itemAnalysis(as.data.frame(scored_data$scored), NA.Delete=FALSE)$itemReport %>%
  dplyr:: select(-bis) %>%
  dplyr::rename(P_waarde = itemMean,
                rir = pBis,
                "New Alpha" = alphaIfDeleted)
## Replace NA with zeros
itemanalyse[is.na(itemanalyse)] <- 0
## Compute the relative p-value (corrects for the guessing chance 'gk')
itemanalyse <- itemanalyse %>%
  mutate(Rel_P = ((-1/(gk-1))*P_waarde+1-(-1/(gk-1))))
## Write out test-level statistics
geslaagd <- filter(total_score, cijfer >= 5.5) %>% nrow()
toets <- tbl_df(scored_data$reliability[1:5]) %>% round(digits = 2)
toets <- mutate(toets, KR20_75 = KR20_75) %>%
  dplyr:: select(nItem,
                 nPerson,
                 alpha,
                 KR20_75,
                 scaleMean,
                 scaleSD) %>%
  dplyr:: mutate(meanRelP = round(summarise(itemanalyse, mean(Rel_P))$`mean(Rel_P)`, digits = 2),
                 meanP = round(summarise(itemanalyse, mean(P_waarde))$`mean(P_waarde)`, digits = 2),
                 perc_geslaagd = paste0(round(geslaagd/nrow(total_score)*100),"%"),
                 cesuur = cesuur)
## Compute kappa (decision consistency at the cut score)
kappa <- round(((KR20)*(toets$scaleSD^2)+(toets$scaleMean-cesuur)^2)/((toets$scaleSD^2) + (toets$scaleMean-cesuur)^2), digits = 2)
toets <- mutate(toets, kappa = as.numeric(kappa))
## Determine the number of students
nrst <- toets$nPerson
## Replace NA in the data with an empty cell
data[is.na(data)] <- " "
## Add per-option proportion columns (A..F, depending on the number of
## answer options 'nra') to the item analysis
itemanalyse["A"] <- NA
itemanalyse["B"] <- NA
if (nra >= 3) {
  itemanalyse["C"] <- NA
}
if (nra >= 4 ) {
  itemanalyse["D"] <- NA
}
if (nra >= 5) {
  itemanalyse["E"] <- NA
}
if (nra >= 6) {
  itemanalyse["F"] <- NA
}
# Proportion of students choosing each option, per question
for ( i in 1:nrq ) itemanalyse$A[i] <- (sum(str_count(data[,i], "A"))/nrst)
for ( i in 1:nrq ) itemanalyse$B[i] <- (sum(str_count(data[,i], "B"))/nrst)
if (nra >= 3) {
  for ( i in 1:nrq ) itemanalyse$C[i] <- (sum(str_count(data[,i], "C"))/nrst)
}
if (nra >= 4) {
  for ( i in 1:nrq ) itemanalyse$D[i] <- (sum(str_count(data[,i], "D"))/nrst)
}
if (nra >= 5) {
  for ( i in 1:nrq ) itemanalyse$E[i] <- (sum(str_count(data[,i], "E"))/nrst)
}
if (nra >= 6) {
  for ( i in 1:nrq ) itemanalyse$'F'[i] <- (sum(str_count(data[,i], "F"))/nrst)
}
## Generate review-advice flags based on P- and rir-values
itemanalyse <- itemanalyse %>%
  mutate(.A = if_else(Rel_P < 0.4 & rir <= 0.10, "A", ""),
         .B = if_else(Rel_P < 0.7 & rir < -0.10, "B", ""),
         .C = if_else(P_waarde < 0.3 & rir <= 0.05 & rir >= -0.05, "C", ""),
         .D = if_else(P_waarde < (gk+0.04) & rir > 0.05, "D", ""),
         .E = if_else(Rel_P + rir < 0.4, "E", ""))
## Reorder the item-analysis columns depending on the number of answer options
if (nra == 2) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 3) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 4) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 5) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
if (nra == 6) {
  itemanalyse <- itemanalyse %>% dplyr::select(itemName, A, B, C, D, E, 'F', P_waarde, Rel_P, rir,
                                               `New Alpha`, .A, .B, .C, .D, .E)
}
## Add the answer key used to the item analysis
tsleutel <- as.data.frame(t(sleutel))
itemanalyse <- cbind(tsleutel, itemanalyse) %>%
  dplyr:: rename(Key = V1)
itemanalyse <- dplyr:: mutate(itemanalyse, itemName = colnames(sleutel))
itemanalyse <- dplyr:: rename(itemanalyse, Item = itemName, P = P_waarde, 'P\''= Rel_P )
## Compute the mean score and sd per test version
versie_score <- inner_join(total_score, student_versies, by = "studentnummers") %>% group_by(Toetsversie) %>%
  summarise(mean=mean(score), sd=sd(score), n=n())
## Two-sample t-tests on summary statistics: compare version A (row 1)
## against each other version and flag significant score differences
ttest <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                   mean.y=versie_score$mean[2], s.y=versie_score$sd[2], n.y=versie_score$n[2])
if(ttest$p.value < 0.05) {
  write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
  print("Gemiddelde score versie B en A verschillen significant")
  profvis::pause(20)
}
if (nrv == 3) {
  ttest2 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[3], s.y=versie_score$sd[3], n.y=versie_score$n[3])
  if(ttest2$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie C en A verschillen significant")
    profvis::pause(20)
  }
}
if (nrv == 4) {
  ttest2 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[3], s.y=versie_score$sd[3], n.y=versie_score$n[3])
  if(ttest2$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie C en A verschillen significant")
    profvis::pause(20)
  }
  ttest3 <- tsum.test(mean.x=versie_score$mean[1], s.x=versie_score$sd[1], n.x=versie_score$n[1],
                      mean.y=versie_score$mean[4], s.y=versie_score$sd[4], n.y=versie_score$n[4])
  if(ttest3$p.value < 0.05) {
    write.csv2(versie_score, paste0(Network_directory,"Versie_score_verschillen.csv"))
    print("Gemiddelde score versie D en A verschillen significant")
    profvis::pause(20)
  }
}
|
library(dplyr)
library(ggplot2)
# write_csv() lives in readr, not dplyr/ggplot2 — without this the
# script errors at the final step.
library(readr)

# Simulate one random yield path for a zero-coupon bond and write the
# resulting price path to CSV.
current_yield <- 0.01   # starting yield level
yield_vol <- 0.0075     # annualised yield volatility
maturity <- 10          # bond maturity in years
time_step <- 0.1        # simulation step in years

set.seed(0)

tibble(
  t = seq(0, maturity, time_step),
) %>%
  mutate(
    # Gaussian yield increments scaled to the time step
    yield_chg = rnorm(n(), 0, yield_vol * sqrt(time_step)),
    yield = current_yield + cumsum(yield_chg),
    # zero-coupon price given the remaining time to maturity
    price = 1 / (1 + yield) ^ (maturity - t)
  ) %>%
  write_csv("bond_price_random_path.csv")
| /index-investing/data/bond_price.R | no_license | artem-bakulin/latex | R | false | false | 383 | r | library(dplyr)
library(ggplot2)
# write_csv() lives in readr, not dplyr/ggplot2 — without this the
# script errors at the final step.
library(readr)

# Simulate one random yield path for a zero-coupon bond and write the
# resulting price path to CSV.
current_yield <- 0.01   # starting yield level
yield_vol <- 0.0075     # annualised yield volatility
maturity <- 10          # bond maturity in years
time_step <- 0.1        # simulation step in years

set.seed(0)

tibble(
  t = seq(0, maturity, time_step),
) %>%
  mutate(
    # Gaussian yield increments scaled to the time step
    yield_chg = rnorm(n(), 0, yield_vol * sqrt(time_step)),
    yield = current_yield + cumsum(yield_chg),
    # zero-coupon price given the remaining time to maturity
    price = 1 / (1 + yield) ^ (maturity - t)
  ) %>%
  write_csv("bond_price_random_path.csv")
|
# Extracted example for onls::x0(): the x0-values (orthogonal abscissae)
# of an orthogonal nonlinear least squares fit.
library(onls)
### Name: x0
### Title: x0-values from orthogonal nonlinear least squares regression
### Aliases: x0
### Keywords: optimize models nonlinear
### ** Examples
# Use the first run of the DNase ELISA data and perturb the response
# with ~10% multiplicative Gaussian noise.
DNase1 <- subset(DNase, Run == 1)
DNase1$density <- sapply(DNase1$density, function(x) rnorm(1, x, 0.1 * x))
# Fit a 3-parameter logistic model by orthogonal NLS, then extract x0.
mod <- onls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
            data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1))
x0(mod)
| /data/genthat_extracted_code/onls/examples/x0.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 431 | r | library(onls)
# Extracted example for onls::x0(): the x0-values (orthogonal abscissae)
# of an orthogonal nonlinear least squares fit.
### Name: x0
### Title: x0-values from orthogonal nonlinear least squares regression
### Aliases: x0
### Keywords: optimize models nonlinear
### ** Examples
# Use the first run of the DNase ELISA data and perturb the response
# with ~10% multiplicative Gaussian noise.
DNase1 <- subset(DNase, Run == 1)
DNase1$density <- sapply(DNase1$density, function(x) rnorm(1, x, 0.1 * x))
# Fit a 3-parameter logistic model by orthogonal NLS, then extract x0.
mod <- onls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
            data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1))
x0(mod)
|
#' t.ci.table
#'
#' Build a table of posterior summary quantities — mean, standard error and
#' student-t credible-interval quantiles — from coefficient estimates and
#' their variance-covariance matrix. See page 169.
#'
#' @usage t.ci.table(coefs,cov.mat,level=0.95,degrees=Inf,quantiles=c(0.025,0.500,0.975))
#'
#' @param coefs vector of coefficient estimates, usually posterior means
#' @param cov.mat variance-covariance matrix
#' @param level desired coverage level (currently unused; the interval is
#'   controlled through \code{quantiles})
#' @param degrees degrees of freedom parameter for students-t distribution assumption
#' @param quantiles vector of desired CDF points (quantiles) to return
#'
#' @return A list with a title and the matrix of posterior quantities
#'   (one row per coefficient), rounded to 4 decimals.
#'
#' @author Jeff Gill
#' @export
t.ci.table <- function(coefs, cov.mat, level = 0.95, degrees = Inf,
                       quantiles = c(0.025, 0.500, 0.975)) {
  se <- sqrt(diag(cov.mat))
  # Column j of the quantile part holds coefs + qt(quantiles[j], degrees) * se.
  quantile.mat <- cbind(coefs, se,
                        t(qt(quantiles, degrees) %o% se) +
                          matrix(rep(coefs, length(quantiles)),
                                 ncol = length(quantiles)))
  # paste() is vectorized, so the quantile labels need no loop.
  quantile.names <- c("Mean", "Std. Error", paste(quantiles, "Quantile"))
  dimnames(quantile.mat)[2] <- list(quantile.names)
  return(list(title = "Posterior Quantities", round(quantile.mat, 4)))
}
| /BaM2/R/t.ci.table.R | no_license | miguelmariagp/BAMnew | R | false | false | 1,123 | r | #' t.ci.table
#' t.ci.table
#'
#' Build a table of posterior summary quantities — mean, standard error and
#' student-t credible-interval quantiles — from coefficient estimates and
#' their variance-covariance matrix. See page 169.
#'
#' @usage t.ci.table(coefs,cov.mat,level=0.95,degrees=Inf,quantiles=c(0.025,0.500,0.975))
#'
#' @param coefs vector of coefficient estimates, usually posterior means
#' @param cov.mat variance-covariance matrix
#' @param level desired coverage level (currently unused; the interval is
#'   controlled through \code{quantiles})
#' @param degrees degrees of freedom parameter for students-t distribution assumption
#' @param quantiles vector of desired CDF points (quantiles) to return
#'
#' @return A list with a title and the matrix of posterior quantities
#'   (one row per coefficient), rounded to 4 decimals.
#'
#' @author Jeff Gill
#' @export
t.ci.table <- function(coefs, cov.mat, level = 0.95, degrees = Inf,
                       quantiles = c(0.025, 0.500, 0.975)) {
  se <- sqrt(diag(cov.mat))
  # Column j of the quantile part holds coefs + qt(quantiles[j], degrees) * se.
  quantile.mat <- cbind(coefs, se,
                        t(qt(quantiles, degrees) %o% se) +
                          matrix(rep(coefs, length(quantiles)),
                                 ncol = length(quantiles)))
  # paste() is vectorized, so the quantile labels need no loop.
  quantile.names <- c("Mean", "Std. Error", paste(quantiles, "Quantile"))
  dimnames(quantile.mat)[2] <- list(quantile.names)
  return(list(title = "Posterior Quantities", round(quantile.mat, 4)))
}
|
# Portfolio-wide sessions figure: a faceted lollipop chart of session counts
# per application. The full stem shows total users, the thicker inner stem
# new users; apps are faceted into traffic bins with grey background shading,
# count labels are colored by trend, and a hand-drawn inset legend is placed
# in the lowest-traffic panel. The plot is saved to viz[["location"]].
visualize.portfolio_sessions_all <- function(viz=as.viz("portfolio_sessions_all")){
  library(dplyr)
  library(tidyr)
  library(ggplot2)
  library(RColorBrewer)
  library(grid)
  deps <- readDepends(viz)
  height = viz[["height"]]
  width = viz[["width"]]
  bar_line_col = viz[["bar_line_col"]]
  text_col = viz[["text_col"]]
  summary_data_full <- deps[["sessions_all"]]
  # App with the fewest sessions per bin, for the first metric type only.
  # NOTE(review): min_app is computed but never used below — candidate for removal.
  min_app <- select(summary_data_full, bin, type, sessions, longName) %>%
    filter(type == levels(summary_data_full$type)[1]) %>%
    group_by(bin) %>%
    slice(which.min(sessions))
  # Place text labels 15% of the per-panel maximum beyond the stem tip.
  max_vals <- summary_data_full %>%
    group_by(type) %>%
    summarize(max_val = max(scaled_value, na.rm = TRUE))
  summary_data_full <- summary_data_full %>%
    left_join(max_vals, by = "type") %>%
    mutate(text_placement = scaled_value + 0.15*max_val)
  # Zero-session rows get their label anchored at the axis.
  summary_data_full$text_placement[summary_data_full$sessions == 0] <- 0
  # Grey background ramp: one shade per traffic bin.
  colfunc <- colorRampPalette(c("grey75","grey95"))
  cols <- colfunc(4)
  port_graph <- ggplot(data = summary_data_full,
                       aes(x = longName, y = scaled_value)) +
    # Full-panel background rectangle shaded per bin.
    geom_rect(aes(fill = bin),xmin = -Inf,xmax = Inf,
              ymin = -Inf,ymax = Inf,color = NA) +
    # Label colors keyed to the trend direction configured in viz.
    scale_color_manual(values = c("none" = viz[["trend_color"]]$none,
                                  "up" = viz[["trend_color"]]$up,
                                  "down" = viz[["trend_color"]]$down)) +
    # Thin stem = total users; thicker inner stem = new users.
    geom_segment(aes(xend = longName), yend=0, size = 0.65, color = bar_line_col) +
    geom_segment(aes(xend = longName, y = scaled_newUser),
                 yend=0, col=bar_line_col, size=1.15) +
    # Count labels: nonzero values right-shifted, zeros left-aligned at axis.
    geom_text(aes(label = session_text, y = text_placement, color = trend),
              size = 3, hjust = .75,
              data = summary_data_full[summary_data_full$scaled_value != 0,]) +
    geom_text(aes(label = session_text, y = text_placement, color = trend),
              size = 3, hjust = 0,
              data = summary_data_full[summary_data_full$scaled_value == 0,]) +
    geom_point(color = bar_line_col,
               data = summary_data_full[summary_data_full$scaled_value != 0,]) +
    facet_grid(bin ~ type, scales = "free",
               space = "free_y", drop = TRUE) +
    coord_flip() +
    scale_fill_manual(values = cols) +
    theme_bw() +
    theme(axis.title = element_blank(),
          axis.text.x = element_blank(),
          strip.text.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          strip.background = element_blank(),
          axis.ticks=element_blank(),
          legend.position = "none"
    )
  # Inspect the built plot to recover panel axis ranges, which are needed
  # to position the bin labels and the hand-drawn legend in data space.
  info_graph <- ggplot_build(port_graph)
  layout_stuff <- info_graph$layout
  # ggplot2 changed its internal layout structure in the 2.2.1.9000 dev series;
  # support both the old and new accessors.
  if(packageVersion("ggplot2") >= "2.2.1.9000"){
    lower_ranges <- layout_stuff$panel_scales_y[[3]]$range$range
    high_ranges <- layout_stuff$panel_scales_y[[1]]$range$range
  } else {
    lower_ranges <- layout_stuff$panel_ranges[[12]]$x.range
    high_ranges <- layout_stuff$panel_ranges[[4]]$x.range
  }
  # Legend geometry expressed as fractions of the panel's y-range.
  ymin <- 0.45*(diff(lower_ranges))+lower_ranges[1]
  ymax <- 0.98*(diff(lower_ranges))+lower_ranges[1]
  ystart <- 0.50*(diff(lower_ranges))+lower_ranges[1]
  ymid <- 0.6*(diff(lower_ranges))+lower_ranges[1]
  yend <- 0.95*(diff(lower_ranges))+lower_ranges[1]
  bin_mid <- 0.95*(diff(high_ranges))+high_ranges[1]
  # One bin label per facet row, drawn in the first metric column.
  text_df <- data.frame(label = c("Very High Traffic","High Traffic","Moderate Traffic","Low Traffic"),
                        type = factor(levels(summary_data_full$type)[1], levels = levels(summary_data_full$type)),
                        bin = factor(levels(summary_data_full$bin), levels = levels(summary_data_full$bin)),
                        longName = 1.25,
                        y = bin_mid,
                        stringsAsFactors = FALSE)
  # Coordinates for the hand-drawn "Total Users"/"New Users" legend, placed
  # in the last metric column of the lowest-traffic bin.
  fake_legend <- data.frame(label = c("Total Users","New Users"),
                            type = factor(levels(summary_data_full$type)[3], levels = levels(summary_data_full$type)),
                            bin = factor(levels(summary_data_full$bin)[4], levels = levels(summary_data_full$bin)),
                            longName = rev(levels(summary_data_full$longName)[1:2]),
                            ymin = ymin,
                            ystart = ystart,
                            ymid = ymid,
                            yend = yend,
                            ymax = ymax,
                            trend_text = c(NA, NA),
                            stringsAsFactors = FALSE)
  port_graph <- port_graph +
    geom_label(data = text_df,
               aes(x = longName, y = y, label = label),
               size = 3.5,hjust = "right",label.r = unit(0, "lines")) +
    # White legend box with sample stems mirroring the chart's lollipops.
    geom_rect(data = fake_legend[1,], aes(y = 0),
              ymin = fake_legend$ymin[1],
              ymax = fake_legend$ymax[1],
              xmin = .4,
              xmax = 2.75,
              color = "black", fill = "white") +
    geom_text(data = fake_legend,
              aes(x = longName, y = yend, label = label),
              hjust = "right", col = "black") +
    geom_segment(data = fake_legend[2,],
                 aes(x = longName,
                     xend = longName,
                     y = ystart, yend=ymid), col=bar_line_col, size=1.15) +
    geom_segment(data = fake_legend[1,], aes(xend = longName, y=ystart, yend=ymid), size=0.65, col=bar_line_col) +
    geom_point(data = fake_legend[1,], aes(x = longName, y=ymid), col=bar_line_col)
  ggsave(port_graph, file = viz[["location"]], height = height, width = width)
}
# Render the portfolio-wide "sessions" overview graphic.
#
# Draws one horizontal lollipop per application (total users as a thin
# stem ending in a point, new users as a thicker overlaid stem), faceted
# by traffic bin (rows) and metric type (columns), then annotates the
# plot with per-bin traffic labels and a hand-built legend, and saves
# the result to disk.
#
# viz: a viz object (default: as.viz("portfolio_sessions_all")) providing
#   - readDepends(viz)[["sessions_all"]]: data frame with columns bin,
#     type, sessions, longName, scaled_value, scaled_newUser,
#     session_text, trend (bin/type/longName are factors)
#   - viz[["height"]], viz[["width"]]: output size for ggsave()
#   - viz[["bar_line_col"]]: color for lollipop stems and points
#   - viz[["trend_color"]]: list with $none, $up, $down label colors
#   - viz[["location"]]: output image path
#
# Side effect: writes the figure to viz[["location"]].
visualize.portfolio_sessions_all <- function(viz = as.viz("portfolio_sessions_all")) {
  library(dplyr)
  library(tidyr)
  library(ggplot2)
  library(RColorBrewer)
  library(grid)

  deps <- readDepends(viz)
  height <- viz[["height"]]
  width <- viz[["width"]]
  bar_line_col <- viz[["bar_line_col"]]

  summary_data_full <- deps[["sessions_all"]]

  # Place each value label 15% of its facet's maximum beyond the lollipop
  # head so labels clear the points; zero-session rows are pinned to the
  # axis instead (they get hjust = 0 below).
  max_vals <- summary_data_full %>%
    group_by(type) %>%
    summarize(max_val = max(scaled_value, na.rm = TRUE))

  summary_data_full <- summary_data_full %>%
    left_join(max_vals, by = "type") %>%
    mutate(text_placement = scaled_value + 0.15 * max_val)
  summary_data_full$text_placement[summary_data_full$sessions == 0] <- 0

  # One background grey per traffic bin, darker for higher-traffic bins.
  colfunc <- colorRampPalette(c("grey75", "grey95"))
  cols <- colfunc(4)

  port_graph <- ggplot(data = summary_data_full,
                       aes(x = longName, y = scaled_value)) +
    # Full-panel rectangle per facet gives each traffic bin its own
    # background shade (fill mapped to bin; scale_fill_manual below).
    geom_rect(aes(fill = bin), xmin = -Inf, xmax = Inf,
              ymin = -Inf, ymax = Inf, color = NA) +
    scale_color_manual(values = c("none" = viz[["trend_color"]]$none,
                                  "up" = viz[["trend_color"]]$up,
                                  "down" = viz[["trend_color"]]$down)) +
    # Thin stem: total users; thicker overlaid stem: new users.
    geom_segment(aes(xend = longName), yend = 0, size = 0.65,
                 color = bar_line_col) +
    geom_segment(aes(xend = longName, y = scaled_newUser),
                 yend = 0, col = bar_line_col, size = 1.15) +
    # Session-count labels, colored by trend; zero-value rows are drawn
    # left-aligned at the axis rather than offset past the point.
    geom_text(aes(label = session_text, y = text_placement, color = trend),
              size = 3, hjust = .75,
              data = summary_data_full[summary_data_full$scaled_value != 0, ]) +
    geom_text(aes(label = session_text, y = text_placement, color = trend),
              size = 3, hjust = 0,
              data = summary_data_full[summary_data_full$scaled_value == 0, ]) +
    geom_point(color = bar_line_col,
               data = summary_data_full[summary_data_full$scaled_value != 0, ]) +
    facet_grid(bin ~ type, scales = "free",
               space = "free_y", drop = TRUE) +
    coord_flip() +
    scale_fill_manual(values = cols) +
    theme_bw() +
    theme(axis.title = element_blank(),
          axis.text.x = element_blank(),
          strip.text.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          strip.background = element_blank(),
          axis.ticks = element_blank(),
          legend.position = "none")

  # Pull the built panel ranges so the annotations below can be placed as
  # fractions of the real axis extents. ggplot2 renamed these internals
  # (panel_ranges -> panel_scales_*) in the 2.2.1.9000 dev series.
  # NOTE(review): the panel indices ([[1]]/[[3]] vs [[4]]/[[12]]) appear
  # hard-coded for a 4-bin x 3-type facet grid — confirm if facets change.
  info_graph <- ggplot_build(port_graph)
  layout_stuff <- info_graph$layout
  if (packageVersion("ggplot2") >= "2.2.1.9000") {
    lower_ranges <- layout_stuff$panel_scales_y[[3]]$range$range
    high_ranges <- layout_stuff$panel_scales_y[[1]]$range$range
  } else {
    lower_ranges <- layout_stuff$panel_ranges[[12]]$x.range
    high_ranges <- layout_stuff$panel_ranges[[4]]$x.range
  }

  # Anchor points for the hand-drawn legend (fractions of the lower
  # panel's range) and the traffic-bin labels (fraction of the top one).
  ymin <- 0.45 * (diff(lower_ranges)) + lower_ranges[1]
  ymax <- 0.98 * (diff(lower_ranges)) + lower_ranges[1]
  ystart <- 0.50 * (diff(lower_ranges)) + lower_ranges[1]
  ymid <- 0.6 * (diff(lower_ranges)) + lower_ranges[1]
  yend <- 0.95 * (diff(lower_ranges)) + lower_ranges[1]
  bin_mid <- 0.95 * (diff(high_ranges)) + high_ranges[1]

  # One traffic label per bin row, drawn only in the first type column.
  text_df <- data.frame(label = c("Very High Traffic", "High Traffic",
                                  "Moderate Traffic", "Low Traffic"),
                        type = factor(levels(summary_data_full$type)[1],
                                      levels = levels(summary_data_full$type)),
                        bin = factor(levels(summary_data_full$bin),
                                     levels = levels(summary_data_full$bin)),
                        longName = 1.25,
                        y = bin_mid,
                        stringsAsFactors = FALSE)

  # Data for a hand-drawn legend placed inside the last type column of
  # the bottom bin (the real legend is suppressed in the theme above).
  fake_legend <- data.frame(label = c("Total Users", "New Users"),
                            type = factor(levels(summary_data_full$type)[3],
                                          levels = levels(summary_data_full$type)),
                            bin = factor(levels(summary_data_full$bin)[4],
                                         levels = levels(summary_data_full$bin)),
                            longName = rev(levels(summary_data_full$longName)[1:2]),
                            ymin = ymin,
                            ystart = ystart,
                            ymid = ymid,
                            yend = yend,
                            ymax = ymax,
                            trend_text = c(NA, NA),
                            stringsAsFactors = FALSE)

  port_graph <- port_graph +
    geom_label(data = text_df,
               aes(x = longName, y = y, label = label),
               size = 3.5, hjust = "right", label.r = unit(0, "lines")) +
    # White box drawn behind the legend text and keys.
    geom_rect(data = fake_legend[1, ], aes(y = 0),
              ymin = fake_legend$ymin[1],
              ymax = fake_legend$ymax[1],
              xmin = .4,
              xmax = 2.75,
              color = "black", fill = "white") +
    geom_text(data = fake_legend,
              aes(x = longName, y = yend, label = label),
              hjust = "right", col = "black") +
    # Legend keys mirror the plot: thick stem = new users,
    # thin stem + point = total users.
    geom_segment(data = fake_legend[2, ],
                 aes(x = longName,
                     xend = longName,
                     y = ystart, yend = ymid), col = bar_line_col, size = 1.15) +
    geom_segment(data = fake_legend[1, ],
                 aes(xend = longName, y = ystart, yend = ymid),
                 size = 0.65, col = bar_line_col) +
    geom_point(data = fake_legend[1, ], aes(x = longName, y = ymid),
               col = bar_line_col)

  # Fully name both arguments: the original relied on partial matching of
  # `file` to ggsave()'s `filename` and passed the plot positionally.
  ggsave(filename = viz[["location"]], plot = port_graph,
         height = height, width = width)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.