content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Disease and antimicrobial surveillance in chicken farms
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' @format A data frame with 2226 rows and 9 variables:
#' \describe{
#' \item{FARMCODE}{code of the farm, character.}
#' \item{CYCLE}{number of the cycle, integer.}
#' \item{WEEK}{number of the week, integer.}
#' \item{SRepiratory}{presence of respiratory infection, logical. (Item name kept as spelled in the dataset's column — presumably a misspelling of "Respiratory" upstream; verify against the raw data.)}
#' \item{SDiarrhoea}{presence of diarrhoea, logical.}
#' \item{SCNS}{presence of central nervous system infection, logical.}
#' \item{SMalaise}{presence of malaise, logical.}
#' \item{Week_Mt}{weekly mortality, numeric.}
#' \item{AntibioticUse}{use of antibiotic, logical.}
#' }
#' @source \url{http://viparc.org}
"chcknfarms"
#' Antimicrobial usage (AMU)
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{amu <- read.csv2(system.file("extdata", "AMU_week.csv", package = "viparc"), sep=",", na.strings = "na", header = TRUE, stringsAsFactors = FALSE)}
#'
#' @format A data frame with 3664 rows and 2 variables:
#' \describe{
#' \item{WeekID}{number of the week, integer. (NOTE(review): \code{WeekID} is used as the merge key with the \code{diseases} dataset, where it is a composite flock-week ID — confirm which of the two descriptions is correct.)}
#' \item{AMU}{usage of antimicrobials, logical.}
#' }
#' @source \url{http://viparc.org}
"amu"
#' Diseases data
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{diseases <- read.csv2(system.file("extdata", "DISEASE_GridEvent.csv", package = "viparc"), sep=",", na.strings = "na", header = TRUE, stringsAsFactors = FALSE)}
#'
#' @format A data frame with 4271 rows and 17 variables:
#' \describe{
#' \item{FarmID}{ID of farm.}
#' \item{FlockID}{ID of flock, combining ID of farm with ID of cycle.}
#' \item{Cycle}{ID of cycle.}
#' \item{WEEK}{Week number.}
#' \item{WeekID}{ID of week, combining ID of flock with ID of week.}
#' \item{RESPIRATORY}{Presence / absence of respiratory symptoms.}
#' \item{DIARRHOEA}{Presence / absence of diarrhoea symptoms.}
#' \item{CNS}{Presence / absence of central nervous system infection symptoms.}
#' \item{MALAISE}{Presence / absence of malaise.}
#' \item{LEGLESIONS}{Presence / absence of leg lesions.}
#' \item{SUDDENDEATH}{Presence / absence of sudden death.}
#' \item{NoSign}{Presence / absence of symptoms. (NOTE(review): description duplicates \code{Sick_yes}; presumably this flags weeks with no clinical sign observed — verify.)}
#' \item{Sick_yes}{Presence / absence of symptoms (any symptom).}
#' \item{OTHDISEASE}{Presence / absence of other disease.}
#' \item{CHICKENSICKNO}{Number of sick chickens.}
#' \item{CHICKENDISEASEDEATH}{Number of chickens dead, excluding sudden death.}
#' \item{CHICKENSUDDENDEATH}{Number of chickens dying from sudden death.}
#' }
#' @source \url{http://viparc.org}
"diseases"
#' AMU and Diseases data
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{amu_with_diseases <- merge(AMU, diseases, by = c("WeekID"), all.x = TRUE)}
#' (NOTE(review): the original note spelled the argument \code{all.X};
#' \code{merge()}'s left-join argument is \code{all.x}, and an \code{all.X}
#' argument would have been silently absorbed by \code{...} and ignored —
#' verify how the data were actually generated.)
#'
#' @format A data frame with 3663 rows and 18 variables:
#' \describe{
#' \item{WeekID}{ID of week, combining ID of flock with ID of week.}
#' \item{AMU}{usage of antimicrobials, logical.}
#' \item{FarmID}{ID of farm.}
#' \item{FlockID}{ID of flock, combining ID of farm with ID of cycle.}
#' \item{Cycle}{ID of cycle.}
#' \item{WEEK}{Week number.}
#' \item{RESPIRATORY}{Presence / absence of respiratory symptoms.}
#' \item{DIARRHOEA}{Presence / absence of diarrhoea symptoms.}
#' \item{CNS}{Presence / absence of central nervous system infection symptoms.}
#' \item{MALAISE}{Presence / absence of malaise.}
#' \item{LEGLESIONS}{Presence / absence of leg lesions.}
#' \item{SUDDENDEATH}{Presence / absence of sudden death.}
#' \item{NoSign}{Presence / absence of symptoms.}
#' \item{Sick_yes}{Presence / absence of symptoms (any symptom).}
#' \item{OTHDISEASE}{Presence / absence of other disease.}
#' \item{CHICKENSICKNO}{Number of sick chickens.}
#' \item{CHICKENDISEASEDEATH}{Number of chickens dead, excluding sudden death.}
#' \item{CHICKENSUDDENDEATH}{Number of chickens dying from sudden death.}
#' }
#' @source \url{http://viparc.org}
"amu_with_diseases"
| /R/data.R | no_license | viparc/viparc | R | false | false | 4,041 | r | #' Disease and antimicrobial surveillance in chicken farms
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' @format A data frame with 2226 rows and 9 variables:
#' \describe{
#' \item{FARMCODE}{code of the farm, character.}
#' \item{CYCLE}{number of the cycle, integer.}
#' \item{WEEK}{number of the week, integer}
#' \item{SRepiratory}{presence of respiratory infection, logical.}
#' \item{SDiarrhoea}{presence of diarrhoea, logical.}
#' \item{SCNS}{presence of central nervous system infection, logical.}
#' \item{SMalaise}{presence of malaise, logical.}
#' \item{Week_Mt}{weekly mortality, numeric.}
#' \item{AntibioticUse}{use of antibiotic, logical.}
#' }
#' @source \url{http://viparc.org}
"chcknfarms"
#' Antimicrobial usage (AMU)
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{amu <- read.csv2(system.file("extdata", "AMU_week.csv", package = "viparc"), sep=",", na.strings = "na", header = TRUE, stringsAsFactors = FALSE)}
#'
#' @format A data frame with 3664 rows and 2 variables:
#' \describe{
#' \item{WeekID}{number of the week, integer.}
#' \item{AMU}{usage of antimicrobials, logical.}
#' }
#' @source \url{http://viparc.org}
"amu"
#' Diseases data
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{diseases <- read.csv2(system.file("extdata", "DISEASE_GridEvent.csv", package = "viparc"), sep=",", na.strings = "na", header = TRUE, stringsAsFactors = FALSE)}
#'
#' @format A data frame with 4271 rows and 17 variables:
#' \describe{
#' \item{FarmID}{ID of farm.}
#' \item{FlockID}{ID of flock, combining ID of farm with ID of cycle.}
#' \item{Cycle}{ID of cycle.}
#' \item{WEEK}{Week number.}
#' \item{WeekID}{ID of week, combining ID of flock with ID of week.}
#' \item{RESPIRATORY}{Presence / absence of respiratory symptoms.}
#' \item{DIARRHOEA}{Presence / absence of diarrhoea symptoms.}
#' \item{CNS}{Presence / absence of central nervous system infection symptoms.}
#' \item{MALAISE}{Presence / absence of malaise.}
#' \item{LEGLESIONS}{Presence / absence of leg lesions.}
#' \item{SUDDENDEATH}{Presence / absence of sudden death.}
#' \item{NoSign}{Presence / absence of symptoms.}
#' \item{Sick_yes}{Presence / absence of symptoms (any symptom).}
#' \item{OTHDISEASE}{Presence / absence of other disease.}
#' \item{CHICKENSICKNO}{Number of sick chicken.}
#' \item{CHICKENDISEASEDEATH}{Number of chicken dead, excluding sudden death.}
#' \item{CHICKENSUDDENDEATH}{Number of chicken dying from sudden death.}
#' }
#' @source \url{http://viparc.org}
"diseases"
#' AMU and Diseases data
#'
#' Data collected in the ViParc project (http://viparc.org).
#'
#' Generated from \code{amu_with_diseases <- merge(AMU, diseases, by = c("WeekID"), all.x = TRUE)} (note: \code{all.x}, not \code{all.X}, is \code{merge()}'s left-join argument; \code{all.X} would be silently ignored)
#'
#' @format A data frame with 3663 rows and 18 variables:
#' \describe{
#' \item{WeekID}{ID of week, combining ID of flock with ID of week.}
#' \item{AMU}{usage of antimicrobials, logical.}
#' \item{FarmID}{ID of farm.}
#' \item{FlockID}{ID of flock, combining ID of farm with ID of cycle.}
#' \item{Cycle}{ID of cycle.}
#' \item{WEEK}{Week number.}
#' \item{RESPIRATORY}{Presence / absence of respiratory symptoms.}
#' \item{DIARRHOEA}{Presence / absence of diarrhoea symptoms.}
#' \item{CNS}{Presence / absence of central nervous system infection symptoms.}
#' \item{MALAISE}{Presence / absence of malaise.}
#' \item{LEGLESIONS}{Presence / absence of leg lesions.}
#' \item{SUDDENDEATH}{Presence / absence of sudden death.}
#' \item{NoSign}{Presence / absence of symptoms.}
#' \item{Sick_yes}{Presence / absence of symptoms (any symptom).}
#' \item{OTHDISEASE}{Presence / absence of other disease.}
#' \item{CHICKENSICKNO}{Number of sick chicken.}
#' \item{CHICKENDISEASEDEATH}{Number of chicken dead, excluding sudden death.}
#' \item{CHICKENSUDDENDEATH}{Number of chicken dying from sudden death.}
#' }
#' @source \url{http://viparc.org}
"amu_with_diseases"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{xml_atts}
\alias{xml_atts}
\title{Extract the attributes from a simple xml_node object}
\usage{
xml_atts(x)
}
\arguments{
\item{x}{xml_node with attributes}
}
\value{
character vector of the attributes
}
\description{
Extract the attributes from a simple xml_node object
}
| /man/xml_atts.Rd | permissive | BigelowLab/ndfd | R | false | true | 365 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{xml_atts}
\alias{xml_atts}
\title{Extract the attributes from a simple xml_node object}
\usage{
xml_atts(x)
}
\arguments{
\item{x}{xml_node with attributes}
}
\value{
character vector of the attributes
}
\description{
Extract the attributes from a simple xml_node object
}
|
expect_identical(make_filename(2013), 'accident_2013.csv.bz2') | /test/test.R | no_license | cregexp/MyPackage | R | false | false | 62 | r | expect_identical(make_filename(2013), 'accident_2013.csv.bz2') |
#' Risk Aversion Task
#'
#' @description
#' Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion), "lambda" (loss aversion), and "tau" (inverse temp).
#'
#' \strong{MODEL:}
#' Prospect Theory (Sokol-Hessner et al., 2009, PNAS)
#'
#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.
#' @param niter Number of iterations, including warm-up.
#' @param nwarmup Number of iterations used for warm-up only.
#' @param nchain Number of chains to be run.
#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.
#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.
#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.
#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".
#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.
#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.
#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.
#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.
#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.
#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.
#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.
#'
#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components:
#' \describe{
#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).}
#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter
#' values (as specified by \code{"indPars"}) for each subject.}
#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples
#' over different model parameters. }
#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.}
#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.}
#' }
#'
#' @importFrom rstan vb sampling stan_model rstan_options extract
#' @importFrom parallel detectCores
#' @importFrom stats median qnorm density
#' @importFrom utils read.table
#'
#' @details
#' This section describes some of the function arguments in greater detail.
#'
#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension
#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis.
#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns
#' represent variables. For the Risk Aversion Task, there should be five columns of data with the labels
#' "subjID", "gain", "loss", "cert", and "gamble". It is not necessary for the columns to be in this
#' particular order, however it is necessary that they be labelled correctly and contain the information below:
#' \describe{
#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.}
#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).}
#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).}
#' \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.}
#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.}
#' }
#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column
#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly,
#' there is no need to remove other miscellaneous data columns.
#'
#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the
#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample.
#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence
#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the
#' effects that initial values have on the resulting posteriors.
#'
#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be
#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling
#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When
#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")}
#' command. The chains should resemble a "furry caterpillar".
#'
#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen
#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to
#' generate the posterior.
#'
#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control
#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations
#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for
#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the
#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments.
#'
#' @export
#'
#' @references
#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The
#' Journal of Machine Learning Research, 15(1), 1593-1623.
#'
#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & Smith, E. E. (2009). Thinking like
#' a Trader Selectively Reduces Individuals' Loss Aversion. Proceedings of the National Academy of Sciences of the United States
#' of America, 106(13), 5035-5040. http://doi.org/10.2307/40455144?ref = search-gateway:1f452c8925000031ef87ca756455c9e3
#'
#' @seealso
#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM}
#'
#' @examples
#' \dontrun{
#' # Run the model and store results in "output"
#' output <- ra_prospect(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3)
#'
#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars')
#' plot(output, type = 'trace')
#'
#' # Check Rhat values (all Rhat values should be less than or equal to 1.1)
#' rhat(output)
#'
#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
#' plot(output)
#'
#' # Show the WAIC and LOOIC model fit estimates
#' printFit(output)
#'
#'
#' # Paths to data published in Sokol-Hessner et al. (2009)
#' path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM")
#'
#' path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM")
#' }
ra_prospect <- function(data           = "choose",
                        niter          = 4000,
                        nwarmup        = 1000,
                        nchain         = 4,
                        ncore          = 1,
                        nthin          = 1,
                        inits          = "random",
                        indPars        = "mean",
                        saveDir        = NULL,
                        modelRegressor = FALSE,
                        vb             = FALSE,
                        inc_postpred   = FALSE,
                        adapt_delta    = 0.95,
                        stepsize       = 1,
                        max_treedepth  = 10) {
  # Model-based regressors (for model-based neuroimaging, etc.) are not
  # implemented for this model.
  if (modelRegressor) {
    stop("** Model-based regressors are not available for this model **\n")
  }

  # Record start time so total computation time can be measured.
  startTime <- Sys.time()

  # Resolve the 'data' argument: bundled example file, interactive file
  # chooser, or a user-supplied path.
  if (data == "example") {
    data <- system.file("extdata", "ra_exampleData.txt", package = "hBayesDM")
  } else if (data == "choose") {
    data <- file.choose()
  }

  # Load the tab-delimited data file.
  # (TRUE spelled out: T/F are reassignable variables and unsafe.)
  if (file.exists(data)) {
    rawdata <- read.table(data, header = TRUE, sep = "\t")
  } else {
    stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n")
  }

  # Remove rows containing NAs prior to modeling.
  NA_rows_all <- which(is.na(rawdata), arr.ind = TRUE)  # (row, col) indices of NAs
  NA_rows     <- unique(NA_rows_all[, "row"])
  if (length(NA_rows) > 0) {
    rawdata <- rawdata[-NA_rows, ]
    cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "")
  }

  # Individual subjects.
  subjList <- unique(rawdata[, "subjID"])  # list of subjects x blocks
  numSubjs <- length(subjList)             # number of subjects

  # Number of parameters and Stan parameters of interest.
  numPars <- 3
  POI     <- c("mu_rho", "mu_lambda", "mu_tau",
               "sigma",
               "rho", "lambda", "tau",
               "log_lik")

  if (inc_postpred) {
    POI <- c(POI, "y_pred")
  }

  modelName <- "ra_prospect"

  # Information for user.
  cat("\nModel name = ", modelName, "\n")
  cat("Data file = ", data, "\n")
  cat("\nDetails:\n")
  if (vb) {
    cat(" # Using variational inference # \n")
  } else {
    cat(" # of chains = ", nchain, "\n")
    cat(" # of cores used = ", ncore, "\n")
    cat(" # of MCMC samples (per chain) = ", niter, "\n")
    cat(" # of burn-in samples = ", nwarmup, "\n")
  }
  cat(" # of subjects = ", numSubjs, "\n")

  ################################################################################
  # THE DATA.  ###################################################################
  ################################################################################
  # Number of trials for each subject.
  # seq_len() is used instead of 1:numSubjs so that a zero-subject edge case
  # iterates zero times instead of over c(1, 0).
  Tsubj <- as.vector(rep(0, numSubjs))
  for (sIdx in seq_len(numSubjs)) {
    curSubj     <- subjList[sIdx]
    Tsubj[sIdx] <- sum(rawdata$subjID == curSubj)  # Tsubj[N]
  }
  maxTrials <- max(Tsubj)

  # Information for user continued.
  cat(" # of (max) trials per subject = ", maxTrials, "\n\n")

  # Trial-level arrays padded to maxTrials; gamble = -1 marks padded
  # (unobserved) trials so the Stan model / posterior predictions can skip them.
  gain   <- array(0, c(numSubjs, maxTrials))
  loss   <- array(0, c(numSubjs, maxTrials))
  cert   <- array(0, c(numSubjs, maxTrials))
  gamble <- array(-1, c(numSubjs, maxTrials))

  for (i in seq_len(numSubjs)) {
    curSubj   <- subjList[i]
    useTrials <- Tsubj[i]
    tmp       <- subset(rawdata, rawdata$subjID == curSubj)
    gain[i, seq_len(useTrials)]   <- tmp[seq_len(useTrials), "gain"]
    loss[i, seq_len(useTrials)]   <- abs(tmp[seq_len(useTrials), "loss"])  # absolute loss amount
    cert[i, seq_len(useTrials)]   <- tmp[seq_len(useTrials), "cert"]
    gamble[i, seq_len(useTrials)] <- tmp[seq_len(useTrials), "gamble"]
  }

  dataList <- list(
    N       = numSubjs,
    T       = maxTrials,
    Tsubj   = Tsubj,
    numPars = numPars,
    gain    = gain,
    loss    = loss,
    cert    = cert,
    gamble  = gamble)

  # Initial values: "random" (Stan default), "fixed" defaults, or a
  # user-supplied numeric vector of length numPars.
  if (inits[1] != "random") {
    if (inits[1] == "fixed") {
      inits_fixed <- c(1.0, 1.0, 1.0)
    } else {
      if (length(inits) == numPars) {
        inits_fixed <- inits
      } else {
        stop("Check your initial values!")
      }
    }
    # Map fixed values onto the unconstrained (probit) scale used by the
    # model's non-centered parameterization (rho in [0, 2]; lambda, tau in [0, 5]).
    genInitList <- function() {
      list(
        mu_p     = c(qnorm(inits_fixed[1] / 2), qnorm(inits_fixed[2] / 5), qnorm(inits_fixed[3] / 5)),
        sigma    = c(1.0, 1.0, 1.0),
        rho_p    = rep(qnorm(inits_fixed[1] / 2), numSubjs),
        lambda_p = rep(qnorm(inits_fixed[2] / 5), numSubjs),
        tau_p    = rep(qnorm(inits_fixed[3] / 5), numSubjs)
      )
    }
  } else {
    genInitList <- "random"
  }

  # Parallel setup: never request more cores than are locally available.
  if (ncore > 1) {
    numCores <- parallel::detectCores()
    if (numCores < ncore) {
      options(mc.cores = numCores)
      warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.")
    } else {
      options(mc.cores = ncore)
    }
  } else {
    options(mc.cores = 1)
  }

  cat("***********************************\n")
  cat("** Loading a precompiled model **\n")
  cat("***********************************\n")

  # Fit the Stan model (precompiled in the package's stanmodels list).
  m <- stanmodels$ra_prospect
  if (vb) {  # variational Bayesian approximation
    fit <- rstan::vb(m,
                     data = dataList,
                     pars = POI,
                     init = genInitList)
  } else {   # full MCMC sampling
    fit <- rstan::sampling(m,
                           data    = dataList,
                           pars    = POI,
                           warmup  = nwarmup,
                           init    = genInitList,
                           iter    = niter,
                           chains  = nchain,
                           thin    = nthin,
                           control = list(adapt_delta   = adapt_delta,
                                          max_treedepth = max_treedepth,
                                          stepsize      = stepsize))
  }

  # Extract posterior samples from the Stan fit object.
  parVals <- rstan::extract(fit, permuted = TRUE)
  if (inc_postpred) {
    # -1 marks padded trials; convert to NA in posterior predictions.
    parVals$y_pred[parVals$y_pred == -1] <- NA
  }

  rho    <- parVals$rho
  lambda <- parVals$lambda
  tau    <- parVals$tau

  # Individual parameters (e.g., individual posterior means).
  allIndPars <- array(NA, c(numSubjs, numPars))
  allIndPars <- as.data.frame(allIndPars)

  for (i in seq_len(numSubjs)) {
    if (indPars == "mean") {
      allIndPars[i, ] <- c(mean(rho[, i]),
                           mean(lambda[, i]),
                           mean(tau[, i]))
    } else if (indPars == "median") {
      allIndPars[i, ] <- c(median(rho[, i]),
                           median(lambda[, i]),
                           median(tau[, i]))
    } else if (indPars == "mode") {
      allIndPars[i, ] <- c(estimate_mode(rho[, i]),
                           estimate_mode(lambda[, i]),
                           estimate_mode(tau[, i]))
    }
  }

  allIndPars <- cbind(allIndPars, subjList)
  colnames(allIndPars) <- c("rho",
                            "lambda",
                            "tau",
                            "subjID")

  # Wrap up data into a class "hBayesDM" list.
  modelData        <- list(modelName, allIndPars, parVals, fit, rawdata)
  names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata")
  class(modelData) <- "hBayesDM"

  # Total time of computations (computed for reference; not reported).
  endTime  <- Sys.time()
  timeTook <- endTime - startTime

  # If saveDir is specified, save modelData as a file; otherwise don't save.
  # Each file is stamped with the model name, data file name, and date & time
  # (hr & min).
  if (!is.null(saveDir)) {
    currTime     <- Sys.time()
    currDate     <- Sys.Date()
    currHr       <- substr(currTime, 12, 13)
    currMin      <- substr(currTime, 15, 16)
    timeStamp    <- paste0(currDate, "_", currHr, "_", currMin)
    dataFileName <- sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data))
    save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData")))
  }

  # Inform user of completion.
  cat("\n************************************\n")
  cat("**** Model fitting is complete! ****\n")
  cat("************************************\n")

  return(modelData)
}
| /R/ra_prospect.R | no_license | youngahn/hBayesDM | R | false | false | 16,502 | r | #' Risk Aversion Task
#'
#' @description
#' Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion), "lambda" (loss aversion), and "tau" (inverse temp).
#'
#' \strong{MODEL:}
#' Prospect Theory (Sokol-Hessner et al., 2009, PNAS)
#'
#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.
#' @param niter Number of iterations, including warm-up.
#' @param nwarmup Number of iterations used for warm-up only.
#' @param nchain Number of chains to be run.
#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.
#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.
#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.
#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".
#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.
#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.
#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.
#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.
#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.
#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.
#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.
#'
#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components:
#' \describe{
#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).}
#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter
#' values (as specified by \code{"indPars"}) for each subject.}
#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples
#' over different model parameters. }
#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.}
#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.}
#' }
#'
#' @importFrom rstan vb sampling stan_model rstan_options extract
#' @importFrom parallel detectCores
#' @importFrom stats median qnorm density
#' @importFrom utils read.table
#'
#' @details
#' This section describes some of the function arguments in greater detail.
#'
#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension
#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis.
#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns
#' represent variables. For the Risk Aversion Task, there should be five columns of data with the labels
#' "subjID", "gain", "loss", "cert", and "gamble". It is not necessary for the columns to be in this
#' particular order, however it is necessary that they be labelled correctly and contain the information below:
#' \describe{
#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.}
#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).}
#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).}
#' \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.}
#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.}
#' }
#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column
#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly,
#' there is no need to remove other miscellaneous data columns.
#'
#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the
#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample.
#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence
#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the
#' effects that initial values have on the resulting posteriors.
#'
#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be
#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling
#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When
#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")}
#' command. The chains should resemble a "furry caterpillar".
#'
#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen
#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to
#' generate the posterior.
#'
#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control
#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations
#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for
#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the
#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments.
#'
#' @export
#'
#' @references
#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The
#' Journal of Machine Learning Research, 15(1), 1593-1623.
#'
#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & Smith, E. E. (2009). Thinking like
#' a Trader Selectively Reduces Individuals' Loss Aversion. Proceedings of the National Academy of Sciences of the United States
#' of America, 106(13), 5035-5040. http://doi.org/10.2307/40455144?ref = search-gateway:1f452c8925000031ef87ca756455c9e3
#'
#' @seealso
#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM}
#'
#' @examples
#' \dontrun{
#' # Run the model and store results in "output"
#' output <- ra_prospect(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3)
#'
#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars')
#' plot(output, type = 'trace')
#'
#' # Check Rhat values (all Rhat values should be less than or equal to 1.1)
#' rhat(output)
#'
#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
#' plot(output)
#'
#' # Show the WAIC and LOOIC model fit estimates
#' printFit(output)
#'
#'
#' # Paths to data published in Sokol-Hessner et al. (2009)
#' path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM")
#'
#' path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM")
#' }
ra_prospect <- function(data = "choose",
                        niter = 4000,
                        nwarmup = 1000,
                        nchain = 4,
                        ncore = 1,
                        nthin = 1,
                        inits = "random",
                        indPars = "mean",
                        saveDir = NULL,
                        modelRegressor = FALSE,
                        vb = FALSE,
                        inc_postpred = FALSE,
                        adapt_delta = 0.95,
                        stepsize = 1,
                        max_treedepth = 10) {
  # Hierarchical Bayesian estimation of the risk-aversion prospect-theory
  # model (Sokol-Hessner et al., 2009) using the precompiled Stan model
  # stanmodels$ra_prospect (subject-level parameters: rho, lambda, tau).
  #
  # Side effects: prints progress to the console, may open an interactive
  # file chooser (data = "choose"), sets options(mc.cores), and optionally
  # saves the result to saveDir. Returns an object of class "hBayesDM".

  # Model-based regressors are not implemented for this model.
  if (modelRegressor) {
    stop("** Model-based regressors are not available for this model **\n")
  }

  # Record the start time so the total run time can be computed at the end.
  startTime <- Sys.time()

  # Resolve the data file path: bundled example data or interactive chooser.
  if (data == "example") {
    data <- system.file("extdata", "ra_exampleData.txt", package = "hBayesDM")
  } else if (data == "choose") {
    data <- file.choose()
  }

  # Load the tab-separated data, failing early with a helpful message.
  if (!file.exists(data)) {
    stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n")
  }
  rawdata <- read.table(data, header = TRUE, sep = "\t")  # was header = T

  # Remove rows containing NAs prior to modelling.
  NA_rows_all <- which(is.na(rawdata), arr.ind = TRUE)  # (row, col) indices of NAs
  NA_rows <- unique(NA_rows_all[, "row"])
  if (length(NA_rows) > 0) {
    rawdata <- rawdata[-NA_rows, ]
    cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "")
  }

  # Individual subjects.
  subjList <- unique(rawdata[, "subjID"])  # list of subjects x blocks
  numSubjs <- length(subjList)             # number of subjects

  # Number of parameters and the Stan parameters of interest.
  numPars <- 3
  POI <- c("mu_rho", "mu_lambda", "mu_tau",
           "sigma",
           "rho", "lambda", "tau",
           "log_lik")
  if (inc_postpred) {
    POI <- c(POI, "y_pred")  # posterior predictive draws
  }

  modelName <- "ra_prospect"

  # Information for the user.
  cat("\nModel name = ", modelName, "\n")
  cat("Data file = ", data, "\n")
  cat("\nDetails:\n")
  if (vb) {
    cat(" # Using variational inference # \n")
  } else {
    cat(" # of chains = ", nchain, "\n")
    cat(" # of cores used = ", ncore, "\n")
    cat(" # of MCMC samples (per chain) = ", niter, "\n")
    cat(" # of burn-in samples = ", nwarmup, "\n")
  }
  cat(" # of subjects = ", numSubjs, "\n")

  ################################################################################
  # THE DATA. ###################################################################
  ################################################################################

  # Number of trials for each subject.
  Tsubj <- as.vector(rep(0, numSubjs))
  for (sIdx in seq_len(numSubjs)) {
    curSubj <- subjList[sIdx]
    Tsubj[sIdx] <- sum(rawdata$subjID == curSubj)  # Tsubj[N]
  }
  maxTrials <- max(Tsubj)

  # Information for user continued.
  cat(" # of (max) trials per subject = ", maxTrials, "\n\n")

  # Trial-level inputs, padded per subject up to maxTrials.
  gain <- array(0, c(numSubjs, maxTrials))
  loss <- array(0, c(numSubjs, maxTrials))
  cert <- array(0, c(numSubjs, maxTrials))
  gamble <- array(-1, c(numSubjs, maxTrials))  # -1 marks padded (missing) trials

  for (i in seq_len(numSubjs)) {
    curSubj <- subjList[i]
    useTrials <- Tsubj[i]
    tmp <- subset(rawdata, rawdata$subjID == curSubj)
    gain[i, 1:useTrials] <- tmp[1:useTrials, "gain"]
    loss[i, 1:useTrials] <- abs(tmp[1:useTrials, "loss"])  # absolute loss amount
    cert[i, 1:useTrials] <- tmp[1:useTrials, "cert"]
    gamble[i, 1:useTrials] <- tmp[1:useTrials, "gamble"]
  }

  dataList <- list(
    N = numSubjs,
    T = maxTrials,
    Tsubj = Tsubj,
    numPars = numPars,
    gain = gain,
    loss = loss,
    cert = cert,
    gamble = gamble)

  # Initial values: "random", "fixed", or a numeric vector of length numPars.
  if (inits[1] != "random") {
    if (inits[1] == "fixed") {
      inits_fixed <- c(1.0, 1.0, 1.0)
    } else if (length(inits) == numPars) {
      # was a nested if/else; flattened to an else-if chain
      inits_fixed <- inits
    } else {
      stop("Check your inital values!")
    }
    # Map the initial values onto the unconstrained scale the Stan model
    # samples on (mu_p / *_p parameters).
    genInitList <- function() {
      list(
        mu_p = c(qnorm(inits_fixed[1] / 2), qnorm(inits_fixed[2] / 5), qnorm(inits_fixed[3] / 5)),
        sigma = c(1.0, 1.0, 1.0),
        rho_p = rep(qnorm(inits_fixed[1] / 2), numSubjs),
        lambda_p = rep(qnorm(inits_fixed[2] / 5), numSubjs),
        tau_p = rep(qnorm(inits_fixed[3] / 5), numSubjs)
      )
    }
  } else {
    genInitList <- "random"
  }

  # Parallel configuration; cap the request at locally available cores.
  if (ncore > 1) {
    numCores <- parallel::detectCores()
    if (numCores < ncore) {
      options(mc.cores = numCores)
      warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.")
    } else {
      options(mc.cores = ncore)
    }
  } else {
    options(mc.cores = 1)
  }

  cat("***********************************\n")
  cat("** Loading a precompiled model **\n")
  cat("***********************************\n")

  # Fit the Stan model (variational inference or MCMC sampling).
  m <- stanmodels$ra_prospect
  if (vb) { # if variational Bayesian
    fit <- rstan::vb(m,
                     data = dataList,
                     pars = POI,
                     init = genInitList)
  } else {
    fit <- rstan::sampling(m,
                           data = dataList,
                           pars = POI,
                           warmup = nwarmup,
                           init = genInitList,
                           iter = niter,
                           chains = nchain,
                           thin = nthin,
                           control = list(adapt_delta = adapt_delta,
                                          max_treedepth = max_treedepth,
                                          stepsize = stepsize))
  }

  # Extract the posterior draws from the Stan fit object.
  parVals <- rstan::extract(fit, permuted = TRUE)  # was permuted = T
  if (inc_postpred) {
    parVals$y_pred[parVals$y_pred == -1] <- NA  # -1 marks padded trials
  }

  rho <- parVals$rho
  lambda <- parVals$lambda
  tau <- parVals$tau

  # Individual parameters (posterior mean / median / mode per subject).
  allIndPars <- array(NA, c(numSubjs, numPars))
  allIndPars <- as.data.frame(allIndPars)
  for (i in seq_len(numSubjs)) {
    if (indPars == "mean") {
      allIndPars[i, ] <- c(mean(rho[, i]),
                           mean(lambda[, i]),
                           mean(tau[, i]))
    } else if (indPars == "median") {
      allIndPars[i, ] <- c(median(rho[, i]),
                           median(lambda[, i]),
                           median(tau[, i]))
    } else if (indPars == "mode") {
      allIndPars[i, ] <- c(estimate_mode(rho[, i]),
                           estimate_mode(lambda[, i]),
                           estimate_mode(tau[, i]))
    }
  }
  allIndPars <- cbind(allIndPars, subjList)
  colnames(allIndPars) <- c("rho",
                            "lambda",
                            "tau",
                            "subjID")

  # Wrap everything up into an "hBayesDM" object.
  modelData <- list(modelName, allIndPars, parVals, fit, rawdata)
  names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata")
  class(modelData) <- "hBayesDM"

  # Total computation time (informational only; not printed by this function).
  endTime <- Sys.time()
  timeTook <- endTime - startTime

  # If saveDir is specified, save modelData stamped with the model name,
  # data-file base name, and date/time (hour & minute).
  if (!is.null(saveDir)) {
    currTime <- Sys.time()
    currDate <- Sys.Date()
    currHr <- substr(currTime, 12, 13)   # hour from "YYYY-MM-DD HH:MM:SS"
    currMin <- substr(currTime, 15, 16)  # minute
    timeStamp <- paste0(currDate, "_", currHr, "_", currMin)
    dataFileName <- sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data))
    save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData")))
  }

  # Inform user of completion.
  cat("\n************************************\n")
  cat("**** Model fitting is complete! ****\n")
  cat("************************************\n")
  return(modelData)
}
|
/attributeselect.R | no_license | hying99/teachers | R | false | false | 7,297 | r | ||
# --------------------------------------------------------------------------
# Diagnostic script: plot each selected Atlantis tracer by box and layer,
# with a small map of the model domain highlighting the current box.
# Reads output.nc from an Atlantis run and writes one multi-page PDF per
# tracer ("Tracer3DwithMap_<tracer><run>.pdf") under plotPath.
#
# NOTE(review): this script relies on objects not created by the live code
# below: DIR (a list of directory paths), and -- because the shapefile /
# colour setup section is commented out -- shape, sdata, labeldf,
# waterColumnColors, sedCol, myGrey and myGreen. It also calls nc_open()/
# ncvar_get() without loading ncdf4 here. Presumably these come from the
# user's workspace or a previously sourced setup script -- TODO confirm.
# --------------------------------------------------------------------------
library(ggmap)
library(rgdal)
library(gtable)
library(maps)
library(mapdata)
library(rgeos)
# helper functions from the shared "General functions" directory
source(paste(DIR$'General functions',"\\get_first_number.R",sep=""))
source(paste(DIR$'General functions',"formatShape.R",sep=""))
# run / output identifiers used to build input and output paths
this_run<-"base"
this_out<-paste("TestPL2",sep="")
#
basePath<-paste(DIR$'Base',"ATLANTISmodels\\",sep="")
outPath<-paste(basePath,this_run,"\\","output",this_out,"\\",sep="")
plotPath<-paste(basePath,"\\Figures\\",this_run,"Zoo1\\TestPL2",sep="")
# time-step bookkeeping (the year variables are unused by the live code below)
daysTimeStep<-365
numStepsPerYear<-365/daysTimeStep
year0<-1900
fishingStartYear<-1900
modelStartYear<-1900
# open the run's netCDF output; volume gives the (layer, box, time) dimensions
ThisNC.nc<-nc_open(paste(outPath,"output.nc",sep=""))
thisVol<-ncvar_get(ThisNC.nc,"volume")
thisDz<-ncvar_get(ThisNC.nc,"dz")
allTracers<-sort(names(ThisNC.nc$var))
nts<-dim(thisVol)[3] #number of timesteps
nbox<-dim(thisVol)[2]; nlayer<-dim(thisVol)[1]
# --- exploratory / one-off comparison code, kept commented out ------------
# altNC.nc<-nc_open(paste(outPath,"..\\outputShort\\output.nc",sep=""))
# altDF<-ncvar_get(altNC.nc,"DinoFlag_N")
# df<-ncvar_get(ThisNC.nc,"DinoFlag_N")
# par(mfrow=c(2,2),mar=c(3,4,0,0))
# plot(df[5,2,],type="l"); mtext(2,side=3,line=-1); points(altDF[5,2,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,9,],type="l"); mtext(9,side=3,line=-1); points(altDF[5,9,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,20,],type="l"); mtext(20,side=3,line=-1); points(altDF[5,20,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,12,],type="l"); mtext(12,side=3,line=-1); points(altDF[5,12,],type="l",col=myRed,lty=2,lwd=2)
# df<-ncvar_get(ThisNC.nc,"MicroPB_N")
# plot(df[6,2,],type="l"); mtext(2,side=3,line=-1); plot(df[6,9,],type="l"); mtext(9,side=3,line=-1); plot(df[6,20,],type="l"); mtext(20,side=3,line=-1); plot(df[6,12,],type="l"); mtext(12,side=3,line=-1);
# shortO2<-ncvar_get(ThisNC.nc,"Oxygen")
# testO2<-ncvar_get(ThisNC.nc,"Oxygen")
# testDepth<-ncvar_get(ThisNC.nc,"dz")
#
# shortO2vec<-as.double(as.vector(shortO2)); testO2vec<-as.double(as.vector(testO2)); testDepthVec<-as.double(as.vector(testDepth))
# plot(shortO2vec[1:length(testO2vec)],testO2vec)
# index<-testDepth>1
# plot(testDepth[index],testO2vec[index])
#
# #read in shape file
# pdf("testShape.pdf")
# shapeFile<-paste(DIR$'Base',"ATLANTISmodels\\inputs\\bgm\\CHAT30_LL",sep="")
# sdata<-read.shapefile(shapeFile)
# shape<-formatShape(shapeFile=shapeFile)
# ns<-length(shape)
# SpDF <- SpatialPolygonsDataFrame(shape,data.frame( z=1:ns, row.names=paste("P",seq(1,(ns)),sep="")))
# labels<-seq(1,(ns))
# plot(shape)
# LABELpOS<-polygonsLabel(shape, labels = labels, cex=.1,doPlot=FALSE)
# labeldf<-data.frame(cbind("x"=LABELpOS[1:ns],"y"=LABELpOS[(ns+1):(2*ns)]))
# dev.off()
# wcColors<-colorRampPalette(colors=c(myAqua,myBlue,"midnightblue"))((nlayer-1))
# waterColumnColors<-data.frame(cbind("layer"=seq(0,(nlayer-2)),"color"=wcColors))
# sedCol<-myGreen
# #################
## Tracer to plot ##
###################
# thisTracer<-"NH3"
# thisTracer<-"NO3"
# thisTracer<-"Nitrification"
# thisTracer<-"Sed_Bact_N"
# thisTracer<-"Oxygen"
# thisTracer<-"Pelag_Bact_N"
# thisTracer<-"dz"
# thisTracer<-"Pelagic_fish_lge1_Nums"
# thisTracer<-"Arrow_squid1_Nums"
# thisTracer<-"Arrow_squid2_Nums"
# thisTracer<-"DinoFlag_N"
# allPlotTracers<-sort(unique(c("Meiobenth_N","Pelagic_fish_lge1_Nums","Nitrification","NH3","NO3","Oxygen","Pelag_Bact_N","Sed_Bact_N","dz","Lab_Det_N","Ref_Det_N","sedoxdepth","Si","vflux","volume","Macroalgae_N", "Macroalgae_Cover", "MicroPB_N", "MicroPB_Cover", "MicroPB_S","Diatom_N","Diatom_S","PicoPhytopl_N","DinoFlag_N","Chl_a")))
# allPlotTracers<-sort(unique(c("Light","Si","Nitrification","NH3","NO3","Oxygen","Pelag_Bact_N","Sed_Bact_N","Lab_Det_N","Ref_Det_N","PicoPhytopl_N","DinoFlag_N","Chl_a","Macroalgae_N","Diatom_N","Diatom_S","MicroPB_N","Zoo_N")))
# allPlotTracers<-c("PicoPhytopl_N", "Diatom_N")
#
# allPlotTracers<-c("Chl_a","MicroPB_N","Meiobenth_N","Lab_Det_N")
#
# allPlotTracers<-paste("Baleen_whales",seq(1,10),"_Nums",sep="")
#
# allPlotTracers<-paste("Invert_comm_Scav",seq(1,10),"_Nums",sep="")
#
allPlotTracers<-allTracers[grep("zoo",allTracers,ignore.case = TRUE)]
#
# allPlotTracers<-paste("Pelagic_fish_sml",seq(1,4),"_Nums",sep="")
#
# allPlotTracers<-"Chl_a"
#
# allPlotTracers<-c("Mesopel_fish_Invert_N", "Smooth_oreo_N")
#
# allPlotTracers<-c("DinoFlag_N","Chl_a", "Ref_Det_N", "Lab_Det_N",allTracers[grep("zoo",allTracers,ignore.case = TRUE)])
#
# allPlotTracers<-c(allTracers[grep("zoo",allTracers,ignore.case = TRUE)])
#
# allPlotTracers<-c("DinoFlag_N", "MicroZoo_N", "Diatom_N", "Pelag_Bact_N", "MicroPB_N", "Lab_Det_N")
#
# allPlotTracers<-c("Meiobenth_N")
#
# allPlotTracers<-c("Light", "Light_Adaptn_DF")
# allPlotTracers<-c("NH3", "NO3")
# allPlotTracers<-c("Sed_Bact_N")
# NOTE(review): only the LAST uncommented assignment takes effect; the
# grep("zoo") selection above and the line below it are overwritten.
allPlotTracers<-c("DinoFlag_N", "Filter_Other_N", "Invert_comm_Herb_N" )
allPlotTracers<-c("Det_Si", "Diatom_N", "Si", "PicoPhytopl_N")
# One PDF per tracer; each page holds 2x2 panels alternating between a
# per-layer time series for a box and a map highlighting that box.
for(thisTracer in allPlotTracers){
# thisTracer<-"Carniv_Zoo_N"
thisData<-ncvar_get(ThisNC.nc,thisTracer)
pdf(paste(plotPath,"Tracer3DwithMap_",thisTracer,this_out,".pdf",sep=""))
par(mfrow=c(2,2))
for(b in 1:nbox){
# 2D tracers (box x time) lack a layer dimension: place them in the
# sediment (last) row of a zero-filled layer x time matrix
if(length(dim(thisData))==2){
temp<-matrix(0,ncol=nts,nrow=nlayer)
temp[nlayer,]<-thisData[b,]
}else{
temp<-thisData[,b,]
}
#get depth for this box
thisDepth<-round(sum(thisDz[,b,1]-1))
thisMax<-max(temp)
# skip boxes where this tracer is never positive
if(thisMax>0){
plot(x=0*as.double(temp[1,]),type="n",ylim=c(0,thisMax),ylab="Tracer abundance",xlab="Timestep")
# keep only layers with data; always keep the sediment layer
layerIndex<-rowSums(temp)>0; layerIndex[nlayer]<-TRUE; layerIndex[is.na(layerIndex)]<-FALSE
temp<-temp[layerIndex,]
#last layer is sediment. Other layers are water column and need to be reversed
thisWCLayers<-rev(seq((nlayer-2),0)[layerIndex[1:(nlayer-1)]])
thisNL<-length(thisWCLayers); thisWCcolors<-(waterColumnColors$color[match(thisWCLayers,waterColumnColors$layer)])
#get last value (rounded, used to label the right-hand axis)
if(length(dim(temp))==0){
# only one layer survived the filtering, so temp collapsed to a vector
fvalue<-signif(temp[nts],2)
points(temp,col=sedCol,lwd=2,type="l",lty=2)
}else{
fvalue<-signif(mean(temp[,nts]),2)
# one solid line per water-column layer, then the sediment layer (dashed)
for(l in 1:thisNL){
thisCol<-as.character(thisWCcolors[l])
if(is.na(thisCol)){thisCol<-myGrey}
points(temp[l,],col=thisCol,lwd=2,type="l")
}
points(temp[(thisNL+1),],col=sedCol,lwd=2,type="l",lty=2)
}
mtext(paste(thisTracer,", box ",b,sep=""),side=3,adj=0.5,outer=FALSE)
axis(at=fvalue,labels=fvalue,side=4)
# map panel: draw the domain and fill the current box in green
plot(shape)
# map('nzHires',add=TRUE,col="black",lwd=2)
# map.axes()
for(plotB in 1:dim(labeldf)[1]){
if(b== plotB){thisCol=myGreen}else{thisCol="white"}
polygon(sdata$shp$shp[[plotB]]$points,col=thisCol)
}
mtext(paste("Depth= ",thisDepth," m",sep=""),side=3,adj=1,outer=FALSE)
}
}
dev.off()
}
| /(2)Diagnostic_plots/(01c)plotGivenTracerByBoxAndLayerInclMap.R | no_license | mcgregorv/AtlantisRscripts | R | false | false | 6,940 | r | library(ggmap)
# Diagnostic script (duplicate copy): plot Atlantis tracers by box and layer
# with a domain map highlighting the current box; writes one PDF per tracer.
# NOTE(review): the first line of this copy, library(ggmap), is fused onto
# the preceding dataset-metadata row. The live code depends on objects not
# defined here (DIR, shape, sdata, labeldf, waterColumnColors, sedCol,
# myGrey, myGreen) and on ncdf4 for nc_open()/ncvar_get() -- presumably
# supplied by the user's workspace or a sourced setup script; TODO confirm.
library(rgdal)
library(gtable)
library(maps)
library(mapdata)
library(rgeos)
# helper functions from the shared "General functions" directory
source(paste(DIR$'General functions',"\\get_first_number.R",sep=""))
source(paste(DIR$'General functions',"formatShape.R",sep=""))
# run / output identifiers used to build input and output paths
this_run<-"base"
this_out<-paste("TestPL2",sep="")
#
basePath<-paste(DIR$'Base',"ATLANTISmodels\\",sep="")
outPath<-paste(basePath,this_run,"\\","output",this_out,"\\",sep="")
plotPath<-paste(basePath,"\\Figures\\",this_run,"Zoo1\\TestPL2",sep="")
# time-step bookkeeping (the year variables are unused by the live code below)
daysTimeStep<-365
numStepsPerYear<-365/daysTimeStep
year0<-1900
fishingStartYear<-1900
modelStartYear<-1900
# open the run's netCDF output; volume gives the (layer, box, time) dimensions
ThisNC.nc<-nc_open(paste(outPath,"output.nc",sep=""))
thisVol<-ncvar_get(ThisNC.nc,"volume")
thisDz<-ncvar_get(ThisNC.nc,"dz")
allTracers<-sort(names(ThisNC.nc$var))
nts<-dim(thisVol)[3] #number of timesteps
nbox<-dim(thisVol)[2]; nlayer<-dim(thisVol)[1]
# --- exploratory / one-off comparison code, kept commented out ------------
# altNC.nc<-nc_open(paste(outPath,"..\\outputShort\\output.nc",sep=""))
# altDF<-ncvar_get(altNC.nc,"DinoFlag_N")
# df<-ncvar_get(ThisNC.nc,"DinoFlag_N")
# par(mfrow=c(2,2),mar=c(3,4,0,0))
# plot(df[5,2,],type="l"); mtext(2,side=3,line=-1); points(altDF[5,2,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,9,],type="l"); mtext(9,side=3,line=-1); points(altDF[5,9,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,20,],type="l"); mtext(20,side=3,line=-1); points(altDF[5,20,],type="l",col=myRed,lty=2,lwd=2)
# plot(df[5,12,],type="l"); mtext(12,side=3,line=-1); points(altDF[5,12,],type="l",col=myRed,lty=2,lwd=2)
# df<-ncvar_get(ThisNC.nc,"MicroPB_N")
# plot(df[6,2,],type="l"); mtext(2,side=3,line=-1); plot(df[6,9,],type="l"); mtext(9,side=3,line=-1); plot(df[6,20,],type="l"); mtext(20,side=3,line=-1); plot(df[6,12,],type="l"); mtext(12,side=3,line=-1);
# shortO2<-ncvar_get(ThisNC.nc,"Oxygen")
# testO2<-ncvar_get(ThisNC.nc,"Oxygen")
# testDepth<-ncvar_get(ThisNC.nc,"dz")
#
# shortO2vec<-as.double(as.vector(shortO2)); testO2vec<-as.double(as.vector(testO2)); testDepthVec<-as.double(as.vector(testDepth))
# plot(shortO2vec[1:length(testO2vec)],testO2vec)
# index<-testDepth>1
# plot(testDepth[index],testO2vec[index])
#
# #read in shape file
# pdf("testShape.pdf")
# shapeFile<-paste(DIR$'Base',"ATLANTISmodels\\inputs\\bgm\\CHAT30_LL",sep="")
# sdata<-read.shapefile(shapeFile)
# shape<-formatShape(shapeFile=shapeFile)
# ns<-length(shape)
# SpDF <- SpatialPolygonsDataFrame(shape,data.frame( z=1:ns, row.names=paste("P",seq(1,(ns)),sep="")))
# labels<-seq(1,(ns))
# plot(shape)
# LABELpOS<-polygonsLabel(shape, labels = labels, cex=.1,doPlot=FALSE)
# labeldf<-data.frame(cbind("x"=LABELpOS[1:ns],"y"=LABELpOS[(ns+1):(2*ns)]))
# dev.off()
# wcColors<-colorRampPalette(colors=c(myAqua,myBlue,"midnightblue"))((nlayer-1))
# waterColumnColors<-data.frame(cbind("layer"=seq(0,(nlayer-2)),"color"=wcColors))
# sedCol<-myGreen
# #################
## Tracer to plot ##
###################
# thisTracer<-"NH3"
# thisTracer<-"NO3"
# thisTracer<-"Nitrification"
# thisTracer<-"Sed_Bact_N"
# thisTracer<-"Oxygen"
# thisTracer<-"Pelag_Bact_N"
# thisTracer<-"dz"
# thisTracer<-"Pelagic_fish_lge1_Nums"
# thisTracer<-"Arrow_squid1_Nums"
# thisTracer<-"Arrow_squid2_Nums"
# thisTracer<-"DinoFlag_N"
# allPlotTracers<-sort(unique(c("Meiobenth_N","Pelagic_fish_lge1_Nums","Nitrification","NH3","NO3","Oxygen","Pelag_Bact_N","Sed_Bact_N","dz","Lab_Det_N","Ref_Det_N","sedoxdepth","Si","vflux","volume","Macroalgae_N", "Macroalgae_Cover", "MicroPB_N", "MicroPB_Cover", "MicroPB_S","Diatom_N","Diatom_S","PicoPhytopl_N","DinoFlag_N","Chl_a")))
# allPlotTracers<-sort(unique(c("Light","Si","Nitrification","NH3","NO3","Oxygen","Pelag_Bact_N","Sed_Bact_N","Lab_Det_N","Ref_Det_N","PicoPhytopl_N","DinoFlag_N","Chl_a","Macroalgae_N","Diatom_N","Diatom_S","MicroPB_N","Zoo_N")))
# allPlotTracers<-c("PicoPhytopl_N", "Diatom_N")
#
# allPlotTracers<-c("Chl_a","MicroPB_N","Meiobenth_N","Lab_Det_N")
#
# allPlotTracers<-paste("Baleen_whales",seq(1,10),"_Nums",sep="")
#
# allPlotTracers<-paste("Invert_comm_Scav",seq(1,10),"_Nums",sep="")
#
allPlotTracers<-allTracers[grep("zoo",allTracers,ignore.case = TRUE)]
#
# allPlotTracers<-paste("Pelagic_fish_sml",seq(1,4),"_Nums",sep="")
#
# allPlotTracers<-"Chl_a"
#
# allPlotTracers<-c("Mesopel_fish_Invert_N", "Smooth_oreo_N")
#
# allPlotTracers<-c("DinoFlag_N","Chl_a", "Ref_Det_N", "Lab_Det_N",allTracers[grep("zoo",allTracers,ignore.case = TRUE)])
#
# allPlotTracers<-c(allTracers[grep("zoo",allTracers,ignore.case = TRUE)])
#
# allPlotTracers<-c("DinoFlag_N", "MicroZoo_N", "Diatom_N", "Pelag_Bact_N", "MicroPB_N", "Lab_Det_N")
#
# allPlotTracers<-c("Meiobenth_N")
#
# allPlotTracers<-c("Light", "Light_Adaptn_DF")
# allPlotTracers<-c("NH3", "NO3")
# allPlotTracers<-c("Sed_Bact_N")
# NOTE(review): only the LAST uncommented assignment takes effect; the
# grep("zoo") selection above and the line below it are overwritten.
allPlotTracers<-c("DinoFlag_N", "Filter_Other_N", "Invert_comm_Herb_N" )
allPlotTracers<-c("Det_Si", "Diatom_N", "Si", "PicoPhytopl_N")
# One PDF per tracer; each page holds 2x2 panels alternating between a
# per-layer time series for a box and a map highlighting that box.
for(thisTracer in allPlotTracers){
# thisTracer<-"Carniv_Zoo_N"
thisData<-ncvar_get(ThisNC.nc,thisTracer)
pdf(paste(plotPath,"Tracer3DwithMap_",thisTracer,this_out,".pdf",sep=""))
par(mfrow=c(2,2))
for(b in 1:nbox){
# 2D tracers (box x time) lack a layer dimension: place them in the
# sediment (last) row of a zero-filled layer x time matrix
if(length(dim(thisData))==2){
temp<-matrix(0,ncol=nts,nrow=nlayer)
temp[nlayer,]<-thisData[b,]
}else{
temp<-thisData[,b,]
}
#get depth for this box
thisDepth<-round(sum(thisDz[,b,1]-1))
thisMax<-max(temp)
# skip boxes where this tracer is never positive
if(thisMax>0){
plot(x=0*as.double(temp[1,]),type="n",ylim=c(0,thisMax),ylab="Tracer abundance",xlab="Timestep")
# keep only layers with data; always keep the sediment layer
layerIndex<-rowSums(temp)>0; layerIndex[nlayer]<-TRUE; layerIndex[is.na(layerIndex)]<-FALSE
temp<-temp[layerIndex,]
#last layer is sediment. Other layers are water column and need to be reversed
thisWCLayers<-rev(seq((nlayer-2),0)[layerIndex[1:(nlayer-1)]])
thisNL<-length(thisWCLayers); thisWCcolors<-(waterColumnColors$color[match(thisWCLayers,waterColumnColors$layer)])
#get last value (rounded, used to label the right-hand axis)
if(length(dim(temp))==0){
# only one layer survived the filtering, so temp collapsed to a vector
fvalue<-signif(temp[nts],2)
points(temp,col=sedCol,lwd=2,type="l",lty=2)
}else{
fvalue<-signif(mean(temp[,nts]),2)
# one solid line per water-column layer, then the sediment layer (dashed)
for(l in 1:thisNL){
thisCol<-as.character(thisWCcolors[l])
if(is.na(thisCol)){thisCol<-myGrey}
points(temp[l,],col=thisCol,lwd=2,type="l")
}
points(temp[(thisNL+1),],col=sedCol,lwd=2,type="l",lty=2)
}
mtext(paste(thisTracer,", box ",b,sep=""),side=3,adj=0.5,outer=FALSE)
axis(at=fvalue,labels=fvalue,side=4)
# map panel: draw the domain and fill the current box in green
plot(shape)
# map('nzHires',add=TRUE,col="black",lwd=2)
# map.axes()
for(plotB in 1:dim(labeldf)[1]){
if(b== plotB){thisCol=myGreen}else{thisCol="white"}
polygon(sdata$shp$shp[[plotB]]$points,col=thisCol)
}
mtext(paste("Depth= ",thisDepth," m",sep=""),side=3,adj=1,outer=FALSE)
}
}
dev.off()
}
|
##' calculate cepstral coefficients using libassp
##'
##' Short-term cepstral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform. The number of
##' coefficients per output record will also equal the
##' FFT length / 2 + 1 (i.e. be non-mirrored).
##' Analysis results will be written to a file with the
##' base name of the input file and as extension '.cep'.
##' Default output is in SSFF format with
##' 'cep' as track name.
##' @title cepstrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param toFile write results to file (default extension depends on the analysis function)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{dftSpectrum}}, \code{\link{cssSpectrum}}, \code{\link{lpsSpectrum}};
##' all derived from libassp's spectrum function
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate cepstrum
##' res <- cepstrum(path2wav, toFile=FALSE)
##'
##' # plot cepstral values at midpoint of signal
##' plot(res$cep[dim(res$cep)[1]/2,],
##' type='l',
##' xlab='cepstral value index',
##' ylab='cepstral value')
##'
##' @export
'cepstrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
                       beginTime = 0.0, centerTime = FALSE,
                       endTime = 0.0, resolution = 40.0,
                       fftLength = 0, windowShift = 5.0,
                       window = 'BLACKMAN', toFile = TRUE,
                       explicitExt = NULL, outputDirectory = NULL,
                       forceToLog = useWrasspLogger, verbose = TRUE){
  ## Short-term cepstral analysis: thin wrapper around libassp's "spectrum"
  ## routine (C entry point "performAssp") with spectrumType = 'CEP'.
  ## Returns whatever the C routine produces: an AsspDataObj for a single
  ## file with toFile = FALSE, otherwise the number of processed files.

  ## ########################
  ## a few parameter checks and expand paths
  if (is.null(listOfFiles)) {
    stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
               "paths (min length = 1) pointing to valid file(s) to perform",
               "the given analysis function."))
  }
  if (is.null(optLogFilePath) && forceToLog){
    stop("optLogFilePath is NULL! -> not logging!")
  } else if (forceToLog){
    optLogFilePath <- path.expand(optLogFilePath)
  }
  if (!isAsspWindowType(window)){
    stop("WindowFunction of type '", window, "' is not supported!")
  }
  if (!is.null(outputDirectory)) {
    outputDirectory <- normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      ## path does not exist yet -> try to create it.
      ## BUG FIX: braces added. In the original, the following 'else if'
      ## bound to this inner if() (dangling else), so a *successful*
      ## dir.create() fell through to !finfo$isdir with finfo$isdir == NA
      ## and raised "missing value where TRUE/FALSE needed".
      if (!dir.create(outputDirectory, recursive = TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }

  ###########################
  # Pre-process file list
  listOfFiles <- prepareFiles(listOfFiles)

  ## #######################
  ## perform analysis
  ## (scalar condition, so || rather than the elementwise | used originally)
  if (length(listOfFiles) == 1 || !verbose) {
    pb <- NULL  # no progress bar for a single file or quiet mode
  } else {
    if (toFile == FALSE) {
      stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
    }
    cat('\n INFO: applying cepstrum to', length(listOfFiles), 'files\n')
    pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
  }

  externalRes <- invisible(.External("performAssp", listOfFiles,
                                     fname = "spectrum", beginTime = beginTime,
                                     centerTime = centerTime, endTime = endTime,
                                     spectrumType = 'CEP',
                                     resolution = resolution,
                                     fftLength = as.integer(fftLength),
                                     windowShift = windowShift, window = window,
                                     toFile = toFile, explicitExt = explicitExt,
                                     progressBar = pb, outputDirectory = outputDirectory,
                                     PACKAGE = "wrassp"))

  ## #########################
  ## write options to options log file
  if (forceToLog){
    optionsGivenAsArgs <- as.list(match.call(expand.dots = TRUE))
    wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
                  optLogFilePath, listOfFiles)
  }

  ## #########################
  ## close the progress bar (if any) and return the analysis result.
  ## BUG FIX: the original returned externalRes only when pb was NULL;
  ## multi-file runs ended in close(pb) and returned NULL invisibly,
  ## contradicting the documented return value (nrOfProcessedFiles).
  if (!is.null(pb)) {
    close(pb)
  }
  return(externalRes)
}
| /R/cepstrum.R | no_license | IPS-LMU/wrassp | R | false | false | 5,864 | r | ##' calculate cepstral coefficients using libassp
##'
##' Short-term cepstral analysis of the signal in <listOfFiles>
##' using the Fast Fourier Transform. The number of
##' coefficients per output record will also equal the
##' FFT length / 2 + 1 (i.e. be non-mirrored).
##' Analysis results will be written to a file with the
##' base name of the input file and as extension '.cep'.
##' Default output is in SSFF format with
##' 'cep' as track name.
##' @title cepstrum
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds
##' (default: begin of data)
##' @param centerTime = <time>: set single-frame analysis with the analysis
##' window centred at <time> seconds; overrules beginTime, endTime and
##' windowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds
##' (default: end of data)
##' @param resolution = <freq>: set FFT length to the smallest value which
##' results in a frequency resolution of <freq> Hz or better (default: 40.0)
##' @param fftLength = <num>: set FFT length to <num> points (overrules default
##' and 'resolution' option)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms
##' (default: 5.0)
##' @param window = <type>: set analysis window function to <type> (default:
##' BLACKMAN)
##' @param toFile write results to file (default extension depends on the analysis function)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return
##' AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @seealso \code{\link{dftSpectrum}}, \code{\link{cssSpectrum}}, \code{\link{lpsSpectrum}};
##' all derived from libassp's spectrum function
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate cepstrum
##' res <- cepstrum(path2wav, toFile=FALSE)
##'
##' # plot cepstral values at midpoint of signal
##' plot(res$cep[dim(res$cep)[1]/2,],
##' type='l',
##' xlab='cepstral value index',
##' ylab='cepstral value')
##'
##' @export
'cepstrum' <- function(listOfFiles = NULL, optLogFilePath = NULL,
                       beginTime = 0.0, centerTime = FALSE,
                       endTime = 0.0, resolution = 40.0,
                       fftLength = 0, windowShift = 5.0,
                       window = 'BLACKMAN', toFile = TRUE,
                       explicitExt = NULL, outputDirectory = NULL,
                       forceToLog = useWrasspLogger, verbose = TRUE){
  ## Short-term cepstral analysis: thin wrapper around libassp's "spectrum"
  ## routine (C entry point "performAssp") with spectrumType = 'CEP'.
  ## Returns whatever the C routine produces: an AsspDataObj for a single
  ## file with toFile = FALSE, otherwise the number of processed files.

  ## ########################
  ## a few parameter checks and expand paths
  if (is.null(listOfFiles)) {
    stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
               "paths (min length = 1) pointing to valid file(s) to perform",
               "the given analysis function."))
  }
  if (is.null(optLogFilePath) && forceToLog){
    stop("optLogFilePath is NULL! -> not logging!")
  } else if (forceToLog){
    optLogFilePath <- path.expand(optLogFilePath)
  }
  if (!isAsspWindowType(window)){
    stop("WindowFunction of type '", window, "' is not supported!")
  }
  if (!is.null(outputDirectory)) {
    outputDirectory <- normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir)) {
      ## path does not exist yet -> try to create it.
      ## BUG FIX: braces added. In the original, the following 'else if'
      ## bound to this inner if() (dangling else), so a *successful*
      ## dir.create() fell through to !finfo$isdir with finfo$isdir == NA
      ## and raised "missing value where TRUE/FALSE needed".
      if (!dir.create(outputDirectory, recursive = TRUE))
        stop('Unable to create output directory.')
    } else if (!finfo$isdir) {
      stop(paste(outputDirectory, 'exists but is not a directory.'))
    }
  }

  ###########################
  # Pre-process file list
  listOfFiles <- prepareFiles(listOfFiles)

  ## #######################
  ## perform analysis
  ## (scalar condition, so || rather than the elementwise | used originally)
  if (length(listOfFiles) == 1 || !verbose) {
    pb <- NULL  # no progress bar for a single file or quiet mode
  } else {
    if (toFile == FALSE) {
      stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
    }
    cat('\n INFO: applying cepstrum to', length(listOfFiles), 'files\n')
    pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
  }

  externalRes <- invisible(.External("performAssp", listOfFiles,
                                     fname = "spectrum", beginTime = beginTime,
                                     centerTime = centerTime, endTime = endTime,
                                     spectrumType = 'CEP',
                                     resolution = resolution,
                                     fftLength = as.integer(fftLength),
                                     windowShift = windowShift, window = window,
                                     toFile = toFile, explicitExt = explicitExt,
                                     progressBar = pb, outputDirectory = outputDirectory,
                                     PACKAGE = "wrassp"))

  ## #########################
  ## write options to options log file
  if (forceToLog){
    optionsGivenAsArgs <- as.list(match.call(expand.dots = TRUE))
    wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
                  optLogFilePath, listOfFiles)
  }

  ## #########################
  ## close the progress bar (if any) and return the analysis result.
  ## BUG FIX: the original returned externalRes only when pb was NULL;
  ## multi-file runs ended in close(pb) and returned NULL invisibly,
  ## contradicting the documented return value (nrOfProcessedFiles).
  if (!is.null(pb)) {
    close(pb)
  }
  return(externalRes)
}
|
#' plotProtospacerDistribution Function
#'
#' Plots the positional distribution of protospacer hits around the oldest
#' protospacer for one CRISPR subtype, as back-to-back histograms split by
#' target/non-target strand and 5'/3' direction.
#'
#' @param Subtype.label The CRISPR subtype to plot (e.g. "I-F"). Default is "I-F".
#' @param dat The CRISPRTarget data frame that is loaded. Default is `targets.dat`.
#' @return A ggplot object.
#' @export
#' @examples
#' plotProtospacerDistribution()
plotProtospacerDistribution <- function(Subtype.label = "I-F", dat = targets.dat){
  binwidth.val <- 200        # histogram bin width (nucleotides)
  distance.window <- 10000   # window either side of the oldest protospacer

  ## store input variables for use later
  input.dat <- dat
  input.subtype <- Subtype.label

  ## keep the requested subtype, drop the oldest spacer itself
  ## (spacer_order.num == 1), and clip hits to the distance window
  dat <- dat %>% filter(Subtype == Subtype.label)
  dat <- dat %>% filter(spacer_order.num > 1)
  dat <- dat %>%
    filter(protospacer.distance.num > -distance.window) %>%
    filter(protospacer.distance.num < distance.window)

  ## maximum bin count across the four strand/direction groups, so the
  ## mirrored y-axis can be made symmetric (plot = FALSE: counts only,
  ## no plotting; was plot = F)
  targets.dat.n0 <- dat %>% filter(Subtype == Subtype.label)
  aa <- targets.dat.n0 %>% filter(strand.plus.direction == "n_3")
  bb <- targets.dat.n0 %>% filter(strand.plus.direction == "n_5")
  cc <- targets.dat.n0 %>% filter(strand.plus.direction == "t_3")
  dd <- targets.dat.n0 %>% filter(strand.plus.direction == "t_5")
  bins.max <- max(c(max(hist(aa$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = FALSE)$counts),
                    max(hist(bb$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = FALSE)$counts),
                    max(hist(cc$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = FALSE)$counts),
                    max(hist(dd$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = FALSE)$counts)))

  ## set up data for plotting: shift each hit half a bin toward its 3'/5'
  ## side, and mirror non-target-strand hits onto negative coordinates
  dat <- dat %>% mutate(protospacer.distance.num = ifelse(five.three.prime.dir == "3", protospacer.distance.num + binwidth.val/2, protospacer.distance.num - binwidth.val/2))
  dat <- dat %>% mutate(protospacer.distance.num = ifelse(target.strand == "t", protospacer.distance.num, protospacer.distance.num*(-1)))
  targets.dat.n0 <- dat %>% filter(Subtype == Subtype.label)

  ## titles (plain if/else instead of scalar ifelse(); same strings as before)
  if (distance.window != binwidth.val) {
    plot.title1 <- paste("Distribution of Subtype ", Subtype.label, " hits (", nrow(targets.dat.n0), " hits.)" , sep = "")
    plot.subtitle <- paste("Window size = ", distance.window, " nucleotides. \nBinwdith = ", binwidth.val, " nucleotides.", sep = "")
  } else {
    plot.title1 <- paste("Quadrant distribution of Subtype ", Subtype.label, " hits (", nrow(targets.dat.n0), " hits)", sep = "")
    plot.subtitle <- paste("Window size = ", distance.window, " nucleotides.", sep = "")
  }

  ## back-to-back histograms: target strand above the axis, non-target
  ## strand mirrored below (negative counts)
  p <- ggplot() +
    geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="t_5"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Target 5' direction", y= ..count..)) +
    geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="t_3"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Target 3' direction", y= ..count..)) +
    geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="n_5"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Non-target 5' direction", y= -..count..)) +
    geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="n_3"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Non-target 3' direction", y= -..count..)) +
    scale_fill_hue("Group") +
    ggtitle(label = plot.title1, subtitle = plot.subtitle) +
    labs(x="Distance from oldest protospacer (nucleotides)",y="Number of hits") +
    coord_cartesian(ylim = c(-bins.max - 3, bins.max + 3)) +
    theme_bw() +
    theme(axis.text.x=element_text(size=14),
          axis.text.y=element_text(size=14),
          plot.title=element_text(size=12, face="bold", color="black"))
  return(p)
}
| /plotProtospacerDistribution.R | no_license | TJN25/CRISPRSpacerTools | R | false | false | 3,871 | r | #' plotProtospacerDistribution Function
#'
#' @param Dat The CRISPRTarget file that is loaded
#' @param pval The cutoff pvalue to use for clustering. Default is 0.01
#' @export
#' @examples
#' plotProtospacerDistribution()
plotProtospacerDistribution <- function(Subtype.label = "I-F", dat = targets.dat){
binwidth.val <- 200
distance.window <- 10000
##store input variable for use later
input.dat <- dat
input.subtype <- Subtype.label
dat <- dat%>%filter(Subtype == Subtype.label)
dat <- dat%>%filter(spacer_order.num > 1)#%>%filter(protospacer.distance.num != -1)%>%filter(protospacer.distance.num != 1)%>%filter(protospacer.distance.num != -2)%>%filter(protospacer.distance.num != 2)%>%filter(protospacer.distance.num != -3)%>%filter(protospacer.distance.num != 3)
dat <- dat%>%filter(protospacer.distance.num > -distance.window)%>%filter(protospacer.distance.num < distance.window)
##get maximum count for the graphs
targets.dat.n0 <- dat%>%filter(Subtype == Subtype.label)
aa <- targets.dat.n0%>%filter(strand.plus.direction == "n_3")
bb <- targets.dat.n0%>%filter(strand.plus.direction == "n_5")
cc <- targets.dat.n0%>%filter(strand.plus.direction == "t_3")
dd <- targets.dat.n0%>%filter(strand.plus.direction == "t_5")
bins.max <- max(c(max(hist(aa$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = F)$counts),
max(hist(bb$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = F)$counts),
max(hist(cc$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = F)$counts),
max(hist(dd$protospacer.distance.num, breaks = (distance.window)/binwidth.val, plot = F)$counts)))
##set up data for plotting
dat <- dat%>%mutate(protospacer.distance.num = ifelse(five.three.prime.dir == "3", protospacer.distance.num + binwidth.val/2, protospacer.distance.num - binwidth.val/2))
dat <- dat%>%mutate(protospacer.distance.num = ifelse(target.strand == "t", protospacer.distance.num, protospacer.distance.num*(-1)))
targets.dat.n0 <- dat%>%filter(Subtype == Subtype.label)
plot.title1 <- ifelse(distance.window != binwidth.val, paste("Distribution of Subtype ", Subtype.label, " hits (", nrow(targets.dat.n0), " hits.)" , sep = ""), paste("Quadrant distribution of Subtype ", Subtype.label, " hits (", nrow(targets.dat.n0), " hits)", sep = ""))
plot.subtitle <- ifelse(distance.window != binwidth.val, paste("Window size = ", distance.window, " nucleotides. \nBinwdith = ", binwidth.val, " nucleotides.", sep = ""), paste("Window size = ", distance.window, " nucleotides.", sep = ""))
p <- ggplot() +
geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="t_5"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Target 5' direction", y= ..count..)) +
geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="t_3"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Target 3' direction", y= ..count..)) +
geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="n_5"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Non-target 5' direction", y= -..count..)) +
geom_histogram(data=subset(targets.dat.n0, strand.plus.direction=="n_3"), binwidth = binwidth.val, aes(protospacer.distance.num, fill = "Non-target 3' direction", y= -..count..)) +
#facet_wrap(~protospacer.distance.num)
scale_fill_hue("Group") +
ggtitle(label = plot.title1, subtitle = plot.subtitle) +
labs(x="Distance from oldest protospacer (nucleotides)",y="Number of hits") +
coord_cartesian(ylim = c(-bins.max - 3, bins.max + 3)) +
theme_bw() +
theme(axis.text.x=element_text(size=14),
axis.text.y=element_text(size=14),
plot.title=element_text(size=12, face="bold", color="black"))
return(p)
}
|
library(tidyverse)
library(tidymodels)
# Load MoA features and scored targets.
# fixed: the test-set path used lowercase "moa/data", inconsistent with the
# two "moa/Data" paths above; that silently fails on case-sensitive systems.
trn_targ <- vroom::vroom(here::here("moa/Data/train_targets_scored.csv"))
trn <- vroom::vroom(here::here("moa/Data/train_features.csv"))
test <- vroom::vroom(here::here("moa/Data/test_features.csv"))
# getting names of the outcome columns (column 1 is sig_id)
outs <- names(trn_targ[, 2:207])
trn <- trn %>%
  mutate(cp_time = as_factor(cp_time))
trn_targ <- trn_targ %>%
  mutate(across(c(2:207), as_factor))
test <- test %>%
  mutate(cp_time = as_factor(cp_time))
# full_mod: fit a PCA + random-forest workflow for a single outcome column
# (named by the string `out`) and return the positive-class probabilities on
# the test set as a one-column tibble.
full_mod <- function(out) {
  # recipe: center/scale numeric predictors, then reduce to 10 principal
  # components; sig_id is kept as an id, `out` is the outcome
  rec <- recipe(~ ., data = trn %>% bind_cols(trn_targ %>% select(all_of(out)))) %>%
    update_role(all_of(out), new_role = "outcome") %>%
    update_role(sig_id, new_role = "id") %>%
    step_center(all_numeric()) %>%
    step_scale(all_numeric()) %>%
    step_pca(all_numeric(), num_comp = 10)
  rf_spec <- rand_forest() %>%
    set_engine("ranger") %>%
    set_mode("classification")
  wf <- workflow() %>%
    add_recipe(rec) %>%
    add_model(rf_spec)
  # fixed: select(all_of(out)) instead of select(c(out)) -- passing an
  # external character vector straight into select() is ambiguous/deprecated
  # tidyselect; the recipe above already uses all_of()
  fit_wf <- wf %>%
    fit(trn %>% bind_cols(trn_targ %>%
                            select(all_of(out))))
  preds <- predict(fit_wf, new_data = test, type = "prob") %>%
    select(2)  # keep only the probability of the positive class
  return(preds)
}
# getting all the preds: one probability column per outcome
yy <- map(outs, ~full_mod(.x))
sub <- yy %>%
  bind_cols(test$sig_id, .) %>%
  set_names(names(trn_targ))
| /moa/R/moa init rf.R | no_license | ekholme/kaggle | R | false | false | 1,315 | r |
library(tidyverse)
library(tidymodels)
# Load MoA features and scored targets.
# fixed: the test-set path used lowercase "moa/data", inconsistent with the
# two "moa/Data" paths above; that silently fails on case-sensitive systems.
trn_targ <- vroom::vroom(here::here("moa/Data/train_targets_scored.csv"))
trn <- vroom::vroom(here::here("moa/Data/train_features.csv"))
test <- vroom::vroom(here::here("moa/Data/test_features.csv"))
# getting names of the outcome columns (column 1 is sig_id)
outs <- names(trn_targ[, 2:207])
trn <- trn %>%
  mutate(cp_time = as_factor(cp_time))
trn_targ <- trn_targ %>%
  mutate(across(c(2:207), as_factor))
test <- test %>%
  mutate(cp_time = as_factor(cp_time))
# full_mod: fit a PCA + random-forest workflow for a single outcome column
# (named by the string `out`) and return the positive-class probabilities on
# the test set as a one-column tibble.
full_mod <- function(out) {
  # recipe: center/scale numeric predictors, then reduce to 10 principal
  # components; sig_id is kept as an id, `out` is the outcome
  rec <- recipe(~ ., data = trn %>% bind_cols(trn_targ %>% select(all_of(out)))) %>%
    update_role(all_of(out), new_role = "outcome") %>%
    update_role(sig_id, new_role = "id") %>%
    step_center(all_numeric()) %>%
    step_scale(all_numeric()) %>%
    step_pca(all_numeric(), num_comp = 10)
  rf_spec <- rand_forest() %>%
    set_engine("ranger") %>%
    set_mode("classification")
  wf <- workflow() %>%
    add_recipe(rec) %>%
    add_model(rf_spec)
  # fixed: select(all_of(out)) instead of select(c(out)) -- passing an
  # external character vector straight into select() is ambiguous/deprecated
  # tidyselect; the recipe above already uses all_of()
  fit_wf <- wf %>%
    fit(trn %>% bind_cols(trn_targ %>%
                            select(all_of(out))))
  preds <- predict(fit_wf, new_data = test, type = "prob") %>%
    select(2)  # keep only the probability of the positive class
  return(preds)
}
# getting all the preds: one probability column per outcome
yy <- map(outs, ~full_mod(.x))
sub <- yy %>%
  bind_cols(test$sig_id, .) %>%
  set_names(names(trn_targ))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBResult.R
\name{dbFetch}
\alias{dbFetch}
\alias{fetch}
\title{Fetch records from a previously executed query}
\usage{
dbFetch(res, n = -1, ...)
fetch(res, n = -1, ...)
}
\arguments{
\item{res}{An object inheriting from \linkS4class{DBIResult}, created by
\code{\link[=dbSendQuery]{dbSendQuery()}}.}
\item{n}{maximum number of records to retrieve per fetch. Use \code{n = -1}
or \code{n = Inf}
to retrieve all pending records. Some implementations may recognize other
special values.}
\item{...}{Other arguments passed on to methods.}
}
\value{
\code{dbFetch()} always returns a \link{data.frame}
with as many rows as records were fetched and as many
columns as fields in the result set,
even if the result is a single value
or has one
or zero rows.
An attempt to fetch from a closed result set raises an error.
If the \code{n} argument is not an atomic whole number
greater or equal to -1 or Inf, an error is raised,
but a subsequent call to \code{dbFetch()} with proper \code{n} argument succeeds.
Calling \code{dbFetch()} on a result set from a data manipulation query
created by \code{\link[=dbSendStatement]{dbSendStatement()}}
returns an empty data frame, with a warning.
}
\description{
Fetch the next \code{n} elements (rows) from the result set and return them
as a data.frame.
}
\details{
\code{fetch()} is provided for compatibility with older DBI clients - for all
new code you are strongly encouraged to use \code{dbFetch()}. The default
implementation for \code{dbFetch()} calls \code{fetch()} so that it is compatible with
existing code. Modern backends should implement for \code{dbFetch()} only.
}
\section{Specification}{
Fetching multi-row queries with one
or more columns by default returns the entire result.
Multi-row queries can also be fetched progressively
by passing a whole number (\link{integer}
or \link{numeric})
as the \code{n} argument.
A value of \link{Inf} for the \code{n} argument is supported
and also returns the full result.
If more rows than available are fetched, the result is returned in full
without warning.
If fewer rows than requested are returned, further fetches will
return a data frame with zero rows.
If zero rows are fetched, the columns of the data frame are still fully
typed.
Fetching fewer rows than available is permitted,
no warning is issued when clearing the result set.
The column types of the returned data frame depend on the data returned:
\itemize{
\item \link{integer} for integer values between -2^31 and 2^31 - 1
\item \link{numeric} for numbers with a fractional component
\item \link{logical} for Boolean values (some backends may return an integer)
\item \link{character} for text
\item lists of \link{raw} for blobs (with \code{NULL} entries for SQL NULL values)
\item coercible using \code{\link[=as.Date]{as.Date()}} for dates
(also applies to the return value of the SQL function \code{current_date})
\item coercible using \code{\link[hms:as.hms]{hms::as.hms()}} for times
(also applies to the return value of the SQL function \code{current_time})
\item coercible using \code{\link[=as.POSIXct]{as.POSIXct()}} for timestamps
(also applies to the return value of the SQL function \code{current_timestamp})
\item \link{NA} for SQL \code{NULL} values
}
If dates and timestamps are supported by the backend, the following R types are
used:
\itemize{
\item \link{Date} for dates
(also applies to the return value of the SQL function \code{current_date})
\item \link{POSIXct} for timestamps
(also applies to the return value of the SQL function \code{current_timestamp})
}
R has no built-in type with lossless support for the full range of 64-bit
or larger integers. Here, the following rules apply:
\itemize{
\item Values are returned as numeric
\item Conversion to character always returns a lossless decimal representation
of the data
}
}
\examples{
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
# Fetch all results
rs <- dbSendQuery(con, "SELECT * FROM mtcars WHERE cyl = 4")
dbFetch(rs)
dbClearResult(rs)
# Fetch in chunks
rs <- dbSendQuery(con, "SELECT * FROM mtcars")
while (!dbHasCompleted(rs)) {
chunk <- dbFetch(rs, 10)
print(nrow(chunk))
}
dbClearResult(rs)
dbDisconnect(con)
}
\seealso{
Close the result set with \code{\link[=dbClearResult]{dbClearResult()}} as soon as you
finish retrieving the records you want.
Other DBIResult generics: \code{\link{DBIResult-class}},
\code{\link{dbBind}}, \code{\link{dbClearResult}},
\code{\link{dbColumnInfo}}, \code{\link{dbGetInfo}},
\code{\link{dbGetRowCount}},
\code{\link{dbGetRowsAffected}},
\code{\link{dbGetStatement}},
\code{\link{dbHasCompleted}}, \code{\link{dbIsValid}},
\code{\link{dbQuoteIdentifier}},
\code{\link{dbQuoteString}}
}
| /man/dbFetch.Rd | no_license | jamieon/DBI | R | false | true | 4,840 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBResult.R
\name{dbFetch}
\alias{dbFetch}
\alias{fetch}
\title{Fetch records from a previously executed query}
\usage{
dbFetch(res, n = -1, ...)
fetch(res, n = -1, ...)
}
\arguments{
\item{res}{An object inheriting from \linkS4class{DBIResult}, created by
\code{\link[=dbSendQuery]{dbSendQuery()}}.}
\item{n}{maximum number of records to retrieve per fetch. Use \code{n = -1}
or \code{n = Inf}
to retrieve all pending records. Some implementations may recognize other
special values.}
\item{...}{Other arguments passed on to methods.}
}
\value{
\code{dbFetch()} always returns a \link{data.frame}
with as many rows as records were fetched and as many
columns as fields in the result set,
even if the result is a single value
or has one
or zero rows.
An attempt to fetch from a closed result set raises an error.
If the \code{n} argument is not an atomic whole number
greater or equal to -1 or Inf, an error is raised,
but a subsequent call to \code{dbFetch()} with proper \code{n} argument succeeds.
Calling \code{dbFetch()} on a result set from a data manipulation query
created by \code{\link[=dbSendStatement]{dbSendStatement()}}
returns an empty data frame, with a warning.
}
\description{
Fetch the next \code{n} elements (rows) from the result set and return them
as a data.frame.
}
\details{
\code{fetch()} is provided for compatibility with older DBI clients - for all
new code you are strongly encouraged to use \code{dbFetch()}. The default
implementation for \code{dbFetch()} calls \code{fetch()} so that it is compatible with
existing code. Modern backends should implement for \code{dbFetch()} only.
}
\section{Specification}{
Fetching multi-row queries with one
or more columns by default returns the entire result.
Multi-row queries can also be fetched progressively
by passing a whole number (\link{integer}
or \link{numeric})
as the \code{n} argument.
A value of \link{Inf} for the \code{n} argument is supported
and also returns the full result.
If more rows than available are fetched, the result is returned in full
without warning.
If fewer rows than requested are returned, further fetches will
return a data frame with zero rows.
If zero rows are fetched, the columns of the data frame are still fully
typed.
Fetching fewer rows than available is permitted,
no warning is issued when clearing the result set.
The column types of the returned data frame depend on the data returned:
\itemize{
\item \link{integer} for integer values between -2^31 and 2^31 - 1
\item \link{numeric} for numbers with a fractional component
\item \link{logical} for Boolean values (some backends may return an integer)
\item \link{character} for text
\item lists of \link{raw} for blobs (with \code{NULL} entries for SQL NULL values)
\item coercible using \code{\link[=as.Date]{as.Date()}} for dates
(also applies to the return value of the SQL function \code{current_date})
\item coercible using \code{\link[hms:as.hms]{hms::as.hms()}} for times
(also applies to the return value of the SQL function \code{current_time})
\item coercible using \code{\link[=as.POSIXct]{as.POSIXct()}} for timestamps
(also applies to the return value of the SQL function \code{current_timestamp})
\item \link{NA} for SQL \code{NULL} values
}
If dates and timestamps are supported by the backend, the following R types are
used:
\itemize{
\item \link{Date} for dates
(also applies to the return value of the SQL function \code{current_date})
\item \link{POSIXct} for timestamps
(also applies to the return value of the SQL function \code{current_timestamp})
}
R has no built-in type with lossless support for the full range of 64-bit
or larger integers. Here, the following rules apply:
\itemize{
\item Values are returned as numeric
\item Conversion to character always returns a lossless decimal representation
of the data
}
}
\examples{
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
# Fetch all results
rs <- dbSendQuery(con, "SELECT * FROM mtcars WHERE cyl = 4")
dbFetch(rs)
dbClearResult(rs)
# Fetch in chunks
rs <- dbSendQuery(con, "SELECT * FROM mtcars")
while (!dbHasCompleted(rs)) {
chunk <- dbFetch(rs, 10)
print(nrow(chunk))
}
dbClearResult(rs)
dbDisconnect(con)
}
\seealso{
Close the result set with \code{\link[=dbClearResult]{dbClearResult()}} as soon as you
finish retrieving the records you want.
Other DBIResult generics: \code{\link{DBIResult-class}},
\code{\link{dbBind}}, \code{\link{dbClearResult}},
\code{\link{dbColumnInfo}}, \code{\link{dbGetInfo}},
\code{\link{dbGetRowCount}},
\code{\link{dbGetRowsAffected}},
\code{\link{dbGetStatement}},
\code{\link{dbHasCompleted}}, \code{\link{dbIsValid}},
\code{\link{dbQuoteIdentifier}},
\code{\link{dbQuoteString}}
}
|
# One-time install of GOSim from Bioconductor (uncomment to run).
# NOTE(review): biocLite is deprecated; modern Bioconductor installs use
# BiocManager::install("GOSim").
# source("http://bioconductor.org/biocLite.R")
# biocLite("GOSim")
# Load GOSim for Gene Ontology based similarity / enrichment analysis.
library(GOSim)
| /R_scripts/enrichment.R | no_license | DawnEve/bioToolKit | R | false | false | 83 | r | # source("http://bioconductor.org/biocLite.R")
# biocLite("GOSim")
library(GOSim)
|
# Lab: model selection and diagnostics for logistic regression, using the
# MASS birthwt (birth weight) data and a grouped-response example.
#set your own path
setwd("~")  # NOTE(review): session-wide side effect; point this at your own project directory
# Model Selection ----------------
# * AIC Type Criteria ----------------
# local optimum
# May need to try different initial models
# Where do AIC and BIC comes from? KL divergence/ Bayesian theorem
# When do you want to use AIC? What about BIC?
library(MASS)
example(birthwt) # birth weight example; running the example creates the `bwt` data frame used below
birthwt.glm <- glm(low~age+lwt+race+smoke+ptd,
                   family = binomial(), data = bwt)
birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) # default direction is backward when scope is missing
birthwt.step$anova
stepAIC(birthwt.glm, trace = TRUE) # trace = TRUE to print out fitting process
# define a wider scope with more predictors in the upper model
Scope = list(upper = ~age+lwt+race+smoke+ptd+ht+ui+ftv, lower = ~1)
birthwt.step2 <- stepAIC(birthwt.glm, trace = FALSE, scope = Scope) # default direction is both when scope is specified
birthwt.step2$anova
#by choosing k = log(n), we can use BIC for the model selection
# * Likelihood Ratio Test ----------------
# D(smaller) - D(larger) ~ \chi^2_{df(larger) - df(smaller)}
bwtfit <- glm(formula = low ~ lwt + race + smoke + ptd, family = binomial(),
              data = bwt)
h0.fit = glm(low~lwt + race + smoke, family=binomial(),
             data = bwt)
anova(h0.fit, bwtfit, test="Chi")
# * Deviance Table ----------------
# Sequential (Type I) tests depend on the order in which terms are entered:
anova(glm(low~smoke+ptd+lwt, family = binomial(), data = bwt), test="Chi")
anova(glm(low~lwt+ptd+smoke, family = binomial(), data = bwt), test="Chi")
# Inconsistent result!
# Using deviance tables for model selection only when p is small;
# Needs to compare all the deviance tables corresponding to different orders of predictors
# Model Diagnostics ----------------
bwtfit <- glm(formula = low ~ lwt + race + smoke + ptd, family = binomial(),
              data = bwt)
# * Deviance Residuals and Pearson Residuals ----------------
# We expect these two types of residuals have similar distributions.
# no lack-of-fit => similar boxplots
# similar boxplots -> next step: Residual Plots
res.P = residuals(bwtfit, type="pearson")
res.D = residuals(bwtfit, type="deviance") #or residuals(fit), by default
boxplot(cbind(res.P, res.D), names = c("Pearson", "Deviance"))
# * Residual Plots ----------------
# no lack-of-fit => no systematic pattern
# next step: Runs Test
par(mfrow=c(1,2))
plot(bwtfit$fitted.values, res.P, pch=16, cex=0.6, ylab='Pearson Residuals', xlab='Fitted Values')
lines(smooth.spline(bwtfit$fitted.values, res.P, spar=0.9), col=2)  # smoothed trend of residuals
abline(h=0, lty=2, col='grey')
plot(bwtfit$fitted.values, res.D, pch=16, cex=0.6, ylab='Deviance Residuals', xlab='Fitted Values')
lines(smooth.spline(bwtfit$fitted.values, res.D, spar=0.9), col=2)
abline(h=0, lty=2, col='grey')
# * Runs Test ----------------
# Null hypothesis: no systematic pattern
# Reject H0 => lack-of-fit
# In the plot: consecutive positives/negatives => systematic pattern
library(lawstat)
# please pay attention to the library
# there are different runs.test() functions in different packages
# we are specifically using this one!
runs.test(y = res.P, plot.it = TRUE)
title(main='Pearson Residual Runs Test')
runs.test(y = res.D, plot.it = TRUE)
title(main='Deviance Residual Runs Test')
# * Check Influential Points & Outliers ----------------
# ** Leverage Points ----------------
# leverage points => influential points
leverage = hatvalues(bwtfit)
# Verify hatvalues() against the explicit GLM hat matrix sqrt(W) X (X'WX)^-1 X' sqrt(W):
W = diag(bwtfit$weights)
X = cbind(rep(1,nrow(bwt)), bwt[['lwt']], bwt[['race']]=='black',
          bwt[['race']]=='other', bwt[['smoke']], bwt[['ptd']])
Hat = sqrt(W) %*% X %*% solve(t(X) %*% W %*% X) %*% t(X) %*% sqrt(W)
all(abs(leverage - diag(Hat)) < 1e-15)
plot(names(leverage), leverage, xlab="Index", type="h")
points(names(leverage), leverage, pch=16, cex=0.6)
p <- length(coef(bwtfit))
n <- nrow(bwt)
abline(h=2*p/n,col=2,lwd=2,lty=2)  # common rule of thumb: leverage > 2p/n
infPts <- which(leverage>2*p/n)
# ** Cook's Distance ----------------
# high Cook's distance => influential points/outliers
# leverage points with high Cook's distance => suspicious influential points & outliers
# may need to be deleted -> check scatterplots
cooks = cooks.distance(bwtfit)
plot(cooks, ylab="Cook's Distance", pch=16, cex=0.6)
points(infPts, cooks[infPts], pch=17, cex=0.8, col=2)
# label the three most influential high-leverage points
# NOTE(review): [1:3] yields NA labels if fewer than 3 points exceed 2p/n -- confirm
susPts <- as.numeric(names(sort(cooks[infPts], decreasing=TRUE)[1:3]))
text(susPts, cooks[susPts], susPts, adj=c(-0.1,-0.1), cex=0.7, col=4)
dispersion <- 1
# Verify cooks.distance() against its formula (r_P / (1 - h))^2 * h / (phi * p).
# fixed: the closing parenthesis was misplaced so "< 1e-15" was evaluated
# inside abs() and the absolute value was never applied to the difference
all(abs(cooks - (res.P/(1 - leverage))^2 * leverage/(dispersion * p)) < 1e-15)
# Alternative Formulation of Logistic Regression in R ----------------
# In the previous example, the binary response is of individual (0-1) form
bwt$low
# Alternative grouped form: (# of successes, # of failures)
if(0){  # one-time generation of example.dat; kept disabled
example <- data.frame(lapply(esoph,as.numeric))
example <- example[,c(4:5,1:3)]
write.table(example, "example.dat", row.names = FALSE, col.names = TRUE)
}
# Read the data
d = read.table("example.dat", header=TRUE)
names(d) = c("Yes", "No", "A", "B", "C") # set var names
for(i in 3:5){ d[,i] = as.factor(d[,i]) } # convert to factor
# Alternatively, this can be done by
d = read.table("example.dat", header=TRUE,
               colClasses=c("integer","integer", "factor","factor","factor"))
names(d) = c("Yes", "No", "A", "B", "C")
# Fit logistic regression via glm
# main effect only
fit1 = glm(cbind(Yes, No) ~ A + B + C, data = d, family = binomial())
# main effect & up to 2-way interactions
fit2 = glm(cbind(Yes, No) ~ (A + B + C) * (A + B + C),
           data = d, family = binomial())
# Recall that A * B <=> A + B + A:B, main effect plus interaction
# A : B : C means three-way interaction term only
# A * B * C means main effect, all 2-way plus 3-way interaction
| /BST 223/LAB2_Model_Selection_Diagnostics.R | no_license | zoubohao/UC_Davis_STA_CS_Coureses | R | false | false | 5,698 | r | #set your own path
setwd("~")
# Model Selection ----------------
# * AIC Type Criteria ----------------
# local optimum
# May need to try different initial models
# Where do AIC and BIC comes from? KL divergence/ Bayesian theorem
# When do you want to use AIC? What about BIC?
library(MASS)
example(birthwt) # birth weight example
birthwt.glm <- glm(low~age+lwt+race+smoke+ptd,
family = binomial(), data = bwt)
birthwt.step <- stepAIC(birthwt.glm, trace = FALSE) # default direction is backward when scope is missing
birthwt.step$anova
stepAIC(birthwt.glm, trace = TRUE) # trace = TRUE to print out fitting process
# define a wider scope with more predictors in the upper model
Scope = list(upper = ~age+lwt+race+smoke+ptd+ht+ui+ftv, lower = ~1)
birthwt.step2 <- stepAIC(birthwt.glm, trace = FALSE, scope = Scope) # default direction is both when scope is specified
birthwt.step2$anova
#by choosing k = log(n), we can use BIC for the model selection
# * Likelihood Ratio Test ----------------
# D(smaller) - D(larger) ~ \chi^2_{df(larger) - df(smaller)}
bwtfit <- glm(formula = low ~ lwt + race + smoke + ptd, family = binomial(),
data = bwt)
h0.fit = glm(low~lwt + race + smoke, family=binomial(),
data = bwt)
anova(h0.fit, bwtfit, test="Chi")
# * Deviance Table ----------------
anova(glm(low~smoke+ptd+lwt, family = binomial(), data = bwt), test="Chi")
anova(glm(low~lwt+ptd+smoke, family = binomial(), data = bwt), test="Chi")
# Inconsistent result!
# Using deviance tables for model selection only when p is small;
# Needs to compare all the deviance tables corresponding to different orders of predictors
# Model Diagnostics ----------------
bwtfit <- glm(formula = low ~ lwt + race + smoke + ptd, family = binomial(),
data = bwt)
# * Deviance Residuals and Pearson Residuals ----------------
# We expect these two types of residuals have similar distributions.
# no lack-of-fit => similar boxplots
# similar boxplots -> next step: Residual Plots
res.P = residuals(bwtfit, type="pearson")
res.D = residuals(bwtfit, type="deviance") #or residuals(fit), by default
boxplot(cbind(res.P, res.D), names = c("Pearson", "Deviance"))
# * Residual Plots ----------------
# no lack-of-fit => no systematic pattern
# next step: Runs Test
par(mfrow=c(1,2))
plot(bwtfit$fitted.values, res.P, pch=16, cex=0.6, ylab='Pearson Residuals', xlab='Fitted Values')
lines(smooth.spline(bwtfit$fitted.values, res.P, spar=0.9), col=2)
abline(h=0, lty=2, col='grey')
plot(bwtfit$fitted.values, res.D, pch=16, cex=0.6, ylab='Deviance Residuals', xlab='Fitted Values')
lines(smooth.spline(bwtfit$fitted.values, res.D, spar=0.9), col=2)
abline(h=0, lty=2, col='grey')
# * Runs Test ----------------
# Null hypothesis: no systematic pattern
# Reject H0 => lack-of-fit
# In the plot: consecutive positives/negatives => systematic pattern
library(lawstat)
# please pay attention to the library
# there are different runs.test() functions in different packages
# we are specifically using this one!
runs.test(y = res.P, plot.it = TRUE)
title(main='Pearson Residual Runs Test')
runs.test(y = res.D, plot.it = TRUE)
title(main='Deviance Residual Runs Test')
# * Check Influential Points & Outliers ----------------
# ** Leverage Points ----------------
# leverage points => influential points
leverage = hatvalues(bwtfit)
W = diag(bwtfit$weights)
X = cbind(rep(1,nrow(bwt)), bwt[['lwt']], bwt[['race']]=='black',
bwt[['race']]=='other', bwt[['smoke']], bwt[['ptd']])
Hat = sqrt(W) %*% X %*% solve(t(X) %*% W %*% X) %*% t(X) %*% sqrt(W)
all(abs(leverage - diag(Hat)) < 1e-15)
plot(names(leverage), leverage, xlab="Index", type="h")
points(names(leverage), leverage, pch=16, cex=0.6)
p <- length(coef(bwtfit))
n <- nrow(bwt)
abline(h=2*p/n,col=2,lwd=2,lty=2)
infPts <- which(leverage>2*p/n)
# ** Cook's Distance ----------------
# high Cook's distance => influential points/outliers
# leverage points with high Cook's distance => suspicious influential points & outliers
# may need to be deleted -> check scatterplots
cooks = cooks.distance(bwtfit)
plot(cooks, ylab="Cook's Distance", pch=16, cex=0.6)
points(infPts, cooks[infPts], pch=17, cex=0.8, col=2)
susPts <- as.numeric(names(sort(cooks[infPts], decreasing=TRUE)[1:3]))
text(susPts, cooks[susPts], susPts, adj=c(-0.1,-0.1), cex=0.7, col=4)
dispersion <- 1
all(abs(cooks - (res.P/(1 - leverage))^2 * leverage/(dispersion * p) < 1e-15))
# Alternative Formulation of Logistic Regression in R ----------------
# In the previous example, the binary response is of individual (0-1) form
bwt$low
# Alternative grouped form: (# of successes, # of failures)
if(0){
example <- data.frame(lapply(esoph,as.numeric))
example <- example[,c(4:5,1:3)]
write.table(example, "example.dat", row.names = FALSE, col.names = TRUE)
}
# Read the data
d = read.table("example.dat", header=TRUE)
names(d) = c("Yes", "No", "A", "B", "C") # set var names
for(i in 3:5){ d[,i] = as.factor(d[,i]) } # convert to factor
# Alternatively, this can be done by
d = read.table("example.dat", header=TRUE,
colClasses=c("integer","integer", "factor","factor","factor"))
names(d) = c("Yes", "No", "A", "B", "C")
# Fit logistic regression via glm
# main effect only
fit1 = glm(cbind(Yes, No) ~ A + B + C, data = d, family = binomial())
# main effect & up to 2-way interactions
fit2 = glm(cbind(Yes, No) ~ (A + B + C) * (A + B + C),
data = d, family = binomial())
# Recall that A * B <=> A + B + A:B, main effect plus interaction
# A : B : C means three-way interaction term only
# A * B * C means main effect, all 2-way plus 3-way interaction
|
---
title: "R Notebook"
output: html_notebook
---
```{r}
# Load the 2018 current-smoker data broken out by education level; the
# commented lines switch the analysis to other break-out variables.
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Age.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Gender.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Income.csv")
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Edu.csv")
data$Break_Out=factor(data$Break_Out)
#par(mfrow=c(1,2))
# Boxplot and stacked strip chart of smoking percentage by group.
# NOTE(review): the stripchart xlab says "Age" but the break-out here is
# education -- confirm which label is intended.
plot(Data_value~Break_Out,data,xlab="Edu",ylab="Percentage of Current Smoker")
stripchart(Data_value~Break_Out,data,vertical=TRUE,method="stack",xlab="Age",ylab="Percentage of Current Smoker")
```
```{r}
# One-way model on the log-transformed response; the commented line is the
# untransformed, no-intercept alternative.
lm = lm(log(Data_value)~Break_Out,data)
#lm = lm(Data_value~Break_Out-1,data)
summary(lm)
```
```{r}
# Model coefficients rounded to one decimal place.
round(coef(lm),1)
```
Although there are some outliers, the residuals look approximately normal, so we can proceed with the inference without much concern.
```{r}
# ANOVA table plus residual diagnostics (normal Q-Q and residuals vs fitted).
anova(lm)
par(mfrow=c(1,2))
qqnorm(residuals(lm))
qqline(residuals(lm))
# NOTE(review): base graphics are not composed with `+`; this works only
# because both calls return NULL, and it prints a spurious numeric(0).
plot(jitter(fitted(lm)),residuals(lm),xlab="Fitted",ylab="Residuals")+abline(h=0)
```
Since the residual diagnostics suggest possible non-constant variance, we check it formally with Levene's test below.
```{r}
# Levene-type (Brown-Forsythe) test for equal variances: regress absolute
# deviations from group medians on the group factor. The commented block is
# the untransformed version; the active code uses the log scale.
#med = with(data,tapply(Data_value,Break_Out,median))
#ar = with(data,abs(Data_value-med[Break_Out]))
#anova(lm(ar~Break_Out,data))
# log
med = with(data,tapply(log(Data_value),Break_Out,median))
ar = with(data,abs(log(Data_value)-med[Break_Out]))
anova(lm(ar~Break_Out,data))
```
```{r}
library(PMCMRplus)
# Choose the pairwise comparison by the Levene result computed above:
# unequal variances -> Tamhane T2 on the log scale; otherwise Tukey HSD.
vartest=anova(lm(ar~Break_Out,data))
if(vartest$`Pr(>F)`[1]<0.05){
summary(tamhaneT2Test(log(data$Data_value),data$Break_Out))
}else{
print(tci <- TukeyHSD(aov(Data_value~Break_Out,data)))
plot(tci)
}
```
```{r}
# Box-Cox profile likelihood to choose a power transformation of the response.
require(MASS)
lm = lm(Data_value~Break_Out,data)
# NOTE(review): prefer TRUE over T (T is reassignable).
boxcox(lm,plotit=T,lambda=seq(-0.08,0.23,by=0.01))
```
```{r}
# Former-smoker rates by year: plots, one-way model, diagnostics, and a
# comparison against the intercept-only null model.
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
data$Year=factor(data$Year)
par(mfrow=c(1,2))
plot(Data_value~Year,data,xlab="Year",ylab="Smoke every day")
stripchart(Data_value~Year,data,vertical=TRUE,method="stack",xlab="Year",ylab="Smoke every day")
lm = lm(Data_value~Year,data)
summary(lm)
anova(lm)
par(mfrow=c(1,2))
qqnorm(residuals(lm))
qqline(residuals(lm))
# NOTE(review): base graphics are not composed with `+`; this prints a
# spurious numeric(0) and works only because both calls return NULL.
plot(jitter(fitted(lm)),residuals(lm),xlab="Fitted",ylab="Residuals")+abline(h=0)
lmnull=lm(Data_value~1,data)
anova(lmnull,lm)
```
```{r}
# Full pipeline for the 2020 current-smoker-by-age data: plots, ANOVA,
# Levene's test, an optional power transformation, and pairwise comparisons.
# The commented lines switch to the 2019/2018 files.
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2020Age.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2019Age.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Age.csv")
data$Break_Out = factor(data$Break_Out)
par(mfrow = c(1, 2))
plot(Data_value ~ Break_Out, data, xlab = "Age", ylab = "Percentage of Current Smoker")
stripchart(Data_value ~ Break_Out, data, vertical = TRUE, method = "stack", xlab = "Age", ylab = "Percentage of Current Smoker")
print("----------ANOVA with intercept----------")
print("if P(>F)<0.05, there is indeed a difference in the levels.")
# NOTE(review): `lm` shadows stats::lm for the rest of the chunk.
lm = lm(Data_value ~ Break_Out, data)
#summary(lm)
anova(lm)
print("----------Fitted model without intercept----------")
lmodi = lm(Data_value ~ Break_Out - 1, data)
summary(lmodi)
print("----------Fitted model without intercept ANOVA----------")
lmnull = lm(Data_value ~ 1, data)
anova(lmnull, lmodi)
qqnorm(residuals(lm))
qqline(residuals(lm))
plot(jitter(fitted(lm)), residuals(lm), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)  # fixed: base graphics are not composed with `+`
print("----------Levene's Test----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
med = with(data, tapply(Data_value, Break_Out, median))
ar = with(data, abs(Data_value - med[Break_Out]))
# fixed: compute `vartest` here, before its first use below; previously the
# first `if (vartest$...)` silently relied on a stale `vartest` left over
# from an earlier chunk
vartest = anova(lm(ar ~ Break_Out, data))
vartest
if (vartest$`Pr(>F)`[1] < 0.05) {
  print("----------Transformation----------")
  require(MASS)
  boxcox(lm, plotit = TRUE, lambda = seq(0.05, 0.5, by = 0.01))
  # power transformation suggested by Box-Cox: 4 * (y^0.25 - 1)
  lmtr = lm((4 * (Data_value^(0.25) - 1)) ~ Break_Out, data)
  summary(lmtr)
  par(mfrow = c(1, 2))
  qqnorm(residuals(lmtr))
  qqline(residuals(lmtr))
  plot(jitter(fitted(lmtr)), residuals(lmtr), xlab = "Fitted", ylab = "Residuals")
  abline(h = 0)
  print("----------Levene's Test with Transformation----------")
  print("if P(>F)>0.05, there is no evidence of non-constant variance.")
  medtr = with(data, tapply((4 * (Data_value^(0.25) - 1)), Break_Out, median))
  # fixed: deviations must be taken from the *transformed* group medians
  # (medtr), not the untransformed `med`
  artr = with(data, abs((4 * (Data_value^(0.25) - 1)) - medtr[Break_Out]))
  anova(lm(artr ~ Break_Out, data))
}
print("----------Comparison Test----------")
library(PMCMRplus)
if (vartest$`Pr(>F)`[1] < 0.05) {
  print("----------Non-constant Var----------")
  # unequal variances: Tamhane T2 pairwise comparisons on the log scale
  summary(tamhaneT2Test(log(data$Data_value), data$Break_Out))
} else {
  print("----------Constant Var----------")
  print(tci <- TukeyHSD(aov(Data_value ~ Break_Out, data)))
  plot(tci)
}
```
```{r}
# NOTE(review): this chunk still *uses* `data18` and `data19` (the plots below
# and the TukeyHSD calls at the bottom) although the reads that define them are
# commented out here -- in a fresh session it stops with
# "object 'data19' not found".  Either restore the two reads or remove the
# calls that reference them.
#data18 = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Edu.csv")
#data19 = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2019Edu.csv")
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
#data18$Break_Out=factor(data18$Break_Out)
#data19$Break_Out=factor(data19$Break_Out)
# NOTE(review): `Year` is filled from `Break_Out` here; the following chunk
# uses data$Year=factor(data$Year) instead -- confirm which column is intended.
data$Year=factor(data$Break_Out)
par(mfrow=c(1,3))
plot(Data_value~Break_Out,data,xlab="Edu(2020)",ylab="Percentage of Current Smoker")
plot(Data_value~Break_Out,data19,xlab="Edu(2019)",ylab="Percentage of Current Smoker")
plot(Data_value~Break_Out,data18,xlab="Edu(2018)",ylab="Percentage of Current Smoker")
#stripchart(Data_value~Break_Out,data,vertical=TRUE,method="stack",xlab="Age",ylab="Percentage of Current Smoker")
# One-way ANOVA: does Data_value differ across Break_Out levels?
print("----------ANOVA with intercept----------")
print("if P(>F)<0.05, there is indeed a difference in the levels.")
lm = lm(Data_value~Break_Out,data)
#summary(lm)
anova(lm)
print("----------Fited model without intercept----------")
lmodi = lm(Data_value~Break_Out-1,data)
summary(lmodi)
print("----------Fited model without intercept ANOVA----------")
lmnull=lm(Data_value~1,data)
anova(lmnull,lmodi)
# Residual diagnostics for the intercept model.
qqnorm(residuals(lm))
qqline(residuals(lm))
plot(jitter(fitted(lm)),residuals(lm),xlab="Fitted",ylab="Residuals")+abline(h=0)
# Median-based Levene's test: ANOVA on absolute deviations from group medians.
print("----------Levene's Test----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
med = with(data,tapply(Data_value,Break_Out,median))
ar = with(data,abs(Data_value-med[Break_Out]))
anova(lm(ar~Break_Out,data))
# Power transformation with lambda = 2.25, chosen from the Box-Cox profile.
print("----------Transformation----------")
require(MASS)
boxcox(lm,plotit=T,lambda=seq(1.5,3.3,by=0.01))
lmtr = lm(1/2.25*(Data_value^(2.25)-1)~Break_Out,data)
summary(lmtr)
par(mfrow=c(1,2))
qqnorm(residuals(lmtr))
qqline(residuals(lmtr))
plot(jitter(fitted(lmtr)),residuals(lmtr),xlab="Fitted",ylab="Residuals")+abline(h=0)
print("----------Levene's Test with Transformation----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
medtr = with(data,tapply(1/2.25*(Data_value^(2.25)-1),Break_Out,median))
artr = with(data,abs(1/2.25*(Data_value^(2.25)-1)-medtr[Break_Out]))
anova(lm(artr~Break_Out,data))
# Post-hoc pairwise comparisons: Tamhane T2 when variances are unequal,
# Tukey's HSD otherwise.
print("----------Comparison Test----------")
library(PMCMRplus)
vartest=anova(lm(ar~Break_Out,data))
if(vartest$`Pr(>F)`[1]<0.05){
print("----------Non-constant Var----------")
summary(tamhaneT2Test(data$Data_value,data$Break_Out))
}else{
print("----------Constant Var----------")
print(tci <- TukeyHSD(aov(Data_value~Break_Out,data)))
plot(tci)
print(tci <- TukeyHSD(aov(1/2.25*(Data_value^(2.25)-1)~Break_Out,data)))
plot(tci)
tci20 <- TukeyHSD(aov(Data_value~Break_Out,data))
tci19 <- TukeyHSD(aov(Data_value~Break_Out,data19))
tci18 <- TukeyHSD(aov(Data_value~Break_Out,data18))
par(mfrow=c(1,3))
plot(tci20)
plot(tci19)
plot(tci18)
}
```
```{r}
# Year-over-year comparison of smoking-status percentages.  Each CSV holds one
# smoking status (never / former / every day) with columns Year and Data_value.
data2 <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/NeverSmokedonYear.csv")
data3 <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/SmokeEverydayonYear.csv")
data <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
data$Year <- factor(data$Year)
data2$Year <- factor(data2$Year)
data3$Year <- factor(data3$Year)
# Side-by-side boxplots, one per smoking status.
par(mfrow = c(1, 3))
plot(Data_value ~ Year, data2, xlab = "Year", ylab = "Never Smoked")
plot(Data_value ~ Year, data, xlab = "Year", ylab = "Former")
plot(Data_value ~ Year, data3, xlab = "Year", ylab = "Smoke every day")
#stripchart(Data_value~Year,data,vertical=TRUE,method="stack",xlab="Year",ylab="Smoke every day")
# One-way ANOVA of the "former smoker" percentage on Year.
# Named `fit` so stats::lm() is not shadowed.
fit <- lm(Data_value ~ Year, data)
summary(fit)
anova(fit)
# Residual diagnostics.
par(mfrow = c(1, 2))
qqnorm(residuals(fit))
qqline(residuals(fit))
plot(jitter(fitted(fit)), residuals(fit), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)  # separate call; "+" between base-graphics calls is meaningless
``` | /code/Datathon.R | no_license | mtanghu/Citadel-Central-Datathon-Fall21 | R | false | false | 8,264 | r | ---
title: "R Notebook"
output: html_notebook
---
```{r}
# Exploratory look at current-smoker percentage by education level (2018).
# Alternative breakout files are kept commented out for quick switching:
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Age.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Gender.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Income.csv")
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Edu.csv")
data$Break_Out=factor(data$Break_Out)
#par(mfrow=c(1,2))
plot(Data_value~Break_Out,data,xlab="Edu",ylab="Percentage of Current Smoker")
# NOTE(review): xlab says "Age" but the active data set is the Edu breakout.
stripchart(Data_value~Break_Out,data,vertical=TRUE,method="stack",xlab="Age",ylab="Percentage of Current Smoker")
```
```{r}
# Fit the one-way model on the log response.
# NOTE(review): the name `lm` shadows stats::lm() for the rest of the session.
lm = lm(log(Data_value)~Break_Out,data)
#lm = lm(Data_value~Break_Out-1,data)
summary(lm)
```
```{r}
round(coef(lm),1)
```
The residuals are approximately normal, with a few outliers, so we can proceed with the inference without much concern.
```{r}
# F-test for the Break_Out effect, then residual diagnostics.
anova(lm)
par(mfrow=c(1,2))
# Normal Q-Q plot of the residuals.
qqnorm(residuals(lm))
qqline(residuals(lm))
# Residuals vs fitted; jitter separates overplotted group means.  (The "+"
# between the two base-graphics calls is a no-op -- both draw as side effects.)
plot(jitter(fitted(lm)),residuals(lm),xlab="Fitted",ylab="Residuals")+abline(h=0)
```
Next, we check for non-constant variance across groups using Levene's test on the log-transformed response:
```{r}
# Median-based Levene's test (Brown-Forsythe): regress the absolute deviations
# from the group medians on the group factor.  The untransformed version is
# kept commented out; the active version uses the log response to match the
# fitted model above.
#med = with(data,tapply(Data_value,Break_Out,median))
#ar = with(data,abs(Data_value-med[Break_Out]))
#anova(lm(ar~Break_Out,data))
# log
med = with(data,tapply(log(Data_value),Break_Out,median))
ar = with(data,abs(log(Data_value)-med[Break_Out]))
anova(lm(ar~Break_Out,data))
```
```{r}
# Post-hoc pairwise comparisons, chosen by the Levene's-test result computed
# from `ar` above: Tamhane T2 (no equal-variance assumption) when the variance
# test is significant, otherwise Tukey's HSD.
library(PMCMRplus)
vartest=anova(lm(ar~Break_Out,data))
if(vartest$`Pr(>F)`[1]<0.05){
summary(tamhaneT2Test(log(data$Data_value),data$Break_Out))
}else{
print(tci <- TukeyHSD(aov(Data_value~Break_Out,data)))
plot(tci)
}
```
```{r}
# Box-Cox profile likelihood over a narrow lambda grid, to pick a power
# transformation for Data_value.  (Style note: prefer TRUE over T.)
require(MASS)
lm = lm(Data_value~Break_Out,data)
boxcox(lm,plotit=T,lambda=seq(-0.08,0.23,by=0.01))
```
```{r}
# Year trend analysis of one smoking-status percentage.
# NOTE(review): the axis labels say "Smoke every day" although the file read
# here is FormeronYear.csv -- confirm which label is intended.
data <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
data$Year <- factor(data$Year)
par(mfrow = c(1, 2))
plot(Data_value ~ Year, data, xlab = "Year", ylab = "Smoke every day")
stripchart(Data_value ~ Year, data, vertical = TRUE, method = "stack",
           xlab = "Year", ylab = "Smoke every day")
# One-way ANOVA of Data_value on Year; `fit` avoids shadowing stats::lm().
fit <- lm(Data_value ~ Year, data)
summary(fit)
anova(fit)
# Residual diagnostics.
par(mfrow = c(1, 2))
qqnorm(residuals(fit))
qqline(residuals(fit))
plot(jitter(fitted(fit)), residuals(fit), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)  # separate call; "+" between base-graphics calls is meaningless
# Overall F-test against the intercept-only model.
lmnull <- lm(Data_value ~ 1, data)
anova(lmnull, fit)
```
```{r}
# --- One-way ANOVA of current-smoker percentage by age group (2020 data) -----
data <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2020Age.csv")
# Swap in one of these to rerun the same analysis for another year:
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2019Age.csv")
#data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Age.csv")
data$Break_Out <- factor(data$Break_Out)
par(mfrow = c(1, 2))
plot(Data_value ~ Break_Out, data, xlab = "Age", ylab = "Percentage of Current Smoker")
stripchart(Data_value ~ Break_Out, data, vertical = TRUE, method = "stack",
           xlab = "Age", ylab = "Percentage of Current Smoker")
print("----------ANOVA with intercept----------")
print("if P(>F)<0.05, there is indeed a difference in the levels.")
fit <- lm(Data_value ~ Break_Out, data)  # named `fit` so stats::lm() is not shadowed
#summary(fit)
anova(fit)
print("----------Fitted model without intercept----------")
lmodi <- lm(Data_value ~ Break_Out - 1, data)
summary(lmodi)
print("----------Fitted model without intercept ANOVA----------")
lmnull <- lm(Data_value ~ 1, data)
anova(lmnull, lmodi)
# Residual diagnostics for the intercept model.
qqnorm(residuals(fit))
qqline(residuals(fit))
plot(jitter(fitted(fit)), residuals(fit), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)  # separate statement; "+" between base-graphics calls is meaningless
print("----------Levene's Test----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
med <- with(data, tapply(Data_value, Break_Out, median))
ar <- with(data, abs(Data_value - med[Break_Out]))
# Bug fix: compute `vartest` *before* it is used.  The original read `vartest`
# here but only assigned it after this if-block, so a fresh session errored
# (or silently reused a stale value left over from another chunk).
vartest <- anova(lm(ar ~ Break_Out, data))
print(vartest)
if (vartest$`Pr(>F)`[1] < 0.05) {
  print("----------Transformation----------")
  require(MASS)
  boxcox(fit, plotit = TRUE, lambda = seq(0.05, 0.5, by = 0.01))
  # Box-Cox style power transform with lambda = 0.25.
  lmtr <- lm((4 * (Data_value^(0.25) - 1)) ~ Break_Out, data)
  summary(lmtr)
  par(mfrow = c(1, 2))
  qqnorm(residuals(lmtr))
  qqline(residuals(lmtr))
  plot(jitter(fitted(lmtr)), residuals(lmtr), xlab = "Fitted", ylab = "Residuals")
  abline(h = 0)
  print("----------Levene's Test with Transformation----------")
  print("if P(>F)>0.05, there is no evidence of non-constant variance.")
  medtr <- with(data, tapply((4 * (Data_value^(0.25) - 1)), Break_Out, median))
  # Bug fix: deviations of the transformed data must be taken around the
  # transformed medians `medtr`, not the raw medians `med`.
  artr <- with(data, abs((4 * (Data_value^(0.25) - 1)) - medtr[Break_Out]))
  print(anova(lm(artr ~ Break_Out, data)))
}
print("----------Comparison Test----------")
library(PMCMRplus)
if (vartest$`Pr(>F)`[1] < 0.05) {
  print("----------Non-constant Var----------")
  summary(tamhaneT2Test(log(data$Data_value), data$Break_Out))
} else {
  print("----------Constant Var----------")
  print(tci <- TukeyHSD(aov(Data_value ~ Break_Out, data)))
  plot(tci)
}
```
```{r}
# NOTE(review): this chunk still *uses* `data18` and `data19` (the plots below
# and the TukeyHSD calls at the bottom) although the reads that define them are
# commented out here -- in a fresh session it stops with
# "object 'data19' not found".  Either restore the two reads or remove the
# calls that reference them.
#data18 = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2018Edu.csv")
#data19 = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/isCurrent2019Edu.csv")
data = read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
#data18$Break_Out=factor(data18$Break_Out)
#data19$Break_Out=factor(data19$Break_Out)
# NOTE(review): `Year` is filled from `Break_Out` here; the following chunk
# uses data$Year=factor(data$Year) instead -- confirm which column is intended.
data$Year=factor(data$Break_Out)
par(mfrow=c(1,3))
plot(Data_value~Break_Out,data,xlab="Edu(2020)",ylab="Percentage of Current Smoker")
plot(Data_value~Break_Out,data19,xlab="Edu(2019)",ylab="Percentage of Current Smoker")
plot(Data_value~Break_Out,data18,xlab="Edu(2018)",ylab="Percentage of Current Smoker")
#stripchart(Data_value~Break_Out,data,vertical=TRUE,method="stack",xlab="Age",ylab="Percentage of Current Smoker")
# One-way ANOVA: does Data_value differ across Break_Out levels?
print("----------ANOVA with intercept----------")
print("if P(>F)<0.05, there is indeed a difference in the levels.")
lm = lm(Data_value~Break_Out,data)
#summary(lm)
anova(lm)
print("----------Fited model without intercept----------")
lmodi = lm(Data_value~Break_Out-1,data)
summary(lmodi)
print("----------Fited model without intercept ANOVA----------")
lmnull=lm(Data_value~1,data)
anova(lmnull,lmodi)
# Residual diagnostics for the intercept model.
qqnorm(residuals(lm))
qqline(residuals(lm))
plot(jitter(fitted(lm)),residuals(lm),xlab="Fitted",ylab="Residuals")+abline(h=0)
# Median-based Levene's test: ANOVA on absolute deviations from group medians.
print("----------Levene's Test----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
med = with(data,tapply(Data_value,Break_Out,median))
ar = with(data,abs(Data_value-med[Break_Out]))
anova(lm(ar~Break_Out,data))
# Power transformation with lambda = 2.25, chosen from the Box-Cox profile.
print("----------Transformation----------")
require(MASS)
boxcox(lm,plotit=T,lambda=seq(1.5,3.3,by=0.01))
lmtr = lm(1/2.25*(Data_value^(2.25)-1)~Break_Out,data)
summary(lmtr)
par(mfrow=c(1,2))
qqnorm(residuals(lmtr))
qqline(residuals(lmtr))
plot(jitter(fitted(lmtr)),residuals(lmtr),xlab="Fitted",ylab="Residuals")+abline(h=0)
print("----------Levene's Test with Transformation----------")
print("if P(>F)>0.05, there is no evidence of non-constant variance.")
medtr = with(data,tapply(1/2.25*(Data_value^(2.25)-1),Break_Out,median))
artr = with(data,abs(1/2.25*(Data_value^(2.25)-1)-medtr[Break_Out]))
anova(lm(artr~Break_Out,data))
# Post-hoc pairwise comparisons: Tamhane T2 when variances are unequal,
# Tukey's HSD otherwise.
print("----------Comparison Test----------")
library(PMCMRplus)
vartest=anova(lm(ar~Break_Out,data))
if(vartest$`Pr(>F)`[1]<0.05){
print("----------Non-constant Var----------")
summary(tamhaneT2Test(data$Data_value,data$Break_Out))
}else{
print("----------Constant Var----------")
print(tci <- TukeyHSD(aov(Data_value~Break_Out,data)))
plot(tci)
print(tci <- TukeyHSD(aov(1/2.25*(Data_value^(2.25)-1)~Break_Out,data)))
plot(tci)
tci20 <- TukeyHSD(aov(Data_value~Break_Out,data))
tci19 <- TukeyHSD(aov(Data_value~Break_Out,data19))
tci18 <- TukeyHSD(aov(Data_value~Break_Out,data18))
par(mfrow=c(1,3))
plot(tci20)
plot(tci19)
plot(tci18)
}
```
```{r}
# Year-over-year comparison of smoking-status percentages.  Each CSV holds one
# smoking status (never / former / every day) with columns Year and Data_value.
data2 <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/NeverSmokedonYear.csv")
data3 <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/SmokeEverydayonYear.csv")
data <- read.csv("C:/Users/Jun/Desktop/Courses/Datathon/FormeronYear.csv")
data$Year <- factor(data$Year)
data2$Year <- factor(data2$Year)
data3$Year <- factor(data3$Year)
# Side-by-side boxplots, one per smoking status.
par(mfrow = c(1, 3))
plot(Data_value ~ Year, data2, xlab = "Year", ylab = "Never Smoked")
plot(Data_value ~ Year, data, xlab = "Year", ylab = "Former")
plot(Data_value ~ Year, data3, xlab = "Year", ylab = "Smoke every day")
#stripchart(Data_value~Year,data,vertical=TRUE,method="stack",xlab="Year",ylab="Smoke every day")
# One-way ANOVA of the "former smoker" percentage on Year.
# Named `fit` so stats::lm() is not shadowed.
fit <- lm(Data_value ~ Year, data)
summary(fit)
anova(fit)
# Residual diagnostics.
par(mfrow = c(1, 2))
qqnorm(residuals(fit))
qqline(residuals(fit))
plot(jitter(fitted(fit)), residuals(fit), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)  # separate call; "+" between base-graphics calls is meaningless
``` |
# Simulation driver: run Del_Impute_wrapper (defined in
# Imputation_VI_Functions.R) 100 times in parallel on the Boston Housing data
# with MNAR missingness, and save the collected results.
setwd("/work/STAT/ajsage")
source("Imputation_VI_Functions.R")
library(parallel)
library(mlbench)
data("BostonHousing")
xyabv <- BostonHousing
# `chas` is a factor in mlbench::BostonHousing; convert it to numeric 0/1.
xyabv$chas <- as.numeric(as.character(xyabv$chas))
# Rename the response column (14th) to "y", as the helper functions expect.
names(xyabv)[14]="y"
#xabv=xyabv[,-14]
#yabv=xyabv$y
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
# Ship the helper functions and the prepared data to every worker.
clusterExport(cl, c("Impute_and_VI", "CaliberVI", "miceVI", "Gen_Del_Impute", "Generate_Sim_Data", "DeleteMissing", "Del_Impute", "Del_Impute_wrapper", "xyabv"))
clusterEvalQ(cl, {
library(randomForest)
library(randomForestSRC)
library(CALIBERrfimpute)
library(mice)
library(MASS)
})
# Reproducible parallel RNG streams across workers.
clusterSetRNGStream(cl, 02012018)
# 100 replicates: delete values (MNAR) at the rates in pvec for the columns in
# xvarvec, impute, and collect the results.
MVVIMP <- parSapply(cl=cl, X=1:100,FUN=function(i){Del_Impute_wrapper(data=xyabv, xvarvec=c(1,4,6,7),pvec=c(0, 0.1, 0.25, 0.5, 0.75), ntrees=500, missingness="MNAR")} )
stopCluster(cl)
save(MVVIMP, file="MVVIMP_BH_MNAR.Rdata")
| /Sims/Housing_MNAR.R | no_license | AndrewjSage/Imputation_Variable_Importance | R | false | false | 924 | r | setwd("/work/STAT/ajsage")
source("Imputation_VI_Functions.R")
library(parallel)
library(mlbench)
data("BostonHousing")
xyabv <- BostonHousing
xyabv$chas <- as.numeric(as.character(xyabv$chas))
names(xyabv)[14]="y"
#xabv=xyabv[,-14]
#yabv=xyabv$y
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
clusterExport(cl, c("Impute_and_VI", "CaliberVI", "miceVI", "Gen_Del_Impute", "Generate_Sim_Data", "DeleteMissing", "Del_Impute", "Del_Impute_wrapper", "xyabv"))
clusterEvalQ(cl, {
library(randomForest)
library(randomForestSRC)
library(CALIBERrfimpute)
library(mice)
library(MASS)
})
clusterSetRNGStream(cl, 02012018)
MVVIMP <- parSapply(cl=cl, X=1:100,FUN=function(i){Del_Impute_wrapper(data=xyabv, xvarvec=c(1,4,6,7),pvec=c(0, 0.1, 0.25, 0.5, 0.75), ntrees=500, missingness="MNAR")} )
stopCluster(cl)
save(MVVIMP, file="MVVIMP_BH_MNAR.Rdata")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-mSet.R
\docType{class}
\name{mSet-class}
\alias{mSet-class}
\title{A virtual S4 class to store an quantitative experiment data.}
\description{
This is a virtual S4 class to store the entire dataset from a quantitative
experiment, such as metabolomics and proteomics experiments.
This is a virtual class so it can only be inherited from, but not be
constructed directly. You can either use the classes the inherits this class
defined by this package, or you can define your own class and inherits it to
use some of its features.
The classes that inherits from the virtual mSet class are: the
\code{\link{MetabolomicsSet-class}} and the \code{\link{ProteomicsSet-class}}
.
The mSet class and all the classes that inherits from it should contain at
least the four slots that were discussed below.
}
\section{Slots}{
\describe{
\item{\code{conc_table}}{A \code{\link{conc_table-class}} object that stores the
concentration information from the experiment. The column names should be
the feature IDs, and the rownames should be the row IDs. This should be a
numeric matrix.}
\item{\code{sample_table}}{A \code{\link{sample_table-class}} object that stores the
sample meta-data information. The row names should be sample IDs and
should match the column names of the conc_table.}
\item{\code{feature_data}}{A \code{\link{feature_data-class}} object that stores the
feature information during the experiment. The row names should be feature
IDs and should match the row names of the conc_table.}
\item{\code{experiment_data}}{This must be an object that inherits from the virtual
\code{\link{ExperimentData-class}}. This slot stores the experiment
information such as lab and instrument. It also varies among different
experiment types.}
}}
\author{
Chenghao Zhu
}
| /man/mSet-class.Rd | no_license | zivkovic-lab/Metabase | R | false | true | 1,850 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-mSet.R
\docType{class}
\name{mSet-class}
\alias{mSet-class}
\title{A virtual S4 class to store an quantitative experiment data.}
\description{
This is a virtual S4 class to store the entire dataset from a quantitative
experiment, such as metabolomics and proteomics experiments.
This is a virtual class so it can only be inherited from, but not be
constructed directly. You can either use the classes the inherits this class
defined by this package, or you can define your own class and inherits it to
use some of its features.
The classes that inherits from the virtual mSet class are: the
\code{\link{MetabolomicsSet-class}} and the \code{\link{ProteomicsSet-class}}
.
The mSet class and all the classes that inherits from it should contain at
least the four slots that were discussed below.
}
\section{Slots}{
\describe{
\item{\code{conc_table}}{A \code{\link{conc_table-class}} object that stores the
concentration information from the experiment. The column names should be
the feature IDs, and the rownames should be the row IDs. This should be a
numeric matrix.}
\item{\code{sample_table}}{A \code{\link{sample_table-class}} object that stores the
sample meta-data information. The row names should be sample IDs and
should match the column names of the conc_table.}
\item{\code{feature_data}}{A \code{\link{feature_data-class}} object that stores the
feature information during the experiment. The row names should be feature
IDs and should match the row names of the conc_table.}
\item{\code{experiment_data}}{This must be an object that inherits from the virtual
\code{\link{ExperimentData-class}}. This slot stores the experiment
information such as lab and instrument. It also varies among different
experiment types.}
}}
\author{
Chenghao Zhu
}
|
# Auto-generated fuzzer regression case: call palm:::euc_distances with one
# point set of all zeros (x1) against empty coordinate vectors (x2, y1, y2).
testlist <- list(x1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x2 = numeric(0), y1 = numeric(0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) | /palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612969062-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 284 | r | testlist <- list(x1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x2 = numeric(0), y1 = numeric(0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_contour_vals}
\alias{compute_contour_vals}
\title{compute contour vals for each pixel}
\usage{
compute_contour_vals(marks, now_rates, now_seeds)
}
\arguments{
\item{marks}{matrix of pixels}
\item{now_rates}{the rate of each voronoi tile}
\item{now_seeds}{the locations of each voronoi tile}
}
\value{
a vector storing the value for each pixel
}
\description{
compute one contour, by filling in each of the pixels/marks
}
| /man/compute_contour_vals.Rd | permissive | raonyguimaraes/plotmaps | R | false | true | 537 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_contour_vals}
\alias{compute_contour_vals}
\title{compute contour vals for each pixel}
\usage{
compute_contour_vals(marks, now_rates, now_seeds)
}
\arguments{
\item{marks}{matrix of pixels}
\item{now_rates}{the rate of each voronoi tile}
\item{now_seeds}{the locations of each voronoi tile}
}
\value{
a vector storing the value for each pixel
}
\description{
compute one contour, by filling in each of the pixels/marks
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_spotify_api_token.R
\name{get_spotify_api_token}
\alias{get_spotify_api_token}
\title{Gets a Spotify API token}
\usage{
get_spotify_api_token(client_id, client_secret)
}
\arguments{
\item{client_id}{Your client id. Required.}
\item{client_secret}{Your client secret. Required.}
}
\value{
In case the request succeeded, a token to connect with the Spotify API. On failure, a message indicating that authentication failed.
}
\description{
Gets an OAuth2.0 token to connect with the Spotify API.
}
\seealso{
The vignette Connecting-with-the-Spotify-API
}
| /man/get_spotify_api_token.Rd | permissive | LennyyGH/spotidy | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_spotify_api_token.R
\name{get_spotify_api_token}
\alias{get_spotify_api_token}
\title{Gets a Spotify API token}
\usage{
get_spotify_api_token(client_id, client_secret)
}
\arguments{
\item{client_id}{Your client id. Required.}
\item{client_secret}{Your client secret. Required.}
}
\value{
In case the request succeeded, a token to connect with the Spotify API. On failure, a message indicating that authentication failed.
}
\description{
Gets an OAuth2.0 token to connect with the Spotify API.
}
\seealso{
The vignette Connecting-with-the-Spotify-API
}
|
# multiMSE <- function(MOM, MPs = list(c("AvC","DCAC"),c("FMSYref","curE")),
# CheckMPs = FALSE, timelimit = 1, Hist=FALSE, ntrials=50, fracD=0.05, CalcBlow=FALSE,
# HZN=2, Bfrac=0.5, AnnualMSY=TRUE, silent=FALSE, PPD=FALSE, parallel=FALSE,
# save_name=NULL, checks=FALSE, control=NULL) {
#
#
# if (class(MOM)!='MOM') stop("OM object is not of class '<OM'", call. = FALSE)
#
# # Check MPs
# MPvec<-unlist(MPs)
# if (!all(is.na(MPvec))) {
# for (mm in MPvec) {
# chkMP <- try(get(mm), silent=TRUE)
# if (!(class(chkMP) %in% c('MP','MMP'))) stop(mm, " is not a valid MP", call.=FALSE)
# }
# }
#
# MSE1 <- multiMSE_int(MOM, MPs, CheckMPs, timelimit, Hist, ntrials, fracD, CalcBlow,
# HZN, Bfrac, AnnualMSY, silent, PPD, checks=checks, control=control)
#
# MSE1
# }
| /R/multiMSE_unused.R | no_license | DLMtool/multiMSE | R | false | false | 882 | r | # multiMSE <- function(MOM, MPs = list(c("AvC","DCAC"),c("FMSYref","curE")),
# CheckMPs = FALSE, timelimit = 1, Hist=FALSE, ntrials=50, fracD=0.05, CalcBlow=FALSE,
# HZN=2, Bfrac=0.5, AnnualMSY=TRUE, silent=FALSE, PPD=FALSE, parallel=FALSE,
# save_name=NULL, checks=FALSE, control=NULL) {
#
#
# if (class(MOM)!='MOM') stop("OM object is not of class '<OM'", call. = FALSE)
#
# # Check MPs
# MPvec<-unlist(MPs)
# if (!all(is.na(MPvec))) {
# for (mm in MPvec) {
# chkMP <- try(get(mm), silent=TRUE)
# if (!(class(chkMP) %in% c('MP','MMP'))) stop(mm, " is not a valid MP", call.=FALSE)
# }
# }
#
# MSE1 <- multiMSE_int(MOM, MPs, CheckMPs, timelimit, Hist, ntrials, fracD, CalcBlow,
# HZN, Bfrac, AnnualMSY, silent, PPD, checks=checks, control=control)
#
# MSE1
# }
|
# Code extracted (purled) from the 'sentinel2' package introduction vignette.
# Chunks whose headers carry "eval=FALSE" were not executed when the vignette
# was built; all S2_* calls talk to the s2.boku.eodc.eu web service.
## ----eval=FALSE----------------------------------------------------------
# # If you don't have it yet, install 'devtools':
# # install.packages('devtools')
# library(devtools)
# install_github('IVFL-BOKU/sentinel2')
## ------------------------------------------------------------------------
# Log in with the public demo account and verify the credentials work.
library(sentinel2)
S2_initialize_user(user = 'test@s2.boku.eodc.eu', password = 'test')
S2_check_access()
## ------------------------------------------------------------------------
# Search atmospherically corrected granules around a point and date range.
S2_query_granule(atmCorr = TRUE,
                 geometry = c(x = 16.20, y = 48.15),
                 dateMin = '2016-09-15',
                 dateMax = '2016-09-30')
## ----eval = FALSE--------------------------------------------------------
# S2_buy_granule(granuleId = 1080943)
## ------------------------------------------------------------------------
S2_query_image(band = 'LAI',
               utm = '33U',
               dateMin = '2016-09-15',
               dateMax = '2016-09-30')
## ----eval = FALSE--------------------------------------------------------
# S2_put_ROI(geometry = c(x=16, y=48),
#            regionId = 'testROI',
#            cloudCovMax = 20,
#            dateMin = '2016-06-01',
#            dateMax = '2016-07-01')
## ------------------------------------------------------------------------
S2_user_info()
## ------------------------------------------------------------------------
# List granules already owned by the account.
granules_owned <- S2_query_granule(owned = TRUE)
granules_owned
## ------------------------------------------------------------------------
save_names <- S2_generate_names(x = granules_owned)
save_names
## ----eval=FALSE----------------------------------------------------------
# S2_download(url = granules_owned$url, destfile = save_names)
## ------------------------------------------------------------------------
images = S2_query_image(owned = TRUE, band = 'B08', cloudCovMax = 85)
## ------------------------------------------------------------------------
# Build destination file names from each image's date and format.
save_names = paste0(images$date, '.', images$format)
## ----eval=FALSE----------------------------------------------------------
# S2_download(images$url, save_names)
## ----eval=FALSE----------------------------------------------------------
# # find some images and prepare file names
# images = S2_query_image(owned = TRUE, band = 'B08', cloudCovMax = 85)
# file_names = paste0(images$date, '.tif')
#
# # read the geometry from file
# geom = roi_to_jgeom('/my/path/my_geom_file.kml')
#
# # download them:
# # - reprojecting to WGS-84 (srid 4326)
# # - changing data format to Byte (0-255)
# # - dividing all values by 20 so they will better fit the Byte range
# #   (and setting max value to 254 so it will not overlap with the no data value)
# # - cutting to the given geometry
# S2_download(images$url, file_names, srid = 4326, dataType = 'Byte', range = 50, max = 254, geometry = geom)
| /vignettes/introduction.R | no_license | intiluna/sentinel2 | R | false | false | 2,939 | r | ## ----eval=FALSE----------------------------------------------------------
# # If you don't have it yet, install 'devtools':
# # install.packages('devtools')
# library(devtools)
# install_github('IVFL-BOKU/sentinel2')
## ------------------------------------------------------------------------
library(sentinel2)
S2_initialize_user(user = 'test@s2.boku.eodc.eu', password = 'test')
S2_check_access()
## ------------------------------------------------------------------------
S2_query_granule(atmCorr = TRUE,
geometry = c(x = 16.20, y = 48.15),
dateMin = '2016-09-15',
dateMax = '2016-09-30')
## ----eval = FALSE--------------------------------------------------------
# S2_buy_granule(granuleId = 1080943)
## ------------------------------------------------------------------------
S2_query_image(band = 'LAI',
utm = '33U',
dateMin = '2016-09-15',
dateMax = '2016-09-30')
## ----eval = FALSE--------------------------------------------------------
# S2_put_ROI(geometry = c(x=16, y=48),
# regionId = 'testROI',
# cloudCovMax = 20,
# dateMin = '2016-06-01',
# dateMax = '2016-07-01')
## ------------------------------------------------------------------------
S2_user_info()
## ------------------------------------------------------------------------
granules_owned <- S2_query_granule(owned = TRUE)
granules_owned
## ------------------------------------------------------------------------
save_names <- S2_generate_names(x = granules_owned)
save_names
## ----eval=FALSE----------------------------------------------------------
# S2_download(url = granules_owned$url, destfile = save_names)
## ------------------------------------------------------------------------
images = S2_query_image(owned = TRUE, band = 'B08', cloudCovMax = 85)
## ------------------------------------------------------------------------
save_names = paste0(images$date, '.', images$format)
## ----eval=FALSE----------------------------------------------------------
# S2_download(images$url, save_names)
## ----eval=FALSE----------------------------------------------------------
# # find some images and prepare file names
# images = S2_query_image(owned = TRUE, band = 'B08', cloudCovMax = 85)
# file_names = paste0(images$date, '.tif')
#
# # read the geometry from file
# geom = roi_to_jgeom('/my/path/my_geom_file.kml')
#
# # download them:
# # - reporojecting to WGS-84 (srid 4326)
# # - changing data format to Byte (0-255)
# # - dividing all values by 20 so they will better fit the Byte range
# # (and setting max value to 254 so it will not overlap with the no data value)
# # - cutting to the given geometry
# S2_download(images$url, file_names, srid = 4326, dataType = 'Byte', range = 50, max = 254, geometry = geom)
|
#' @export
#' @title Additive-Mean-Standard-Deviation Portfolio Utility Function
#' @aliases AMSDP
#' @description Compute the utility function x \%*\% mp - gamma^theta * (t(x) \%*\% Cov \%*\% x)^(0.5 * theta) / theta for a portfolio x.
#' @param x a numeric n-vector representing a portfolio.
#' @param mp a numeric n-vector representing the mean payoff of each of the n assets.
#' @param Cov the n-by-n covariance matrix of the payoff vectors of n assets.
#' @param gamma a non-negative scalar representing the risk aversion coefficient with a default value of 1.
#' @param theta a non-negative scalar with a default value of 1.
#' @return A scalar indicating the utility level.
#' @references Danthine, J. P., Donaldson, J. (2005, ISBN: 9780123693808) Intermediate Financial Theory. Elsevier Academic Press.
#' @references Nakamura, Yutaka (2015) Mean-Variance Utility. Journal of Economic Theory, 160: 536-556.
#' @references Sharpe, William F (2008, ISBN: 9780691138503) Investors and Markets: Portfolio Choices, Asset Prices, and Investment Advice. Princeton University Press.
#' @references Xu Gao (2018, ISBN: 9787300258232) Twenty-five Lectures on Financial Economics. Beijing: China Renmin University Press. (In Chinese)
#' @seealso \code{\link{AMSD}}
#' @examples
#' \donttest{
#' UAP <- matrix(c(
#' 0, 1, 1,
#' 0, 2, 1,
#' 1, 1, 1,
#' 1, 2, 1,
#' 2, 0, 1
#' ), nrow = 5, byrow = TRUE)
#'
#' portfolio <- c(1.977, 1.183, 3.820)
#'
#' AMSDP(portfolio, colMeans(UAP),
#' cov.wt(UAP, method = "ML")$cov,
#' gamma = 1, theta = 1
#' )
#'
#' AMSD(UAP %*% portfolio, gamma = 1, theta = 1)
#' }
AMSDP <- function(x, mp, Cov, gamma = 1, theta = 1) {
  # Drop any dim/names so the portfolio acts as a plain numeric vector.
  w <- c(x)
  # Expected portfolio payoff: w' mp (a 1x1 matrix).
  expected <- w %*% mp
  # Risk penalty: gamma^theta * sd(payoff)^theta / theta, where the portfolio
  # variance is the quadratic form w' Cov w.
  risk <- gamma^theta * (t(w) %*% Cov %*% w)^(0.5 * theta) / theta
  # Collapse the 1x1 matrix result to a bare numeric scalar.
  c(expected - risk)
}
| /R/AMSDP.R | no_license | cran/GE | R | false | false | 1,812 | r | #' @export
#' @title Additive-Mean-Standard-Deviation Portfolio Utility Function
#' @aliases AMSDP
#' @description Compute the utility function x \%*\% mp - gamma^theta * (t(x) \%*\% Cov \%*\% x)^(0.5 * theta) / theta for a portfolio x.
#' @param x a numeric n-vector representing a portfolio.
#' @param mp a numeric n-vector representing the mean payoff of each of the n assets.
#' @param Cov the n-by-n covariance matrix of the payoff vectors of n assets.
#' @param gamma a non-negative scalar representing the risk aversion coefficient with a default value of 1.
#' @param theta a non-negative scalar with a default value of 1.
#' @return A scalar indicating the utility level.
#' @references Danthine, J. P., Donaldson, J. (2005, ISBN: 9780123693808) Intermediate Financial Theory. Elsevier Academic Press.
#' @references Nakamura, Yutaka (2015) Mean-Variance Utility. Journal of Economic Theory, 160: 536-556.
#' @references Sharpe, William F (2008, ISBN: 9780691138503) Investors and Markets: Portfolio Choices, Asset Prices, and Investment Advice. Princeton University Press.
#' @references Xu Gao (2018, ISBN: 9787300258232) Twenty-five Lectures on Financial Economics. Beijing: China Renmin University Press. (In Chinese)
#' @seealso \code{\link{AMSD}}
#' @examples
#' \donttest{
#' UAP <- matrix(c(
#' 0, 1, 1,
#' 0, 2, 1,
#' 1, 1, 1,
#' 1, 2, 1,
#' 2, 0, 1
#' ), nrow = 5, byrow = TRUE)
#'
#' portfolio <- c(1.977, 1.183, 3.820)
#'
#' AMSDP(portfolio, colMeans(UAP),
#' cov.wt(UAP, method = "ML")$cov,
#' gamma = 1, theta = 1
#' )
#'
#' AMSD(UAP %*% portfolio, gamma = 1, theta = 1)
#' }
AMSDP <- function(x, mp, Cov, gamma = 1, theta = 1) {
  # Drop any dim/names so `x` is treated as a plain numeric vector.
  x <- c(x)
  # Mean payoff x'mp minus the risk penalty
  # gamma^theta * (x' Cov x)^(theta/2) / theta.
  result <- x %*% mp - gamma^theta * (t(x) %*% Cov %*% x)^(0.5 * theta) / theta
  # c() collapses the 1x1 matrix to a bare numeric scalar.
  c(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/silicate.R
\name{sc_triangle}
\alias{sc_triangle}
\alias{sc_triangle.default}
\alias{sc_triangle.mesh3d}
\title{Silicate methods}
\usage{
sc_triangle(x, ...)
\method{sc_triangle}{default}(x, ...)
\method{sc_triangle}{mesh3d}(x, ...)
}
\arguments{
\item{x}{model}
\item{...}{passed to methods}
\item{prefer_triangles}{give triangles in preference to quads, if quads also present}
}
\description{
Silicate methods
}
| /man/sc_triangle.Rd | no_license | mdsumner/scrgl | R | false | true | 496 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/silicate.R
\name{sc_triangle}
\alias{sc_triangle}
\alias{sc_triangle.default}
\alias{sc_triangle.mesh3d}
\title{Silicate methods}
\usage{
sc_triangle(x, ...)
\method{sc_triangle}{default}(x, ...)
\method{sc_triangle}{mesh3d}(x, ...)
}
\arguments{
\item{x}{model}
\item{...}{passed to methods}
\item{prefer_triangles}{give triangles in preference to quads, if quads also present}
}
\description{
Silicate methods
}
|
#' Medical info on 100,000 randomly sampled births in the US in 2014
#'
#' The Centers for Disease Control collects data on all births registered
#' in the US (50 states + DC). The
#' full data set is provided by the `natality2014` package.
#' `Births_2014` is a simplified version of `natality_2014::Natality_2014_100k`
#' with some variables renamed for simplicity and some numerical variables converted
#' to categorical.
#'
#' @docType data
#' @name Births_2014
#'
#' @keywords datasets
#'
#' @format
#' A data frame with a random sample of size 100000 from the complete CDC set of 3,998,175 cases, each of which is a birth in the US in 2014.
#' \itemize{
#' \item{\code{age_mother}} {Mother's age at date of birth}
#' \item{\code{age_father}} {Father's age at date of birth}
#' \item{\code{induced}} {Was labor induced?}
#' \item{\code{ventilator}} {Baby put on mechanical ventilator immediately}
#' \item{\code{baby_wt}} {Baby's weight (gm)}
#' \item{\code{gestation}} {Length of gestation}
#' \item{\code{sex}} {Baby's sex}
#' \item{\code{plurality}} {Plurality of birth: 2 = twins, 3 = triplets, ...}
#' \item{\code{apgar5}} {APGAR score at 5 minutes}
#' \item{\code{pay}} {Source of payment for delivery}
#' \item{\code{delivery}} {Method of delivery}
#' \item{\code{induced}} {Labor induced}
#' \item{\code{mother_wt_before}} {Mother's weight before pregnancy}
#' \item{\code{mother_wt_delivery}} {Mother's weight at delivery}
#' \item{\code{mother_height}} {Mother's height}
#' \item{\code{cig_0}} {Number of cigarettes smoked daily before pregnancy}
#' \item{\code{cig_1}} {Number of cigarettes smoked daily during first trimester}
#' \item{\code{cig_2}} {Number of cigarettes smoked daily during second trimester}
#' \item{\code{cig_3}} {Number of cigarettes smoked daily during third trimester}
#' \item{\code{wic}} {Enrolled in Women, Infants, and Children (WIC) program for supplemental nutrition.}
#' \item{\code{month_start_prenatal}} {Month started in prenatal care. 15 means never started.}
#' \item{\code{prenatal_visits}} {Number of visits to prenatal care.}
#'
#' }
#'
#' @seealso \code{\link{Larger_natality_data_files}}
#'
"Births_2014"
| /R/Births_2014.R | no_license | dtkaplan/SDSdata | R | false | false | 2,256 | r | #' Medical info on 100,000 randomly sampled births in the US in 2014
#'
#' The Centers for Disease Control collects data on all births registered
#' in the US (50 states + DC). The
#' full data set is provided by the `natality2014` package.
#' `Births_2014` is a simplified version of `natality_2014::Natality_2014_100k`
#' with some variables renamed for simplicity and some numerical variables converted
#' to categorical.
#'
#' @docType data
#' @name Births_2014
#'
#' @keywords datasets
#'
#' @format
#' A data frame with a random sample of size 100000 from the complete CDC set of 3,998,175 cases, each of which is a birth in the US in 2014.
#' \itemize{
#' \item{\code{age_mother}} {Mother's age at date of birth}
#' \item{\code{age_father}} {Father's age at date of birth}
#' \item{\code{induced}} {Was labor induced?}
#' \item{\code{ventilator}} {Baby put on mechanical ventilator immediately}
#' \item{\code{baby_wt}} {Baby's weight (gm)}
#' \item{\code{gestation}} {Length of gestation}
#' \item{\code{sex}} {Baby's sex}
#' \item{\code{plurality}} {Plurality of birth: 2 = twins, 3 = triplets, ...}
#' \item{\code{apgar5}} {APGAR score at 5 minutes}
#' \item{\code{pay}} {Source of payment for delivery}
#' \item{\code{delivery}} {Method of delivery}
#' \item{\code{induced}} {Labor induced}
#' \item{\code{mother_wt_before}} {Mother's weight before pregnancy}
#' \item{\code{mother_wt_delivery}} {Mother's weight at delivery}
#' \item{\code{mother_height}} {Mother's height}
#' \item{\code{cig_0}} {Number of cigarettes smoked daily before pregnancy}
#' \item{\code{cig_1}} {Number of cigarettes smoked daily during first trimester}
#' \item{\code{cig_2}} {Number of cigarettes smoked daily during second trimester}
#' \item{\code{cig_3}} {Number of cigarettes smoked daily during third trimester}
#' \item{\code{wic}} {Enrolled in Women, Infants, and Children (WIC) program for supplemental nutrition.}
#' \item{\code{month_start_prenatal}} {Month started in prenatal care. 15 means never started.}
#' \item{\code{prenatal_visits}} {Number of visits to prenatal care.}
#'
#' }
#'
#' @seealso \code{\link{Larger_natality_data_files}}
#'
"Births_2014"
|
# The syntax for creating new binary operators in R is unlike anything else in
# R, but it allows you to define a new syntax for your function. I would only
# recommend making your own binary operator if you plan on using it often!
#
# User-defined binary operators have the following syntax:
# %[whatever]%
# where [whatever] represents any valid variable name.
#
# Let's say I wanted to define a binary operator that multiplied two numbers and
# then added one to the product. An implementation of that operator is below:
#
# "%mult_add_one%" <- function(left, right){ # Notice the quotation marks!
# left * right + 1
# }
#
# I could then use this binary operator like `4 %mult_add_one% 5` which would
# evaluate to 21.
#
# Write your own binary operator below from absolute scratch! Your binary
# operator must be called %p% so that the expression:
#
# "Good" %p% "job!"
#
# will evaluate to: "Good job!"
# Binary paste operator: joins its two operands with a single space,
# so "Good" %p% "job!" evaluates to "Good job!".
# Vector operands recycle exactly as paste() does.
"%p%" <- function(firstarg, secondarg ){
  paste(firstarg, secondarg)  # paste()'s default sep is already " "
}
| /bin_op.R | no_license | dateshaile/R-Programming | R | false | false | 1,042 | r | # The syntax for creating new binary operators in R is unlike anything else in
# R, but it allows you to define a new syntax for your function. I would only
# recommend making your own binary operator if you plan on using it often!
#
# User-defined binary operators have the following syntax:
# %[whatever]%
# where [whatever] represents any valid variable name.
#
# Let's say I wanted to define a binary operator that multiplied two numbers and
# then added one to the product. An implementation of that operator is below:
#
# "%mult_add_one%" <- function(left, right){ # Notice the quotation marks!
# left * right + 1
# }
#
# I could then use this binary operator like `4 %mult_add_one% 5` which would
# evaluate to 21.
#
# Write your own binary operator below from absolute scratch! Your binary
# operator must be called %p% so that the expression:
#
# "Good" %p% "job!"
#
# will evaluate to: "Good job!"
# Binary paste operator: joins its two operands with a single space,
# so "Good" %p% "job!" evaluates to "Good job!".
# Vector operands recycle exactly as paste() does.
"%p%" <- function(firstarg, secondarg ){
  paste(firstarg, secondarg)  # paste()'s default sep is already " "
}
|
source("correlation_features.R")
# Generate correlation/FFT features.
#
# b_quick: when TRUE (the default, matching the original hard-coded
#   behaviour) only two sample .mat files from train_1 are processed as a
#   smoke test; when FALSE the whole train_2 directory is windowed in
#   parallel and written to a feature file.
# Returns the feature data frame invisibly (the original returned it
# invisibly too, as the value of the final assignment).
run_features <- function(b_quick = TRUE){
  if (b_quick) {
    # Quick sanity check on one file of each class (filename suffix 0 / 1).
    df_1 <- process_file_windows_single("../data/train_1/1_999_0.mat")
    df_2 <- process_file_windows_single("../data/train_1/1_1_1.mat")
    df <- rbind(df_1, df_2)
  } else {
    window_size <- 30
    output_filename <- sprintf("../data/features/train_2_window_%s_secs_correlation_and_fft.testing.txt", window_size)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = "../data/train_2/",
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
  invisible(df)
}
# Generate 5-second-window correlation/FFT features for each training
# patient.  Side effects only: progress is print()ed and
# process_windows_parallel() writes the feature file to disk.
run_features.train <- function(){
  window_size <- 5
  for (patient_id in c(1)){
    print(sprintf("Generating features for patient_id : %s", patient_id))
    output_filename <- sprintf("../data/features/train_%s_window_%s_secs_correlation_and_fft.testing.txt", patient_id, window_size)
    print(sprintf("Generating : %s", output_filename))
    inputdir <- sprintf("../data/train_%s/", patient_id)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = inputdir,
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
}
# Generate 5-second-window correlation/FFT features for each test patient
# (the "test_<id>_new" directories).  Side effects only: progress is
# print()ed and process_windows_parallel() writes the feature file.
run_features.test <- function(){
  window_size <- 5
  for (patient_id in c(1)){
    print(sprintf("Generating features for patient_id : %s", patient_id))
    output_filename <- sprintf("../data/features/test_%s_new_window_%s_secs_correlation_and_fft.testing.txt", patient_id, window_size)
    print(sprintf("Generating : %s", output_filename))
    inputdir <- sprintf("../data/test_%s_new/", patient_id)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = inputdir,
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
}
print("Running : run_features.train")
# run_features.train()
print("Running : run_features.test")
# run_features.test()
| /R/calc_correlation_features.R | no_license | telvis07/kaggle-melbourne-university-seizure-prediction | R | false | false | 2,068 | r | source("correlation_features.R")
# Generate correlation/FFT features.
#
# b_quick: when TRUE (the default, matching the original hard-coded
#   behaviour) only two sample .mat files from train_1 are processed as a
#   smoke test; when FALSE the whole train_2 directory is windowed in
#   parallel and written to a feature file.
# Returns the feature data frame invisibly (the original returned it
# invisibly too, as the value of the final assignment).
run_features <- function(b_quick = TRUE){
  if (b_quick) {
    # Quick sanity check on one file of each class (filename suffix 0 / 1).
    df_1 <- process_file_windows_single("../data/train_1/1_999_0.mat")
    df_2 <- process_file_windows_single("../data/train_1/1_1_1.mat")
    df <- rbind(df_1, df_2)
  } else {
    window_size <- 30
    output_filename <- sprintf("../data/features/train_2_window_%s_secs_correlation_and_fft.testing.txt", window_size)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = "../data/train_2/",
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
  invisible(df)
}
# Generate 5-second-window correlation/FFT features for each training
# patient.  Side effects only: progress is print()ed and
# process_windows_parallel() writes the feature file to disk.
run_features.train <- function(){
  window_size <- 5
  for (patient_id in c(1)){
    print(sprintf("Generating features for patient_id : %s", patient_id))
    output_filename <- sprintf("../data/features/train_%s_window_%s_secs_correlation_and_fft.testing.txt", patient_id, window_size)
    print(sprintf("Generating : %s", output_filename))
    inputdir <- sprintf("../data/train_%s/", patient_id)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = inputdir,
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
}
# Generate 5-second-window correlation/FFT features for each test patient
# (the "test_<id>_new" directories).  Side effects only: progress is
# print()ed and process_windows_parallel() writes the feature file.
run_features.test <- function(){
  window_size <- 5
  for (patient_id in c(1)){
    print(sprintf("Generating features for patient_id : %s", patient_id))
    output_filename <- sprintf("../data/features/test_%s_new_window_%s_secs_correlation_and_fft.testing.txt", patient_id, window_size)
    print(sprintf("Generating : %s", output_filename))
    inputdir <- sprintf("../data/test_%s_new/", patient_id)
    df <- process_windows_parallel(cores = 4,
                                   inputdir = inputdir,
                                   output_filename = output_filename,
                                   secs_per_window = window_size)
  }
}
print("Running : run_features.train")
# run_features.train()
print("Running : run_features.test")
# run_features.test()
|
# Correct hand entry mistakes, and missing information
### INITIALISE ----
# load library
library(tidyverse)
library(here)
# library(validate)
library(openxlsx)
library(readxl)
# load functions from pIndex-00-cache.R
# All cached inputs/outputs for the price-index scripts live under this path.
path2cache <- here("spreadsheets/cache/pIndex/")
# read dataframe
# index1: the 1987-1992 price-index table produced by the previous script.
index1 <- read_csv(paste0(path2cache, "pIndex-01-1987to1992.csv"))
### FUZZY MATCHES ----
## initialise
# Global accumulator: one correction template per processed column.
l.unique <- list()
# u. upper case, low. lower case, uc. unique corrections
# Build a correction template for one column of `df`: its sorted unique
# values plus upper-case ("u."), lower-case ("low.") and an editable
# correction column ("uc.", initialised to the lower-case form).
# `var` is passed unquoted (non-standard evaluation via substitute()).
# Side effect: stores the tibble in the global list `l.unique` under the
# column's name; the function's return value is not used.
list_unique <- function(df, var) {
col_name <- deparse(substitute(var))
col_vals <- eval(substitute(df))[[col_name]]
unique_names <- col_vals %>% unique() %>% sort() %>% as.tibble()
unique_names[[2]] <- str_to_upper(unique_names[[1]])
unique_names[[3]] <- str_to_lower(unique_names[[1]])
unique_names[[4]] <- unique_names[[3]]
colnames(unique_names) <- c(col_name, paste0("u.", col_name), paste0("low.", col_name), paste0("uc.", col_name))
l.unique[[col_name]] <<- unique_names
}
# Global accumulator: one table of adjacent fuzzy matches per processed column.
l.fuzzy_match1 <- list()
# Flag likely duplicate spellings: for column `colno` of the `var` entry of
# `df` (a list of correction templates, as built by list_unique), agrepl()
# compares each sorted unique value with its immediate neighbour.  Matching
# pairs are collected as (index1, name1, index2, name2) rows, lower-cased.
# Side effect: stores the result in the global list `l.fuzzy_match1`.
# NOTE(review): `agrep_TRUE[-1, 1:4]` drops the FIRST matching pair --
# agrep_TRUE starts from c(), so there is no header row to remove.
# Confirm this is intentional before relying on the output.
fuzzy_match1 <- function(df, var, colno) {
col_name <- deparse(substitute(var))
unique_names <- eval(substitute(df))[[col_name]][[colno]]
n_pairs <- (length(unique_names) - 1)
v.agrep <- c()
# Pairwise fuzzy comparison of consecutive sorted values.
for (i in c(1:n_pairs)) {
v.agrep[i] <- agrepl(unique_names[i], unique_names[i + 1])
}
agrep_TRUE <- c()
for (i in which(v.agrep == TRUE)) {
agrep_TRUE <- rbind(agrep_TRUE, c(i, str_to_lower(unique_names[i]), i + 1, str_to_lower(unique_names[i + 1])))
# print(i:(i + 1))
# print(unique_names[i:(i + 1)])
}
# l.agrep[[col_name]] <<- v.agrep
table.matches <- agrep_TRUE[-1,1:4]
colnames(table.matches) <- c("index1", "name1", "index2", "name2")
l.fuzzy_match1[[col_name]] <<- as.tibble(table.matches)
}
## List unique company, product names
# Side effect: fills l.unique$company and l.unique$product.
list_unique(index1, company)
list_unique(index1, product)
l.unique
# Write the templates out for hand correction in Excel.
path2corrections <- paste0(path2cache, "corrections-pIndex01-unique.xlsx")
write.xlsx(l.unique, file = path2corrections)
## Run fuzzy match
# Column 1 of each template holds the original (uncorrected) names.
fuzzy_match1(l.unique, company, 1)
fuzzy_match1(l.unique, product, 1)
l.fuzzy_match1
write.xlsx(l.fuzzy_match1, file = paste0(path2cache, "reference-pIndex01-fuzzy_match1.xlsx"))
### READ IN CORRECTIONS ----
# Read every sheet of the hand-edited workbook into a named list of tibbles.
wbook <- paste0(path2cache, "corrections-pIndex01-unique.xlsx")
lookup2index2 <- wbook %>%
excel_sheets() %>%
set_names() %>%
map(read_excel, path = wbook)
# Attach the corrected names to the raw data via the original-name key.
index2_cache_allvars <- index1 %>%
left_join(lookup2index2$company, by = "company") %>%
left_join(lookup2index2$product, by = "product")
index2_cache_allvars %>% write_csv(paste0(path2cache, "pIndex-01-allvars.csv"))
# Replace company/product with the upper-cased corrections and drop the
# intermediate lookup columns.
index2 <- index2_cache_allvars %>%
mutate(company = str_to_upper(uc.company),
product = str_to_upper(uc.product),
product_brand = str_to_upper(brand)) %>%
select(-one_of(c("u.company", "correction 1", "uc.company", "low.company", "u.product", "low.product", "uc.product", "brand note", "brand")))
index2 %>% write_csv(paste0(path2cache, "pIndex-02-1987to1992.csv"))
## TODO: check product name matches with the same price year (1987) but different prices:
# use unique_product & year == 1987 to subset products, then compare price[i] == price[i + 1] for i in 1:(length(subset) - 1)
| /scripts/pIndex-01-correct_names.R | permissive | cynthiahqy/dataset-pcmag | R | false | false | 3,176 | r | # Correct hand entry mistakes, and missing information
### INITIALISE ----
# load library
library(tidyverse)
library(here)
# library(validate)
library(openxlsx)
library(readxl)
# load functions from pIndex-00-cache.R
# All cached inputs/outputs for the price-index scripts live under this path.
path2cache <- here("spreadsheets/cache/pIndex/")
# read dataframe
# index1: the 1987-1992 price-index table produced by the previous script.
index1 <- read_csv(paste0(path2cache, "pIndex-01-1987to1992.csv"))
### FUZZY MATCHES ----
## initialise
# Global accumulator: one correction template per processed column.
l.unique <- list()
# u. upper case, low. lower case, uc. unique corrections
# Build a correction template for one column of `df`: its sorted unique
# values plus upper-case ("u."), lower-case ("low.") and an editable
# correction column ("uc.", initialised to the lower-case form).
# `var` is passed unquoted (non-standard evaluation via substitute()).
# Side effect: stores the tibble in the global list `l.unique` under the
# column's name; the function's return value is not used.
list_unique <- function(df, var) {
col_name <- deparse(substitute(var))
col_vals <- eval(substitute(df))[[col_name]]
unique_names <- col_vals %>% unique() %>% sort() %>% as.tibble()
unique_names[[2]] <- str_to_upper(unique_names[[1]])
unique_names[[3]] <- str_to_lower(unique_names[[1]])
unique_names[[4]] <- unique_names[[3]]
colnames(unique_names) <- c(col_name, paste0("u.", col_name), paste0("low.", col_name), paste0("uc.", col_name))
l.unique[[col_name]] <<- unique_names
}
# Global accumulator: one table of adjacent fuzzy matches per processed column.
l.fuzzy_match1 <- list()
# Flag likely duplicate spellings: for column `colno` of the `var` entry of
# `df` (a list of correction templates, as built by list_unique), agrepl()
# compares each sorted unique value with its immediate neighbour.  Matching
# pairs are collected as (index1, name1, index2, name2) rows, lower-cased.
# Side effect: stores the result in the global list `l.fuzzy_match1`.
# NOTE(review): `agrep_TRUE[-1, 1:4]` drops the FIRST matching pair --
# agrep_TRUE starts from c(), so there is no header row to remove.
# Confirm this is intentional before relying on the output.
fuzzy_match1 <- function(df, var, colno) {
col_name <- deparse(substitute(var))
unique_names <- eval(substitute(df))[[col_name]][[colno]]
n_pairs <- (length(unique_names) - 1)
v.agrep <- c()
# Pairwise fuzzy comparison of consecutive sorted values.
for (i in c(1:n_pairs)) {
v.agrep[i] <- agrepl(unique_names[i], unique_names[i + 1])
}
agrep_TRUE <- c()
for (i in which(v.agrep == TRUE)) {
agrep_TRUE <- rbind(agrep_TRUE, c(i, str_to_lower(unique_names[i]), i + 1, str_to_lower(unique_names[i + 1])))
# print(i:(i + 1))
# print(unique_names[i:(i + 1)])
}
# l.agrep[[col_name]] <<- v.agrep
table.matches <- agrep_TRUE[-1,1:4]
colnames(table.matches) <- c("index1", "name1", "index2", "name2")
l.fuzzy_match1[[col_name]] <<- as.tibble(table.matches)
}
## List unique company, product names
# Side effect: fills l.unique$company and l.unique$product.
list_unique(index1, company)
list_unique(index1, product)
l.unique
# Write the templates out for hand correction in Excel.
path2corrections <- paste0(path2cache, "corrections-pIndex01-unique.xlsx")
write.xlsx(l.unique, file = path2corrections)
## Run fuzzy match
# Column 1 of each template holds the original (uncorrected) names.
fuzzy_match1(l.unique, company, 1)
fuzzy_match1(l.unique, product, 1)
l.fuzzy_match1
write.xlsx(l.fuzzy_match1, file = paste0(path2cache, "reference-pIndex01-fuzzy_match1.xlsx"))
### READ IN CORRECTIONS ----
# Read every sheet of the hand-edited workbook into a named list of tibbles.
wbook <- paste0(path2cache, "corrections-pIndex01-unique.xlsx")
lookup2index2 <- wbook %>%
excel_sheets() %>%
set_names() %>%
map(read_excel, path = wbook)
# Attach the corrected names to the raw data via the original-name key.
index2_cache_allvars <- index1 %>%
left_join(lookup2index2$company, by = "company") %>%
left_join(lookup2index2$product, by = "product")
index2_cache_allvars %>% write_csv(paste0(path2cache, "pIndex-01-allvars.csv"))
# Replace company/product with the upper-cased corrections and drop the
# intermediate lookup columns.
index2 <- index2_cache_allvars %>%
mutate(company = str_to_upper(uc.company),
product = str_to_upper(uc.product),
product_brand = str_to_upper(brand)) %>%
select(-one_of(c("u.company", "correction 1", "uc.company", "low.company", "u.product", "low.product", "uc.product", "brand note", "brand")))
index2 %>% write_csv(paste0(path2cache, "pIndex-02-1987to1992.csv"))
## TODO: check product name matches with the same price year (1987) but different prices:
# use unique_product & year == 1987 to subset products, then compare price[i] == price[i + 1] for i in 1:(length(subset) - 1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nareit.R
\name{get_data_reit_funds}
\alias{get_data_reit_funds}
\title{REIT fund manager data.}
\usage{
get_data_reit_funds(parse_fund_details = TRUE, return_message = TRUE)
}
\arguments{
\item{parse_fund_details}{\code{TRUE} parses fund details}
\item{return_message}{\code{TRUE} return a message after data import}
}
\value{
\code{data_frame}
}
\description{
This function acquires information for
REIT fund vehicles
}
\examples{
\dontrun{
get_data_reit_funds(parse_fund_details = TRUE, return_message = TRUE))
}
}
\seealso{
Other NAREIT: \code{\link{get_data_nareit_annual_subsector_returns}},
\code{\link{get_data_nareit_capital_raises}},
\code{\link{get_data_nareit_constituent_years}},
\code{\link{get_data_nareit_entities}},
\code{\link{get_data_nareit_industry_tracker}},
\code{\link{get_data_nareit_mergers_acquisitions}},
\code{\link{get_data_nareit_monthly_returns}},
\code{\link{get_data_nareit_notable_properties}},
\code{\link{get_data_nareit_property_msa}},
\code{\link{get_data_nareit_state_info}},
\code{\link{get_reit_entity_dictionary}}
Other entity search: \code{\link{get_data_adv_managers_current_period_summary}},
\code{\link{get_data_adv_managers_filings}},
\code{\link{get_data_adv_managers_metadata}},
\code{\link{get_data_adv_managers_periods_summaries}},
\code{\link{get_data_finra_entities}},
\code{\link{get_data_nareit_entities}},
\code{\link{get_data_rf_leis}},
\code{\link{get_data_rf_sec_13F_companies}},
\code{\link{get_data_sec_bankruptcies}},
\code{\link{get_data_sec_broker_dealers}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_cusips}},
\code{\link{get_data_sec_filer}},
\code{\link{get_data_sec_filing_entities}},
\code{\link{get_data_sec_investment_companies}},
\code{\link{get_data_sec_money_market_funds}},
\code{\link{get_data_sec_municipal_advisors}},
\code{\link{get_data_securities_offerings}},
\code{\link{get_data_us_public_companies}},
\code{\link{get_data_ycombinator_alumni}}
Other fund data: \code{\link{get_data_adv_managers_current_period_summary}},
\code{\link{get_data_adv_managers_filings}},
\code{\link{get_data_adv_managers_metadata}},
\code{\link{get_data_adv_managers_periods_summaries}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_money_market_funds}}
}
| /man/get_data_reit_funds.Rd | permissive | ajiang38740/fundManageR | R | false | true | 2,490 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nareit.R
\name{get_data_reit_funds}
\alias{get_data_reit_funds}
\title{REIT fund manager data.}
\usage{
get_data_reit_funds(parse_fund_details = TRUE, return_message = TRUE)
}
\arguments{
\item{parse_fund_details}{\code{TRUE} parses fund details}
\item{return_message}{\code{TRUE} return a message after data import}
}
\value{
\code{data_frame}
}
\description{
This function acquires information for
REIT fund vehicles
}
\examples{
\dontrun{
get_data_reit_funds(parse_fund_details = TRUE, return_message = TRUE))
}
}
\seealso{
Other NAREIT: \code{\link{get_data_nareit_annual_subsector_returns}},
\code{\link{get_data_nareit_capital_raises}},
\code{\link{get_data_nareit_constituent_years}},
\code{\link{get_data_nareit_entities}},
\code{\link{get_data_nareit_industry_tracker}},
\code{\link{get_data_nareit_mergers_acquisitions}},
\code{\link{get_data_nareit_monthly_returns}},
\code{\link{get_data_nareit_notable_properties}},
\code{\link{get_data_nareit_property_msa}},
\code{\link{get_data_nareit_state_info}},
\code{\link{get_reit_entity_dictionary}}
Other entity search: \code{\link{get_data_adv_managers_current_period_summary}},
\code{\link{get_data_adv_managers_filings}},
\code{\link{get_data_adv_managers_metadata}},
\code{\link{get_data_adv_managers_periods_summaries}},
\code{\link{get_data_finra_entities}},
\code{\link{get_data_nareit_entities}},
\code{\link{get_data_rf_leis}},
\code{\link{get_data_rf_sec_13F_companies}},
\code{\link{get_data_sec_bankruptcies}},
\code{\link{get_data_sec_broker_dealers}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_cusips}},
\code{\link{get_data_sec_filer}},
\code{\link{get_data_sec_filing_entities}},
\code{\link{get_data_sec_investment_companies}},
\code{\link{get_data_sec_money_market_funds}},
\code{\link{get_data_sec_municipal_advisors}},
\code{\link{get_data_securities_offerings}},
\code{\link{get_data_us_public_companies}},
\code{\link{get_data_ycombinator_alumni}}
Other fund data: \code{\link{get_data_adv_managers_current_period_summary}},
\code{\link{get_data_adv_managers_filings}},
\code{\link{get_data_adv_managers_metadata}},
\code{\link{get_data_adv_managers_periods_summaries}},
\code{\link{get_data_sec_ciks}},
\code{\link{get_data_sec_closed_end_funds}},
\code{\link{get_data_sec_money_market_funds}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_data}
\alias{get_data}
\title{Retrieve sensor or annotation data from package contents}
\usage{
get_data(plot_name, type)
}
\arguments{
\item{plot_name}{A plot name.}
\item{type}{Which data object should be returned:
"rgb" for camera imagery
"chm" for a canopy height raster
"lidar" for 3D point clouds,
"hyperspectral" for 426 band raster,
"annotations" for dataframe of bounding box ground truth.}
}
\value{
The filename of the object.
}
\description{
\code{get_data} is a set of utility functions for finding the path of benchmark data on disk
}
\examples{
path <- get_data("SJER_052", "lidar")
}
| /man/get_data.Rd | no_license | weecology/NeonTreeEvaluation_package | R | false | true | 700 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_data}
\alias{get_data}
\title{Retrieve sensor or annotation data from package contents}
\usage{
get_data(plot_name, type)
}
\arguments{
\item{plot_name}{A plot name.}
\item{type}{Which data object should be returned:
"rgb" for camera imagery
"chm" for a canopy height raster
"lidar" for 3D point clouds,
"hyperspectral" for 426 band raster,
"annotations" for dataframe of bounding box ground truth.}
}
\value{
The filename of the object.
}
\description{
\code{get_data} is a set of utility functions for finding the path of benchmark data on disk
}
\examples{
path <- get_data("SJER_052", "lidar")
}
|
# Download the UCI household power consumption data and draw plot3:
# the three energy sub-metering series for 2007-02-01 and 2007-02-02.
temp <- tempfile()
# Network I/O: fetch the zipped data set into a temporary file.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# Read directly from inside the zip; "?" marks missing values in this file.
data <- read.table(unz(temp, "household_power_consumption.txt"),header = TRUE, sep = ";",na.strings = "?")
unlink(temp)
# Combine Date and Time into a POSIXlt timestamp before Date is converted.
data$Time<-strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date<-as.Date(data$Date,"%d/%m/%Y")
# Keep only the two target days.
df<-subset(data,Date==as.Date("2007-02-01")|Date==as.Date("2007-02-02"))
# Render the three sub-metering series to a 480x480 PNG.
png("plot3.png", width=480, height=480)
plot(df$Time,df$Sub_metering_1,
type="l",
ylab="Energy sub metering",
xlab="")
lines(df$Time,df$Sub_metering_2,col="red")
lines(df$Time,df$Sub_metering_3,col="blue")
legend("topright",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=1)
dev.off()
| /plot3.R | no_license | philipmustang/ExData_Plotting1 | R | false | false | 800 | r | temp <- tempfile()
# Download the UCI household power consumption data and draw plot3: the three
# energy sub-metering series for 2007-02-01 and 2007-02-02.
# Network I/O: fetch the zipped data set into the temp file created above.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# Read directly from inside the zip; "?" marks missing values in this file.
data <- read.table(unz(temp, "household_power_consumption.txt"),header = TRUE, sep = ";",na.strings = "?")
unlink(temp)
# Combine Date and Time into a POSIXlt timestamp before Date is converted.
data$Time<-strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date<-as.Date(data$Date,"%d/%m/%Y")
# Keep only the two target days.
df<-subset(data,Date==as.Date("2007-02-01")|Date==as.Date("2007-02-02"))
# Render the three sub-metering series to a 480x480 PNG.
png("plot3.png", width=480, height=480)
plot(df$Time,df$Sub_metering_1,
type="l",
ylab="Energy sub metering",
xlab="")
lines(df$Time,df$Sub_metering_2,col="red")
lines(df$Time,df$Sub_metering_3,col="blue")
legend("topright",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=1)
dev.off()
|
14cf7f547ddcd5bf6c48136d7c3343c4 c4_BMC_p2_k2.qdimacs 304 774 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Mangassarian-Veneris/BMC/c4_BMC_p2_k2/c4_BMC_p2_k2.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 61 | r | 14cf7f547ddcd5bf6c48136d7c3343c4 c4_BMC_p2_k2.qdimacs 304 774 |
profileBySubHull <- function(fixedlist=NA, otherlist=NULL, extrap=F, locchull=NULL,
templateinkg=blackbox.getOption("rosglobal")$par, ##but another $par can be passed if this one is not in locchull
subHull_V_and_H,
max.only=T, ##this is for generateInitpts()
usezoom=F
) { ## Nb in metric units for metric plot.
## Likelihood profile POINT for fixed values of variables fixed, taking Out all variables not specified in fixed
## return value is list(CANON, purefn(canon), ......................)
## Values may be specified for fixed vars and for other variables.
## For the fixed variables, these values are mandatory (they specify for which value the profile is computed).
## For the other variables, these values are optional (they only propose starting values for the optimization algorithms),
## starting values WERE taken from maximizing values (canonVPoutput) if no value is explictly given.
## NOW trying to use convex hull as much as possible (see call to subchullWrapper() )
## Returns NA's for the canonical vector if bas>haut... which is lethal for
## all minimizations algos that somehow use the distance between points
## optimization: starting from rosglobal may miss the profile maximum in some cases;
## In that case, starting from the maximum for previous Nb helps producing smooth figures.
fittedNames <- blackbox.getOption("fittedNames")
## first some checks of the arguments
if ("Nb" %in% c(names(otherlist), names(fixedlist))) {
message.redef(c("(!) From profile(): 'Nb' in function argument is highly suspect"))
stop.redef()
}
notinKgspace <- names(fixedlist) %w/o% fittedNames
checknames <- notinKgspace %w/o% c(blackbox.getOption("ParameterNames"), "latt2Ns2", "Nratio", "NactNfounderratio", "NfounderNancratio")
if (length(checknames)>0) {
message.redef(c("Error: incorrect members ", checknames, " of fixedlist argument to profile() function"))
stop.redef()
} ## ELSE:
## Check that there are values for the 'fixed' variables:
if (any( ! is.numeric(as.numeric(fixedlist)))) {
message.redef("Error: no value for some fixed variable in profile() function:")
print(fixedlist)
return(list(message="Error: no value for some fixed variable in profile() function."))
}
## For extra composite variables
## we can test within-hullness in convex hull of Nb-transformed data,
## which does not make a convex hull in kriging space
## but would have been the hull if we had kriged in 2Ds2-space
###### => computing the hull for extra composite space
## do this only once in the whole FONKgpointls analysis for a given variable
if(is.null(locchull)) locchull <- providefullhull(names(fixedlist))[[1]] ## this works also without any composite variable
## since it comes from providefullhull it must have the affectedConstraints component
## control of test of within subhullness
if (length(notinKgspace)>0) { ## then default is as given in next line else it is double
if ( blackbox.getOption("redundant.mode")=="defaultPrecision") {
precision <- "double"
} else {precision <- blackbox.getOption("redundant.mode")}
} else precision <- "double"
######
## Now we know the variables of locchull ('full' dimensional, e.g. 2Nmu, 2Ds2, g), and this must exactly be those of bestProfiledOut + fixedlist !
## we first look whether there is something to profile
## It is much more efficient to add constraint to existing constraints $Hrep than using the vertices
if (missing(subHull_V_and_H)) subHull_V_and_H <- subHullWrapper(vertices=locchull$vertices, equality=fixedlist,
include.Hrepr=TRUE,
precision=precision ## FR->FR argument added 10/2015
)
subvertices <- subHull_V_and_H$vertices
if (is.null(subvertices)) { ## called when no point satisfy the constraints: safe exit from profile() with info
# suggests that testPoint is out of kriged points...
paramnbr <- blackbox.getOption("paramnbr")
return(list(fixedlist=fixedlist,
full=rep(NA, paramnbr),
par=rep(NA, paramnbr),
message="safe exit from profile() because is.null(subvertices)",
value=NA, ## NA is important else -Inf -> numeric RelLik value -> plotted.
inKgspace=FALSE,
subHull_V_and_H=subHull_V_and_H)
)
} ## null subvertices exit
## else
subverticesmax <- apply(subvertices, 2, max)
subverticesmin <- apply(subvertices, 2, min)
localmaxrange <- subverticesmax-subverticesmin
if (nrow(subvertices)==1 ##point unique: 'profile' is easily computed
|| max(localmaxrange)<0.000001 ## several rows but very close to each over
## so we are at the edge of the hull and numerical problems could occur
## + we don't need more precision
## FR->FR replaced 0.00001 by 0.000001 03/2011... very ad hoc anyway
) { ##point unique: 'profile' is easily computed
point <- colMeans(subvertices) ## don't lose colnames !
vkrig <- tofullKrigingspace(point, fixedlist) ## conversion to full kriging param space
canon <- canonize(vkrig)$canonVP ## completion/conversion to canonical
if (length(notinKgspace)>0 || extrap==T) { ## the two cases where we don't already know the point to be in Kgspace
inKgspace <- isPointInCHull(vkrig, constraints=blackbox.getOption("hulls")$Kgtotal[c("a", "b")])
} else inKgspace <- TRUE
return(list(message="", full=canon, par=point, value=purefn(vkrig, testhull=FALSE), subHull_V_and_H=subHull_V_and_H, inKgspace=inKgspace))
} ## ELSE : non trivial profiling to perform
#######################################################################
## currently (10/2011) precision controls
## (1) the call to subchullWrapper -> subchull -> redundant (but not the returnType which was always floating point)
## (2) whether constrOptim or constrOptimR is called...
## note that some code in subchull remains in double precision because there in no rational version of addHeq
## within hull if ui.x-ci>0 and Ax-b<0 i.e (-A)x+b>0 ie ui=(-A)= cols[-(1:2)] of the hrepr, ci= - col2
## subHullWrapper renvoit $Hrepr= qui inclut en colonnes b=-ci, et ui
ui <- subHull_V_and_H$Hrepr[, -c(1, 2), drop=FALSE]
ci <- - subHull_V_and_H$Hrepr[, 2] ## within-hullness occurs for ui %*% x -ci => 0
#### FR->FR a perhaps better way would be that subchullWrapper returns vertices as floating point, and constraints as <precision>,
if (precision=="rational") {ui <- d2q(ui);ci <- d2q(ci)}
## note that subvertices was computed in double precision because there in no rational version of addHeq
## We need to find a starting point for maximization
##see the subvertices as a disk in 3D space
## if roglobal is close to the inside of a face
## a vector inside the disk deduced heuristically from rosglobal and otherlist can be a good start point
## we construct such a point, unsure to be in the convex hull
## next we consider the case were the likelihood can be maximized on the edge of the coin
## finally we explore the face of the coin
profiledOutPars <- colnames(locchull$vertices) %w/o% names(fixedlist)
### first the unsure vector
bestProfiledOut <- as.numeric(array(NA, length(profiledOutPars)))
names(bestProfiledOut) <- profiledOutPars
### first the unsure vector
locnames <- intersect(profiledOutPars, fittedNames)
bestProfiledOut[locnames] <- templateinkg[locnames] ## rosglobal by default
## note that the scale of the hull should already be that of kriging, no additional log transf should be needed ##
## otherlist overrides the previous values
if (!is.null(otherlist)) {
locnames <- intersect(profiledOutPars, names(otherlist))
bestProfiledOut[locnames] <- unlist(otherlist[locnames])
## note that in current use otherlist values come from a grid generated in Kriging space, so again no additional log transf should be needed ##
}
if (any(is.na(bestProfiledOut))) {
message.redef("(!) any(is.na(bestProfiledOut))")
message.redef(paste("fixedlist was", fixedlist))
message.redef(paste("otherlist was", otherlist))
message.redef(paste("colnames(locchull$vertices) were", colnames(locchull$vertices)))
}
################
## designed to construct safe starting point(s) from an unsafe one
candidates <- generateInitpts(bestProfiledOut=bestProfiledOut, vertices=subvertices, ui=ui, ci=ci, hrepr=subHull_V_and_H$Hrepr,
fixedlist=fixedlist,
precision=precision, max.only=max.only)
# candidates <- generateInitpts(bestProfiledOut=bestProfiledOut, locsubchull=subvertices, ui=ui, ci=ci, fixedlist=fixedlist,
# precision=precision, max.only=max.only)
################
if (usezoom) { ## then additionally constructs another starting point (found useful at least once for a local maximum)
## a slow but more general methodn that was apparently useful for a LRT on latt2Ns2 when for kriging on condS2 and Nm is better estimated than 2Ns2
## usezoom currently true only in LRTfn -> profile
cP <- zoomProfile(fixedlist=fixedlist, extrap=extrap, locchull=locchull,
templateinkg=templateinkg, ## conversion to local hull within the function
precision=precision ##FR->FR NO NEED FOR RATIONAL HERE ??
) ## returns in canonical space
bestProfiledOut <- fromFONKtoanyspace(tofullKrigingspace(cP$par),
colnames(locchull$vertices))[profiledOutPars]
################
## designed to construct safe starting point(s) from an unsafe one
candidateszoom <- generateInitpts(bestProfiledOut=bestProfiledOut, vertices=subvertices, ui=ui, ci=ci, hrepr=subHull_V_and_H$Hrepr,
fixedlist=fixedlist,
precision=precision, max.only=max.only)
# candidateszoom <- generateInitpts(bestProfiledOut=bestProfiledOut, locsubchull=subvertices, ui=ui, ci=ci, fixedlist=fixedlist,
# precision=precision, max.only=max.only)
################
candidates <- c(candidates, candidateszoom)
}
## end of construction of bestProfiledOut
### Finally we have (a set of) candidate(s) in the hull of the profile. We maximize
if ("NLOPT_LN_COBYLA" %in% blackbox.getOption("optimizers")) {
## minimization of - logL
objfn_nloptr <- function(x,ui,ci) { ## all functions should have the same args. ui and ci will be ignored in this fn
names(x) <- names(bestProfiledOut) ## nloptr tends to lose names
return(- tofKpredict.nohull(x, fixedlist=fixedlist))
}
eval_g_ineq <- function(x,ui,ci) {max(ci - ui %*% x)} ## must be <0
try_nloptr_wrap <- function(...) {
resu <- try(nloptr(...))
if (! inherits(resu,"try-error")) {
resu$par <-resu$solution
names(resu$par) <- names(bestProfiledOut)
resu$value <- - resu$objective # back to + logL
}
return(resu)
}
} else {
## maximization of objfn = + logL (control$fnscale=-1)
objfn <- function(x) {tofKpredict.nohull(x, fixedlist=fixedlist)} ## the simple way to pass fixedlist to the inner computations...
objfn.grad <- function(x) {grad(func=objfn, x=x)} ## no need to specify fixedlist here
}
## initial value in 'vmin' (or 'vmmin') is not finite: function fun:=R(theta, theta.old, ...) may have returned Inf which means it considers the point is not in the hull represented by ui, ci
control <- list(fnscale = -1/blackbox.getOption("scalefactor"), trace = FALSE, maxit = 10000)
parscale <- localmaxrange
control <- c(control, list(parscale=parscale/blackbox.getOption("scalefactor"))) ## don't forget scalefactor here as it's in fnscale...
bestresu <- list(value=- Inf)
for (candidate in candidates) {
bestProfiledOut <- candidate$par
if (is.matrix(bestProfiledOut)) stop.redef("(!) is.matrix(bestProfiledOut)")
if (is.null(names(bestProfiledOut))) stop.redef("(!) is.null(names(bestProfiledOut))")
if (precision=="rational") {
resu <- try(constrOptimR(unlist(bestProfiledOut), objfn, grad=objfn.grad, ui=ui, ci=ci , mu=1e-08, ## a low mu appear important
method = "BFGS",
control = control)
)
} else {
if ("NLOPT_LN_COBYLA" %in% blackbox.getOption("optimizers")) { ## not efficient
resu <- try_nloptr_wrap(x0=unlist(bestProfiledOut),eval_f=objfn_nloptr,eval_g_ineq=eval_g_ineq,ui=ui,ci=ci,
opts=list(algorithm="NLOPT_LN_COBYLA",xtol_rel=1.0e-5,maxeval=-1)) ## BOBYQA = only BOund constraints
## dans constrOptim, le gradient est essentiel pour que l'optimisation contrainte marche
## Un constrOptim qui appelle NLOPT devrait donc utiliser une method avec gradient
} else {
resu <- try(constrOptim(unlist(bestProfiledOut), objfn, grad=objfn.grad, ui=ui, ci=ci , mu=1e-08, ## a low mu appear important
# localmaxrange=localmaxrange, ## arg de tofKpredict.nohull.grad
method = "BFGS",
# fixedlist = fixedlist,
control = control)
)
}
}
if (! inherits(resu,"try-error") && resu$value>bestresu$value) bestresu <- resu
} ## end loop over candidate starting points
if (bestresu$value==-Inf) {
## parameter value a edge or outside kriged range ? Seems to work most of the time, but far from always, when at edge
FONKgNames <- blackbox.getOption("FONKgNames")
zut <- list(full=rep(NA, length(FONKgNames)), par=rep(NA, length(FONKgNames)), value=NA, message="no valid constrOptim(R) return value within profile().",
inKgspace=F,
edgelevel=0, edge=list(), subHull_V_and_H=subHull_V_and_H)
return(zut)
}
# ELSE
resu <- bestresu
## One problem, however, is that such algos are a bit stuck when they meet a face of the hull
## so we will find whether this is so, and if so we will optimize on the face found
## (it is still possible that we are on a wrong face... => rr=0 or 1...)
fullerhull <- matchVertCons(locchull) ## In grid calls, time will be saved if locchull already contains the affectedConstraints.
edgecheck <- handlesPointAtEdge(point=resu$par, fullerhull=fullerhull, fixedlist=fixedlist)
if (!is.null(edgecheck$edge)) {
locedge <- t(apply(edgecheck$edge, 1, tofullKrigingspace, fixedlist=fixedlist))
colnames(locedge) <- blackbox.getOption("FONKgNames")
addtoedges(, locedge)
}
if (!is.null(edgecheck$resu)) {
if (edgecheck$resu$value>resu$value) {
resu <- edgecheck$resu
} ## else resu near the edge remains better that the best point on the edge => nothing to do
}
## resu$par is a vector of parameters profiled out
vkrig <- tofullKrigingspace(resu$par, fixedlist) ## conversion to full kriging param space
canon <- canonize(vkrig)$canonVP ## completion/conversion to canonical
zut <- resu;
if (length(notinKgspace)>0 || extrap==T) { ## the two cases where we don't already know the point to be in Kgspace
inKgspace <- isPointInCHull(vkrig, constraints=blackbox.getOption("hulls")$Kgtotal[c("a", "b")])
} else inKgspace <- T
## 'full' must be a suitable argument for tofullKrigingspace; zut$par is the $par element of the return value of constrOptim
zut <- c(zut, list(full=canon, message="", edgelevel=edgecheck$edgelevel, inKgspace=inKgspace, edge=list(edgecheck$edge), subHull_V_and_H=subHull_V_and_H))
return(zut) ## zut$canon is vector in canonical param space while zut$par (not always in return value) is vector of parameters profiled out
} ## end profile(...)
| /blackbox/R/profileFn.R | no_license | ingted/R-Examples | R | false | false | 16,360 | r | profileBySubHull <- function(fixedlist=NA, otherlist=NULL, extrap=F, locchull=NULL,
templateinkg=blackbox.getOption("rosglobal")$par, ##but another $par can be passed if this one is not in locchull
subHull_V_and_H,
max.only=T, ##this is for generateInitpts()
usezoom=F
) { ## Nb in metric units for metric plot.
## Likelihood profile POINT for fixed values of variables fixed, taking Out all variables not specified in fixed
## return value is list(CANON, purefn(canon), ......................)
## Values may be specified for fixed vars and for other variables.
## For the fixed variables, these values are mandatory (they specify for which value the profile is computed).
## For the other variables, these values are optional (they only propose starting values for the optimization algorithms),
## starting values WERE taken from maximizing values (canonVPoutput) if no value is explictly given.
## NOW trying to use convex hull as much as possible (see call to subchullWrapper() )
## Returns NA's for the canonical vector if bas>haut... which is lethal for
## all minimizations algos that somehow use the distance between points
## optimization: starting from rosglobal may miss the profile maximum in some cases;
## In that case, starting from the maximum for previous Nb helps producing smooth figures.
fittedNames <- blackbox.getOption("fittedNames")
## first some checks of the arguments
if ("Nb" %in% c(names(otherlist), names(fixedlist))) {
message.redef(c("(!) From profile(): 'Nb' in function argument is highly suspect"))
stop.redef()
}
notinKgspace <- names(fixedlist) %w/o% fittedNames
checknames <- notinKgspace %w/o% c(blackbox.getOption("ParameterNames"), "latt2Ns2", "Nratio", "NactNfounderratio", "NfounderNancratio")
if (length(checknames)>0) {
message.redef(c("Error: incorrect members ", checknames, " of fixedlist argument to profile() function"))
stop.redef()
} ## ELSE:
## Check that there are values for the 'fixed' variables:
if (any( ! is.numeric(as.numeric(fixedlist)))) {
message.redef("Error: no value for some fixed variable in profile() function:")
print(fixedlist)
return(list(message="Error: no value for some fixed variable in profile() function."))
}
## For extra composite variables
## we can test within-hullness in convex hull of Nb-transformed data,
## which does not make a convex hull in kriging space
## but would have been the hull if we had kriged in 2Ds2-space
###### => computing the hull for extra composite space
## do this only once in the whole FONKgpointls analysis for a given variable
if(is.null(locchull)) locchull <- providefullhull(names(fixedlist))[[1]] ## this works also without any composite variable
## since it comes from providefullhull it must have the affectedConstraints component
## control of test of within subhullness
if (length(notinKgspace)>0) { ## then default is as given in next line else it is double
if ( blackbox.getOption("redundant.mode")=="defaultPrecision") {
precision <- "double"
} else {precision <- blackbox.getOption("redundant.mode")}
} else precision <- "double"
######
## Now we know the variables of locchull ('full' dimensional, e.g. 2Nmu, 2Ds2, g), and this must exactly be those of bestProfiledOut + fixedlist !
## we first look whether there is something to profile
## It is much more efficient to add constraint to existing constraints $Hrep than using the vertices
if (missing(subHull_V_and_H)) subHull_V_and_H <- subHullWrapper(vertices=locchull$vertices, equality=fixedlist,
include.Hrepr=TRUE,
precision=precision ## FR->FR argument added 10/2015
)
subvertices <- subHull_V_and_H$vertices
if (is.null(subvertices)) { ## called when no point satisfy the constraints: safe exit from profile() with info
# suggests that testPoint is out of kriged points...
paramnbr <- blackbox.getOption("paramnbr")
return(list(fixedlist=fixedlist,
full=rep(NA, paramnbr),
par=rep(NA, paramnbr),
message="safe exit from profile() because is.null(subvertices)",
value=NA, ## NA is important else -Inf -> numeric RelLik value -> plotted.
inKgspace=FALSE,
subHull_V_and_H=subHull_V_and_H)
)
} ## null subvertices exit
## else
subverticesmax <- apply(subvertices, 2, max)
subverticesmin <- apply(subvertices, 2, min)
localmaxrange <- subverticesmax-subverticesmin
if (nrow(subvertices)==1 ##point unique: 'profile' is easily computed
|| max(localmaxrange)<0.000001 ## several rows but very close to each over
## so we are at the edge of the hull and numerical problems could occur
## + we don't need more precision
## FR->FR replaced 0.00001 by 0.000001 03/2011... very ad hoc anyway
) { ##point unique: 'profile' is easily computed
point <- colMeans(subvertices) ## don't lose colnames !
vkrig <- tofullKrigingspace(point, fixedlist) ## conversion to full kriging param space
canon <- canonize(vkrig)$canonVP ## completion/conversion to canonical
if (length(notinKgspace)>0 || extrap==T) { ## the two cases where we don't already know the point to be in Kgspace
inKgspace <- isPointInCHull(vkrig, constraints=blackbox.getOption("hulls")$Kgtotal[c("a", "b")])
} else inKgspace <- TRUE
return(list(message="", full=canon, par=point, value=purefn(vkrig, testhull=FALSE), subHull_V_and_H=subHull_V_and_H, inKgspace=inKgspace))
} ## ELSE : non trivial profiling to perform
#######################################################################
## currently (10/2011) precision controls
## (1) the call to subchullWrapper -> subchull -> redundant (but not the returnType which was always floating point)
## (2) whether constrOptim or constrOptimR is called...
## note that some code in subchull remains in double precision because there in no rational version of addHeq
## within hull if ui.x-ci>0 and Ax-b<0 i.e (-A)x+b>0 ie ui=(-A)= cols[-(1:2)] of the hrepr, ci= - col2
## subHullWrapper renvoit $Hrepr= qui inclut en colonnes b=-ci, et ui
ui <- subHull_V_and_H$Hrepr[, -c(1, 2), drop=FALSE]
ci <- - subHull_V_and_H$Hrepr[, 2] ## within-hullness occurs for ui %*% x -ci => 0
#### FR->FR a perhaps better way would be that subchullWrapper returns vertices as floating point, and constraints as <precision>,
if (precision=="rational") {ui <- d2q(ui);ci <- d2q(ci)}
## note that subvertices was computed in double precision because there in no rational version of addHeq
## We need to find a starting point for maximization
##see the subvertices as a disk in 3D space
## if roglobal is close to the inside of a face
## a vector inside the disk deduced heuristically from rosglobal and otherlist can be a good start point
## we construct such a point, unsure to be in the convex hull
## next we consider the case were the likelihood can be maximized on the edge of the coin
## finally we explore the face of the coin
profiledOutPars <- colnames(locchull$vertices) %w/o% names(fixedlist)
### first the unsure vector
bestProfiledOut <- as.numeric(array(NA, length(profiledOutPars)))
names(bestProfiledOut) <- profiledOutPars
### first the unsure vector
locnames <- intersect(profiledOutPars, fittedNames)
bestProfiledOut[locnames] <- templateinkg[locnames] ## rosglobal by default
## note that the scale of the hull should already be that of kriging, no additional log transf should be needed ##
## otherlist overrides the previous values
if (!is.null(otherlist)) {
locnames <- intersect(profiledOutPars, names(otherlist))
bestProfiledOut[locnames] <- unlist(otherlist[locnames])
## note that in current use otherlist values come from a grid generated in Kriging space, so again no additional log transf should be needed ##
}
if (any(is.na(bestProfiledOut))) {
message.redef("(!) any(is.na(bestProfiledOut))")
message.redef(paste("fixedlist was", fixedlist))
message.redef(paste("otherlist was", otherlist))
message.redef(paste("colnames(locchull$vertices) were", colnames(locchull$vertices)))
}
################
## designed to construct safe starting point(s) from an unsafe one
candidates <- generateInitpts(bestProfiledOut=bestProfiledOut, vertices=subvertices, ui=ui, ci=ci, hrepr=subHull_V_and_H$Hrepr,
fixedlist=fixedlist,
precision=precision, max.only=max.only)
# candidates <- generateInitpts(bestProfiledOut=bestProfiledOut, locsubchull=subvertices, ui=ui, ci=ci, fixedlist=fixedlist,
# precision=precision, max.only=max.only)
################
if (usezoom) { ## then additionally constructs another starting point (found useful at least once for a local maximum)
## a slow but more general methodn that was apparently useful for a LRT on latt2Ns2 when for kriging on condS2 and Nm is better estimated than 2Ns2
## usezoom currently true only in LRTfn -> profile
cP <- zoomProfile(fixedlist=fixedlist, extrap=extrap, locchull=locchull,
templateinkg=templateinkg, ## conversion to local hull within the function
precision=precision ##FR->FR NO NEED FOR RATIONAL HERE ??
) ## returns in canonical space
bestProfiledOut <- fromFONKtoanyspace(tofullKrigingspace(cP$par),
colnames(locchull$vertices))[profiledOutPars]
################
## designed to construct safe starting point(s) from an unsafe one
candidateszoom <- generateInitpts(bestProfiledOut=bestProfiledOut, vertices=subvertices, ui=ui, ci=ci, hrepr=subHull_V_and_H$Hrepr,
fixedlist=fixedlist,
precision=precision, max.only=max.only)
# candidateszoom <- generateInitpts(bestProfiledOut=bestProfiledOut, locsubchull=subvertices, ui=ui, ci=ci, fixedlist=fixedlist,
# precision=precision, max.only=max.only)
################
candidates <- c(candidates, candidateszoom)
}
## end of construction of bestProfiledOut
### Finally we have (a set of) candidate(s) in the hull of the profile. We maximize
if ("NLOPT_LN_COBYLA" %in% blackbox.getOption("optimizers")) {
## minimization of - logL
objfn_nloptr <- function(x,ui,ci) { ## all functions should have the same args. ui and ci will be ignored in this fn
names(x) <- names(bestProfiledOut) ## nloptr tends to lose names
return(- tofKpredict.nohull(x, fixedlist=fixedlist))
}
eval_g_ineq <- function(x,ui,ci) {max(ci - ui %*% x)} ## must be <0
try_nloptr_wrap <- function(...) {
resu <- try(nloptr(...))
if (! inherits(resu,"try-error")) {
resu$par <-resu$solution
names(resu$par) <- names(bestProfiledOut)
resu$value <- - resu$objective # back to + logL
}
return(resu)
}
} else {
## maximization of objfn = + logL (control$fnscale=-1)
objfn <- function(x) {tofKpredict.nohull(x, fixedlist=fixedlist)} ## the simple way to pass fixedlist to the inner computations...
objfn.grad <- function(x) {grad(func=objfn, x=x)} ## no need to specify fixedlist here
}
## initial value in 'vmin' (or 'vmmin') is not finite: function fun:=R(theta, theta.old, ...) may have returned Inf which means it considers the point is not in the hull represented by ui, ci
control <- list(fnscale = -1/blackbox.getOption("scalefactor"), trace = FALSE, maxit = 10000)
parscale <- localmaxrange
control <- c(control, list(parscale=parscale/blackbox.getOption("scalefactor"))) ## don't forget scalefactor here as it's in fnscale...
bestresu <- list(value=- Inf)
for (candidate in candidates) {
bestProfiledOut <- candidate$par
if (is.matrix(bestProfiledOut)) stop.redef("(!) is.matrix(bestProfiledOut)")
if (is.null(names(bestProfiledOut))) stop.redef("(!) is.null(names(bestProfiledOut))")
if (precision=="rational") {
resu <- try(constrOptimR(unlist(bestProfiledOut), objfn, grad=objfn.grad, ui=ui, ci=ci , mu=1e-08, ## a low mu appear important
method = "BFGS",
control = control)
)
} else {
if ("NLOPT_LN_COBYLA" %in% blackbox.getOption("optimizers")) { ## not efficient
resu <- try_nloptr_wrap(x0=unlist(bestProfiledOut),eval_f=objfn_nloptr,eval_g_ineq=eval_g_ineq,ui=ui,ci=ci,
opts=list(algorithm="NLOPT_LN_COBYLA",xtol_rel=1.0e-5,maxeval=-1)) ## BOBYQA = only BOund constraints
## dans constrOptim, le gradient est essentiel pour que l'optimisation contrainte marche
## Un constrOptim qui appelle NLOPT devrait donc utiliser une method avec gradient
} else {
resu <- try(constrOptim(unlist(bestProfiledOut), objfn, grad=objfn.grad, ui=ui, ci=ci , mu=1e-08, ## a low mu appear important
# localmaxrange=localmaxrange, ## arg de tofKpredict.nohull.grad
method = "BFGS",
# fixedlist = fixedlist,
control = control)
)
}
}
if (! inherits(resu,"try-error") && resu$value>bestresu$value) bestresu <- resu
} ## end loop over candidate starting points
if (bestresu$value==-Inf) {
## parameter value a edge or outside kriged range ? Seems to work most of the time, but far from always, when at edge
FONKgNames <- blackbox.getOption("FONKgNames")
zut <- list(full=rep(NA, length(FONKgNames)), par=rep(NA, length(FONKgNames)), value=NA, message="no valid constrOptim(R) return value within profile().",
inKgspace=F,
edgelevel=0, edge=list(), subHull_V_and_H=subHull_V_and_H)
return(zut)
}
# ELSE
resu <- bestresu
## One problem, however, is that such algos are a bit stuck when they meet a face of the hull
## so we will find whether this is so, and if so we will optimize on the face found
## (it is still possible that we are on a wrong face... => rr=0 or 1...)
fullerhull <- matchVertCons(locchull) ## In grid calls, time will be saved if locchull already contains the affectedConstraints.
edgecheck <- handlesPointAtEdge(point=resu$par, fullerhull=fullerhull, fixedlist=fixedlist)
if (!is.null(edgecheck$edge)) {
locedge <- t(apply(edgecheck$edge, 1, tofullKrigingspace, fixedlist=fixedlist))
colnames(locedge) <- blackbox.getOption("FONKgNames")
addtoedges(, locedge)
}
if (!is.null(edgecheck$resu)) {
if (edgecheck$resu$value>resu$value) {
resu <- edgecheck$resu
} ## else resu near the edge remains better that the best point on the edge => nothing to do
}
## resu$par is a vector of parameters profiled out
vkrig <- tofullKrigingspace(resu$par, fixedlist) ## conversion to full kriging param space
canon <- canonize(vkrig)$canonVP ## completion/conversion to canonical
zut <- resu;
if (length(notinKgspace)>0 || extrap==T) { ## the two cases where we don't already know the point to be in Kgspace
inKgspace <- isPointInCHull(vkrig, constraints=blackbox.getOption("hulls")$Kgtotal[c("a", "b")])
} else inKgspace <- T
## 'full' must be a suitable argument for tofullKrigingspace; zut$par is the $par element of the return value of constrOptim
zut <- c(zut, list(full=canon, message="", edgelevel=edgecheck$edgelevel, inKgspace=inKgspace, edge=list(edgecheck$edge), subHull_V_and_H=subHull_V_and_H))
return(zut) ## zut$canon is vector in canonical param space while zut$par (not always in return value) is vector of parameters profiled out
} ## end profile(...)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the stored
## matrix `x` and its cached inverse:
##   set(y)          replace the stored matrix and drop the stale cache
##   get()           return the stored matrix
##   setinverse(i)   store a computed inverse in the cache
##   getinverse()    return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the old inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned directly; otherwise it
## is computed with solve(), stored back in the cache, and returned.
## `...` is forwarded to solve().
##
## Bug fix: the original had a misplaced closing brace right after the
## cache check, so the function ended there (a cache miss returned NULL
## invisibly) and the compute-and-cache lines sat outside the function as
## syntactically invalid top-level code. They now live inside the function.
cacheSolve <- function(x, ...)
{
  inv <- x$getinverse()
  if (!is.null(inv)) {
    return(inv)  # cache hit: skip the (potentially costly) solve()
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  return(inv)
}
} | /cachematrix.R | no_license | ThayaSubramanian/ProgrammingAssignment2 | R | false | false | 731 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix())
{
inv <- NULL
set <- function (y)
{
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get=get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...)
{
inv <- x$getinverse()
if(!is.null(inv))
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
return(inv)
} |
# Exploratory PCA and clustering walk-through.
# Part 1 runs a scaled PCA on `pokemon`; NOTE(review): `pokemon` is not
# defined in this file -- it must be loaded before sourcing this script.
# Part 2 repeats PCA on the Wisconsin breast-cancer data (downloaded over
# the network), then compares hierarchical and k-means cluster assignments
# against the malignant/benign diagnosis.
#Principal Component analysis
#This method is used to find similar patterns/features in dataset
# Perform scaled PCA: pr.out
pr.out=prcomp(x=pokemon,scale=TRUE,center=TRUE)
# Inspect model output
summary(pr.out)
# Variability of each principal component: pr.var
pr.var <- pr.out$sdev^2
# Variance explained by each principal component: pve
pve <- pr.var /sum(pr.var)
# Plot variance explained for each principal component (scree plot)
plot(pve, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Plot cumulative proportion of variance explained
plot(cumsum(pve), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
#performing pca,clustering on one data
url <- "http://s3.amazonaws.com/assets.datacamp.com/production/course_1903/datasets/WisconsinCancer.csv"
# Download the data: wisc.df (network access required)
wisc.df=read.csv(url)
# Convert the features of the data: wisc.data (columns 3:32 are the numeric features)
wisc.data<-as.matrix(wisc.df[3:32])
# Set the row names of wisc.data to the patient ids
row.names(wisc.data) <- wisc.df$id
# Create diagnosis vector: 1 = malignant ("M"), 0 = benign
diagnosis <- as.numeric(wisc.df$diagnosis == "M")
# Check column means and standard deviations (motivates scale=TRUE below)
colMeans(wisc.data)
apply(wisc.data,2,sd)
# Execute PCA, scaling if appropriate: wisc.pr
wisc.pr=prcomp(wisc.data,scale=TRUE,center=TRUE)
# Look at summary of results
summary(wisc.pr)
# Create a biplot of wisc.pr
biplot(wisc.pr)
# Scatter plot observations by components 1 and 2, colored by diagnosis
plot(wisc.pr$x[, c(1, 2)], col = (diagnosis + 1),
xlab = "PC1", ylab = "PC2")
# Repeat for components 1 and 3
plot(wisc.pr$x[,c(1,3)], col = (diagnosis + 1),
xlab = "PC1", ylab = "PC3")
# Do additional data exploration of your choosing below (optional)
# Set up 1 x 2 plotting grid
par(mfrow = c(1, 2))
# Calculate variability of each component
pr.var <- wisc.pr$sdev^2
# Variance explained by each principal component: pve
pve <- pr.var / sum(pr.var)
# Plot variance explained for each principal component
plot(pve, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Plot cumulative proportion of variance explained
plot(cumsum(pve), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Scale the wisc.data data: data.scaled
data.scaled=scale(wisc.data)
# Calculate the (Euclidean) distances: data.dist
data.dist=dist(data.scaled)
# Create a hierarchical clustering model: wisc.hclust (complete linkage)
wisc.hclust=hclust(data.dist,method='complete')
# Cut tree so that it has 4 clusters: wisc.hclust.clusters
wisc.hclust.clusters=cutree(wisc.hclust,k=4)
# Compare cluster membership to actual diagnoses (cross-tabulation)
table(wisc.hclust.clusters,diagnosis)
# Create a k-means model on wisc.data: wisc.km (2 centers, 20 random starts)
wisc.km <- kmeans(scale(wisc.data), centers = 2, nstart = 20)
# Compare k-means to actual diagnoses
table(wisc.km$cluster, diagnosis)
# Compare k-means to hierarchical clustering
table(wisc.hclust.clusters, wisc.km$cluster) | /pca.r | no_license | yashreds/hello-world | R | false | false | 3,158 | r | #Principal Component analysis
#This method is used to find similar patterns/features in dataset
# Perform scaled PCA: pr.out
pr.out=prcomp(x=pokemon,scale=TRUE,center=TRUE)
# Inspect model output
summary(pr.out)
# Variability of each principal component: pr.var
pr.var <- pr.out$sdev^2
# Variance explained by each principal component: pve
pve <- pr.var /sum(pr.var)
# Plot variance explained for each principal component
plot(pve, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Plot cumulative proportion of variance explained
plot(cumsum(pve), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
#performing pca,clustering on one data
url <- "http://s3.amazonaws.com/assets.datacamp.com/production/course_1903/datasets/WisconsinCancer.csv"
# Download the data: wisc.df
wisc.df=read.csv(url)
# Convert the features of the data: wisc.data
wisc.data<-as.matrix(wisc.df[3:32])
# Set the row names of wisc.data
row.names(wisc.data) <- wisc.df$id
# Create diagnosis vector
diagnosis <- as.numeric(wisc.df$diagnosis == "M")
# Check column means and standard deviations
colMeans(wisc.data)
apply(wisc.data,2,sd)
# Execute PCA, scaling if appropriate: wisc.pr
wisc.pr=prcomp(wisc.data,scale=TRUE,center=TRUE)
# Look at summary of results
summary(wisc.pr)
# Create a biplot of wisc.pr
biplot(wisc.pr)
# Scatter plot observations by components 1 and 2
plot(wisc.pr$x[, c(1, 2)], col = (diagnosis + 1),
xlab = "PC1", ylab = "PC2")
# Repeat for components 1 and 3
plot(wisc.pr$x[,c(1,3)], col = (diagnosis + 1),
xlab = "PC1", ylab = "PC3")
# Do additional data exploration of your choosing below (optional)
# Set up 1 x 2 plotting grid
par(mfrow = c(1, 2))
# Calculate variability of each component
pr.var <- wisc.pr$sdev^2
# Variance explained by each principal component: pve
pve <- pr.var / sum(pr.var)
# Plot variance explained for each principal component
plot(pve, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Plot cumulative proportion of variance explained
plot(cumsum(pve), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
ylim = c(0, 1), type = "b")
# Scale the wisc.data data: data.scaled
data.scaled=scale(wisc.data)
# Calculate the (Euclidean) distances: data.dist
data.dist=dist(data.scaled)
# Create a hierarchical clustering model: wisc.hclust
wisc.hclust=hclust(data.dist,method='complete')
# Cut tree so that it has 4 clusters: wisc.hclust.clusters
wisc.hclust.clusters=cutree(wisc.hclust,k=4)
# Compare cluster membership to actual diagnoses
table(wisc.hclust.clusters,diagnosis)
# Create a k-means model on wisc.data: wisc.km
wisc.km <- kmeans(scale(wisc.data), centers = 2, nstart = 20)
# Compare k-means to actual diagnoses
table(wisc.km$cluster, diagnosis)
# Compare k-means to hierarchical clustering
table(wisc.hclust.clusters, wisc.km$cluster) |
# Two-sample comparison of butterfat percentage between Ayrshire and
# Canadian cows (PASWR2::COWS): Welch t-tests plus randomization tests.
library(PASWR2)
X <- COWS$butterfat[COWS$breed =="Ayrshire"]
Y <- COWS$butterfat[COWS$breed =="Canadian"]
# Welch two-sample t-test for a difference in mean butterfat
t.test(X, Y)
# Pool the two samples for the randomization test
FAT <- c(X, Y)
R <- 10^4 - 1
TS <- numeric(R)
# NOTE(review): sample(20, 20, replace = FALSE) draws a full permutation of
# 1:20, so FAT[-index] is empty whenever FAT has 20 elements (mean -> NaN).
# A two-sample randomization test normally assigns only one group's worth
# of indices, e.g. sample(length(FAT), length(X)) -- verify against the
# course notes before relying on this p-value.
for(i in 1:R){
index <- sample(20, 20, replace = FALSE)
TS[i] <- mean(FAT[index]) - mean(FAT[-index])
}
# NOTE(review): TS holds differences of means, yet it is compared with a
# t statistic below -- the two are on different scales; the final block
# (PTS) compares t statistics consistently.
pvalue <- ((sum(TS >= abs(t.test(X, Y)$stat)))*2 + 1) / (R + 1)
pvalue
# Consider using dplyr
library(dplyr)
# Same analysis, but subsetting the two breeds with dplyr::filter()
DF <- COWS %>%
filter(breed == "Ayrshire" | breed == "Canadian")
DF
X <- DF$butterfat[DF$breed == "Ayrshire"]
Y <- DF$butterfat[DF$breed == "Canadian"]
t.test(X, Y)
FAT <- c(X, Y)
R <- 10^4 - 1
TS <- numeric(R)
# NOTE(review): same sample(20, 20) concern as above.
for(i in 1:R){
index <- sample(20, 20, replace = FALSE)
TS[i] <- mean(FAT[index]) - mean(FAT[-index])
}
pvalue <- ((sum(TS >= abs(t.test(X, Y)$stat)))*2 + 1) / (R + 1)
pvalue
# Permutation test using the t statistic itself (formula interface):
# shuffle the breed labels and recompute the t statistic each round.
t.test(butterfat ~ breed, data = DF)
PTS <- numeric(R)
for(i in 1:R){
PTS[i] <- t.test(butterfat ~ sample(breed), data = DF)$stat
}
pvalue <- (sum(PTS >= abs(t.test(butterfat ~ breed, data = DF)$stat))*2 + 1) / (R + 1)
pvalue | /Rscripts/Jan21_16.R | permissive | calebjbdavis/STT5812CourseRepo | R | false | false | 1,005 | r | library(PASWR2)
X <- COWS$butterfat[COWS$breed =="Ayrshire"]
Y <- COWS$butterfat[COWS$breed =="Canadian"]
t.test(X, Y)
FAT <- c(X, Y)
R <- 10^4 - 1
TS <- numeric(R)
for(i in 1:R){
index <- sample(20, 20, replace = FALSE)
TS[i] <- mean(FAT[index]) - mean(FAT[-index])
}
pvalue <- ((sum(TS >= abs(t.test(X, Y)$stat)))*2 + 1) / (R + 1)
pvalue
# Consider using dplyr
library(dplyr)
DF <- COWS %>%
filter(breed == "Ayrshire" | breed == "Canadian")
DF
X <- DF$butterfat[DF$breed == "Ayrshire"]
Y <- DF$butterfat[DF$breed == "Canadian"]
t.test(X, Y)
FAT <- c(X, Y)
R <- 10^4 - 1
TS <- numeric(R)
for(i in 1:R){
index <- sample(20, 20, replace = FALSE)
TS[i] <- mean(FAT[index]) - mean(FAT[-index])
}
pvalue <- ((sum(TS >= abs(t.test(X, Y)$stat)))*2 + 1) / (R + 1)
pvalue
#
t.test(butterfat ~ breed, data = DF)
PTS <- numeric(R)
for(i in 1:R){
PTS[i] <- t.test(butterfat ~ sample(breed), data = DF)$stat
}
pvalue <- (sum(PTS >= abs(t.test(butterfat ~ breed, data = DF)$stat))*2 + 1) / (R + 1)
pvalue |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmph3UnEb/file63ef12109ec3
\name{reshape_comparison_results}
\alias{reshape_comparison_results}
\title{Convert comparison results to a more general format}
\usage{
reshape_comparison_results(oneComparisonResult, includeVars = TRUE,
includeEfficiency = TRUE, includeTiming = TRUE)
}
\arguments{
\item{oneComparisonResult}{An object returned by \code{\link{compareMCMCs}} (or \code{\link{combine_MCMC_comparison_results}} or \code{\link{rename_MCMC_comparison_method}}).}
\item{includeVars}{(default TRUE): whether to include the summary elements for each variable}
\item{includeEfficiency}{(default TRUE): whether to include the efficiency for each variable}
\item{includeTiming}{(default TRUE): whether to include the timing for each variable (which is the same for all variables from the same MCMC method)}
}
\value{
A data frame with the content from oneComparisonResult reshaped
}
\description{
Useful for making new kinds of figures or other needs
}
\details{
This is used internally by \code{\link{make_MCMC_comparison_pages}} but could also be useful to users who want to do their own thing with results from \code{\link{compareMCMCs}}.
}
| /packages/nimble/man/reshape_comparison_results.Rd | no_license | nxdao2000/nimble | R | false | false | 1,236 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in Rtmph3UnEb/file63ef12109ec3
\name{reshape_comparison_results}
\alias{reshape_comparison_results}
\title{Convert comparison results to a more general format}
\usage{
reshape_comparison_results(oneComparisonResult, includeVars = TRUE,
includeEfficiency = TRUE, includeTiming = TRUE)
}
\arguments{
\item{oneComparisonResult}{An object returned by \code{\link{compareMCMCs}} (or \code{\link{combine_MCMC_comparison_results}} or \code{\link{rename_MCMC_comparison_method}}).}
\item{includeVars}{(default TRUE): whether to include the summary elements for each variable}
\item{includeEfficiency}{(default TRUE): whether to include the efficiency for each variable}
\item{includeTiming}{(default TRUE): whether to include the timing for each variable (which is the same for all variables from the same MCMC method)}
}
\value{
A data frame with the content from oneComparisonResult reshaped
}
\description{
Useful for making new kinds of figures or other needs
}
\details{
This is used internally by \code{\link{make_MCMC_comparison_pages}} but could also be useful to users who want to do their own thing with results from \code{\link{compareMCMCs}}.
}
|
9a7c441e00be21932e4e6946add35ca4 query64_query31_1344n.qdimacs 1291 3161 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query64_query31_1344n/query64_query31_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 72 | r | 9a7c441e00be21932e4e6946add35ca4 query64_query31_1344n.qdimacs 1291 3161 |
# Build a set of ggplot objects exploring 2015 fatal-police-shooting data
# joined against census income data.
# NOTE(review): library() is usually preferred over require() for hard
# dependencies -- require() only warns (returns FALSE) when a package is
# missing, so failures surface later as confusing errors.
require(ggplot2)
require(dplyr)
# Read the three input CSVs; paths are relative to the script's location,
# so this assumes the working directory is the script directory -- TODO confirm.
income <- read.csv(file="../01 Data/census-income-data.csv", header=TRUE, sep=",")
fatalPoliceShootings <- read.csv(file="../01 Data/fatal-police-shootings-data.csv", header=TRUE, sep=",")
incomeOfTheFatallyShot <- read.csv(file="../01 Data/fatal-police-shootings-and-census-income-data.csv", header=TRUE, sep=",")
#Histogram-----------------------------------------------------------------------------
# Histogram of per-capita income (bin width 700).
# NOTE(review): `histogram` shadows lattice::histogram() if lattice is attached;
# mapping fill to the continuous Per_Capita_Income inside geom_histogram is
# unusual -- presumably decorative; verify the intended legend.
histogram <- ggplot(incomeOfTheFatallyShot) + geom_histogram(aes(Per_Capita_Income, fill = Per_Capita_Income), binwidth = 700) + ggtitle("Count of the per capita of Fatal Police Shooting Individuals")
#End Histogram-----------------------------------------------------------------------------
#Box Plot-----------------------------------------------------------------------------
# Boxplot of median family income split by fleeing category.
# NOTE(review): `boxplot` shadows graphics::boxplot().
boxplot <- ggplot(incomeOfTheFatallyShot) + geom_boxplot(aes(x = flee, y = Median_Family_Income, fill = flee) ) + ggtitle("Median Family Income Boxplot of Individuals who suffered from \nFatal Police Shootings in 2015. \n\nThe x axis is the fleeing type, i.e. if anyone was fleeing and if so how.")
#End Box Plot-----------------------------------------------------------------------------
#Scatter Plot-----------------------------------------------------------------------------
# Scatter of GINI index vs. median family income, coloured by the weapon
# the individual was armed with.
scatterplot <- ggplot(incomeOfTheFatallyShot) + geom_point(aes(x = GINI, y = Median_Family_Income, color = armed)) + ggtitle("This plot shows the median family income vs. the gini index for individuals \nfrom fatal police shootings in 2015. \n\nThe color is the weapon said individuals were armed with.")
#End Scatter Plot-----------------------------------------------------------------------------
#CrossTabs-----------------------------------------------------------------------------
# First Plot
# Cross-tab of gender x signs_of_mental_illness annotated with the observation
# count in each per-capita-income band (low / medium / high).
genderMentalIll <- dplyr::select(incomeOfTheFatallyShot, Per_Capita_Income, gender, signs_of_mental_illness)
# Band per-capita income: low < 26500 <= medium < 31000 <= high, then count
# rows per band x gender x mental-illness cell.
# FIX: the medium test previously used `> 26500`, so an income of exactly
# 26500 fell through to "high"; `>=` closes that boundary gap.
countTotal <- genderMentalIll %>% mutate(Per_Capita_Range = ifelse(Per_Capita_Income < 26500, "low", ifelse(Per_Capita_Income < 31000 & Per_Capita_Income >= 26500, "medium","high"))) %>% dplyr::count(Per_Capita_Range, gender, signs_of_mental_illness)
lowCapitaRange <- countTotal %>% filter(Per_Capita_Range == "low")
mediumCapitaRange <- countTotal %>% filter(Per_Capita_Range == "medium")
highCapitaRange <- countTotal %>% filter(Per_Capita_Range == "high")
# One geom_text layer per band, nudged horizontally so the three counts sit
# side by side in each cell.
# FIX: the medium layer's colour was "000099" (missing "#"), which is not a
# valid R colour name and makes ggplot2 error when the plot is drawn.
# NOTE(review): the title says "green is the middle" but "#000099" is dark
# blue -- confirm which of title or colour reflects the intent.
capitaRangePlot <- ggplot() +
geom_text(data = lowCapitaRange, colour="#CC0000", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = -0.2, size=10) +
geom_text(data = mediumCapitaRange, colour="#000099", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = 0, size=10) +
geom_text(data = highCapitaRange, colour="blue", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = 0.2, size=10) + ggtitle("This plot shows the signs of mental illness vs. gender for individuals \nfrom fatal police shootings in 2015. \n\nThe text is ranges of count of per capita income for the individuals in \neach category. Red is the low per capita income, green is the middle \nand blue is the high.")
# Second Plot
# Average median income by gender x race, shown twice: once over all joined
# rows and once restricted to a mid-income band.
# NOTE(review): the join repeats each state's Median_Income once per incident
# before averaging, effectively weighting by incident count -- confirm intended.
plotDF <- dplyr::inner_join(income, fatalPoliceShootings, by = c("State" = "state")) %>%
dplyr::group_by(gender, race) %>%
dplyr::summarize(avg_median_income = mean(Median_Income))
# Same aggregation restricted to median incomes in [46000, 62000].
# NOTE(review): `subset` shadows base::subset(); a more specific name would
# avoid surprises elsewhere in the session.
subset <- dplyr::inner_join(income,fatalPoliceShootings, by = c("State" = "state")) %>% dplyr::filter(Median_Income >= 46000 & Median_Income <= 62000) %>%
dplyr::group_by(gender, race) %>%
dplyr::summarize(avg_median_income = mean(Median_Income))
# Overlay both sets of averages as text at race x gender positions
# (large text = full data, small nudged text = restricted band).
genderRacePlot <- ggplot() +
geom_text(data = plotDF, aes(x= gender, y=race, label = avg_median_income), size=8) +
geom_text(data = subset, aes(x=gender, y=race, label = avg_median_income), nudge_y = -.5, size=4) + ggtitle("This plot shows the race vs. gender for individuals \nfrom fatal police shootings in 2015. \n\nThe large text Represents being Part of the mean of the top 25% \nHighest Median Incomes. Smaller text is the average of all \nremaining median incomes.")
#Third Plot
# Heat tiles of median(family income / per-capita income) by race x flee,
# with the median income printed inside each cell.
test <- incomeOfTheFatallyShot %>% dplyr::group_by(race,flee) %>% dplyr::summarise(income = median(Median_Income), MedianFamilyIncomePerCapitaIncomeRatio = median(Median_Family_Income/Per_Capita_Income))
raceFleePlot <- ggplot(test) +
theme(axis.text.x=element_text(angle=90, size=16, vjust=0.5)) +
theme(axis.text.y=element_text(size=16, hjust=0.5)) +
geom_text(aes(x=race, y=flee, label = income), size=6)+
geom_tile(aes(x=race, y=flee, fill=MedianFamilyIncomePerCapitaIncomeRatio), alpha=0.50) + ggtitle("This R visualization was created using the calculated fields of \nMedian(MedianFamilyIncome/PerCapitaIncome) and \nplotting based on how the individual from the fatal police \nshooting fled against the race of the individual shot.")
#End CrossTabs-----------------------------------------------------------------------------
#BarCharts -----------------------------------------------------------------------------
# Median Income by Race
# Bars of average median income by gender, faceted by race; blue labels show
# sum(Median_Income) minus its window average within each race/gender group.
incomeByRace <- incomeOfTheFatallyShot %>% dplyr::group_by(race, gender) %>% dplyr::summarize(avg_median_income = mean(Median_Income), sum_income = sum(Median_Income)) %>% dplyr::group_by(race, gender, avg_median_income) %>% dplyr::summarize(window_avg_income = mean(sum_income))
incomeByRacePlot <- ggplot(incomeByRace, aes(x = gender, y = avg_median_income)) +
geom_bar(stat = "identity") +
scale_y_continuous(labels = scales::comma) +
facet_wrap(~race, ncol=1) +
coord_flip() +
geom_text(mapping=aes(x=gender, y=avg_median_income, label=round(avg_median_income - window_avg_income)),colour="blue", hjust=-.5) + ggtitle("This plot shows the race vs. gender for individuals \nfrom fatal police shootings in 2015 the bars are the \naverage median income. \n\nThe blue numbers are a table calculation (the sum of the median \nincome - the window average of the median income).")
# Median Income by Fleeing
# Median income by mental-illness flag, faceted by fleeing mode (Car / Foot /
# Not fleeing only), with a purple line at the overall median.
# NOTE(review): the title says "feeling" -- presumably a typo for "fleeing";
# left untouched here because titles are runtime output.
fleeMentalIncome <- incomeOfTheFatallyShot %>% dplyr::select(flee,signs_of_mental_illness,Median_Income) %>% group_by(signs_of_mental_illness,flee) %>% dplyr::filter(flee %in% c('Car','Foot','Not fleeing')) %>% summarise(Median_income = median(Median_Income))
fleePlot <- ggplot(fleeMentalIncome, aes(x=signs_of_mental_illness, y=Median_income, fill=signs_of_mental_illness)) +
theme(axis.text.x=element_text(angle=0, size=12, vjust=0.5)) +
theme(axis.text.y=element_text(size=12, hjust=0.5)) +
geom_bar(stat = "identity") +
facet_wrap(~flee, ncol=1) +
coord_flip() +
geom_hline(aes(yintercept = median(Median_income)), color="purple") + ggtitle("Income of Individuals from a Fatal Police Shooting. \nBroken up by if they were feeling and/or had signs of mental illness.")
#plot(fleePlot)
# Inequality Index for High Income Criminals
# GINI bars for "high" income individuals with id > 1000.
# NOTE(review): an income of exactly 50000 or 60000 falls through both ifelse
# branches into "high" (the medium test uses strict < and >) -- confirm the
# intended band boundaries.
inequalityIndexforHighIncome <- incomeOfTheFatallyShot %>% dplyr::select(id,GINI,Median_Income) %>% mutate(Median_Income_Range = ifelse(Median_Income < 50000, "low", ifelse(Median_Income < 60000 & Median_Income > 50000, "medium","high"))) %>% dplyr::filter(Median_Income_Range == 'high',id > 1000)
inequalityPlot <- ggplot(inequalityIndexforHighIncome, aes(x=id, y=GINI, fill=Median_Income)) +
theme(axis.text.x=element_text(angle=0, size=12, vjust=0.5)) +
theme(axis.text.y=element_text(size=12, hjust=0.5)) +
geom_bar(stat = "identity") + ggtitle("Inequality Index For High Income Individuals from 2015 Fatal Police \nShootings. Each ID represents a person.")
plot(inequalityPlot)
#End Barcharts-----------------------------------------------------------------------------
# require("grid")
#
# setwd("../03 Visualizations")
#
# png("allPlots.png", width = 45, height = 45, units = "in", res = 120)
# grid.newpage()
# pushViewport(viewport(layout = grid.layout(2, 2)))
#
# # Print Plots
# print(incomeByRacePlot, vp = viewport(layout.pos.row = 1, layout.pos.col = 1))
# print(fleePlot, vp = viewport(layout.pos.row = 1, layout.pos.col = 2))
# print(inequalityPlot, vp = viewport(layout.pos.row = 2, layout.pos.col = 1))
#
# dev.off()
| /data/finalplots.R | no_license | carlos-loya/police-stats | R | false | false | 8,050 | r | require(ggplot2)
require(dplyr)
income <- read.csv(file="../01 Data/census-income-data.csv", header=TRUE, sep=",")
fatalPoliceShootings <- read.csv(file="../01 Data/fatal-police-shootings-data.csv", header=TRUE, sep=",")
incomeOfTheFatallyShot <- read.csv(file="../01 Data/fatal-police-shootings-and-census-income-data.csv", header=TRUE, sep=",")
#Histogram-----------------------------------------------------------------------------
histogram <- ggplot(incomeOfTheFatallyShot) + geom_histogram(aes(Per_Capita_Income, fill = Per_Capita_Income), binwidth = 700) + ggtitle("Count of the per capita of Fatal Police Shooting Individuals")
#End Histogram-----------------------------------------------------------------------------
#Box Plot-----------------------------------------------------------------------------
boxplot <- ggplot(incomeOfTheFatallyShot) + geom_boxplot(aes(x = flee, y = Median_Family_Income, fill = flee) ) + ggtitle("Median Family Income Boxplot of Individuals who suffered from \nFatal Police Shootings in 2015. \n\nThe x axis is the fleeing type, i.e. if anyone was fleeing and if so how.")
#End Box Plot-----------------------------------------------------------------------------
#Scatter Plot-----------------------------------------------------------------------------
scatterplot <- ggplot(incomeOfTheFatallyShot) + geom_point(aes(x = GINI, y = Median_Family_Income, color = armed)) + ggtitle("This plot shows the median family income vs. the gini index for individuals \nfrom fatal police shootings in 2015. \n\nThe color is the weapon said individuals were armed with.")
#End Scatter Plot-----------------------------------------------------------------------------
#CrossTabs-----------------------------------------------------------------------------
# First Plot
# Cross-tab of gender x signs_of_mental_illness annotated with the observation
# count in each per-capita-income band (low / medium / high).
genderMentalIll <- dplyr::select(incomeOfTheFatallyShot, Per_Capita_Income, gender, signs_of_mental_illness)
# Band per-capita income: low < 26500 <= medium < 31000 <= high, then count
# rows per band x gender x mental-illness cell.
# FIX: the medium test previously used `> 26500`, so an income of exactly
# 26500 fell through to "high"; `>=` closes that boundary gap.
countTotal <- genderMentalIll %>% mutate(Per_Capita_Range = ifelse(Per_Capita_Income < 26500, "low", ifelse(Per_Capita_Income < 31000 & Per_Capita_Income >= 26500, "medium","high"))) %>% dplyr::count(Per_Capita_Range, gender, signs_of_mental_illness)
lowCapitaRange <- countTotal %>% filter(Per_Capita_Range == "low")
mediumCapitaRange <- countTotal %>% filter(Per_Capita_Range == "medium")
highCapitaRange <- countTotal %>% filter(Per_Capita_Range == "high")
# One geom_text layer per band, nudged horizontally so the three counts sit
# side by side in each cell.
# FIX: the medium layer's colour was "000099" (missing "#"), which is not a
# valid R colour name and makes ggplot2 error when the plot is drawn.
# NOTE(review): the title says "green is the middle" but "#000099" is dark
# blue -- confirm which of title or colour reflects the intent.
capitaRangePlot <- ggplot() +
geom_text(data = lowCapitaRange, colour="#CC0000", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = -0.2, size=10) +
geom_text(data = mediumCapitaRange, colour="#000099", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = 0, size=10) +
geom_text(data = highCapitaRange, colour="blue", aes(x=gender, y=signs_of_mental_illness, label = n),nudge_x = 0.2, size=10) + ggtitle("This plot shows the signs of mental illness vs. gender for individuals \nfrom fatal police shootings in 2015. \n\nThe text is ranges of count of per capita income for the individuals in \neach category. Red is the low per capita income, green is the middle \nand blue is the high.")
# Second Plot
plotDF <- dplyr::inner_join(income, fatalPoliceShootings, by = c("State" = "state")) %>%
dplyr::group_by(gender, race) %>%
dplyr::summarize(avg_median_income = mean(Median_Income))
subset <- dplyr::inner_join(income,fatalPoliceShootings, by = c("State" = "state")) %>% dplyr::filter(Median_Income >= 46000 & Median_Income <= 62000) %>%
dplyr::group_by(gender, race) %>%
dplyr::summarize(avg_median_income = mean(Median_Income))
genderRacePlot <- ggplot() +
geom_text(data = plotDF, aes(x= gender, y=race, label = avg_median_income), size=8) +
geom_text(data = subset, aes(x=gender, y=race, label = avg_median_income), nudge_y = -.5, size=4) + ggtitle("This plot shows the race vs. gender for individuals \nfrom fatal police shootings in 2015. \n\nThe large text Represents being Part of the mean of the top 25% \nHighest Median Incomes. Smaller text is the average of all \nremaining median incomes.")
#Third Plot
test <- incomeOfTheFatallyShot %>% dplyr::group_by(race,flee) %>% dplyr::summarise(income = median(Median_Income), MedianFamilyIncomePerCapitaIncomeRatio = median(Median_Family_Income/Per_Capita_Income))
raceFleePlot <- ggplot(test) +
theme(axis.text.x=element_text(angle=90, size=16, vjust=0.5)) +
theme(axis.text.y=element_text(size=16, hjust=0.5)) +
geom_text(aes(x=race, y=flee, label = income), size=6)+
geom_tile(aes(x=race, y=flee, fill=MedianFamilyIncomePerCapitaIncomeRatio), alpha=0.50) + ggtitle("This R visualization was created using the calculated fields of \nMedian(MedianFamilyIncome/PerCapitaIncome) and \nplotting based on how the individual from the fatal police \nshooting fled against the race of the individual shot.")
#End CrossTabs-----------------------------------------------------------------------------
#BarCharts -----------------------------------------------------------------------------
# Median Income by Race
incomeByRace <- incomeOfTheFatallyShot %>% dplyr::group_by(race, gender) %>% dplyr::summarize(avg_median_income = mean(Median_Income), sum_income = sum(Median_Income)) %>% dplyr::group_by(race, gender, avg_median_income) %>% dplyr::summarize(window_avg_income = mean(sum_income))
incomeByRacePlot <- ggplot(incomeByRace, aes(x = gender, y = avg_median_income)) +
geom_bar(stat = "identity") +
scale_y_continuous(labels = scales::comma) +
facet_wrap(~race, ncol=1) +
coord_flip() +
geom_text(mapping=aes(x=gender, y=avg_median_income, label=round(avg_median_income - window_avg_income)),colour="blue", hjust=-.5) + ggtitle("This plot shows the race vs. gender for individuals \nfrom fatal police shootings in 2015 the bars are the \naverage median income. \n\nThe blue numbers are a table calculation (the sum of the median \nincome - the window average of the median income).")
# Median Income by Fleeing
fleeMentalIncome <- incomeOfTheFatallyShot %>% dplyr::select(flee,signs_of_mental_illness,Median_Income) %>% group_by(signs_of_mental_illness,flee) %>% dplyr::filter(flee %in% c('Car','Foot','Not fleeing')) %>% summarise(Median_income = median(Median_Income))
fleePlot <- ggplot(fleeMentalIncome, aes(x=signs_of_mental_illness, y=Median_income, fill=signs_of_mental_illness)) +
theme(axis.text.x=element_text(angle=0, size=12, vjust=0.5)) +
theme(axis.text.y=element_text(size=12, hjust=0.5)) +
geom_bar(stat = "identity") +
facet_wrap(~flee, ncol=1) +
coord_flip() +
geom_hline(aes(yintercept = median(Median_income)), color="purple") + ggtitle("Income of Individuals from a Fatal Police Shooting. \nBroken up by if they were feeling and/or had signs of mental illness.")
#plot(fleePlot)
# Inequality Index for High Income Criminals
inequalityIndexforHighIncome <- incomeOfTheFatallyShot %>% dplyr::select(id,GINI,Median_Income) %>% mutate(Median_Income_Range = ifelse(Median_Income < 50000, "low", ifelse(Median_Income < 60000 & Median_Income > 50000, "medium","high"))) %>% dplyr::filter(Median_Income_Range == 'high',id > 1000)
inequalityPlot <- ggplot(inequalityIndexforHighIncome, aes(x=id, y=GINI, fill=Median_Income)) +
theme(axis.text.x=element_text(angle=0, size=12, vjust=0.5)) +
theme(axis.text.y=element_text(size=12, hjust=0.5)) +
geom_bar(stat = "identity") + ggtitle("Inequality Index For High Income Individuals from 2015 Fatal Police \nShootings. Each ID represents a person.")
plot(inequalityPlot)
#End Barcharts-----------------------------------------------------------------------------
# require("grid")
#
# setwd("../03 Visualizations")
#
# png("allPlots.png", width = 45, height = 45, units = "in", res = 120)
# grid.newpage()
# pushViewport(viewport(layout = grid.layout(2, 2)))
#
# # Print Plots
# print(incomeByRacePlot, vp = viewport(layout.pos.row = 1, layout.pos.col = 1))
# print(fleePlot, vp = viewport(layout.pos.row = 1, layout.pos.col = 2))
# print(inequalityPlot, vp = viewport(layout.pos.row = 2, layout.pos.col = 1))
#
# dev.off()
|
#### Test differences for within and between group distances for beta
######################################################################
#For PCoA by body sites sep, color by infant
# For every beta-diversity metric and every body site: collect within-infant
# and between-infant pairwise distances, compare them with a Wilcoxon
# rank-sum test, append the stats to a per-metric text file, and save a
# boxplot of the two distance distributions to a PDF.
# Relies on globals defined earlier in the file: main_fp, baby_colors,
# mapping, beta_tables, beta_metrics, Bodysites_B.
pcoa_dir_baby <- paste(main_fp, "beta_div/PCOA_baby/", sep='/')
names(baby_colors) <- unique(mapping$subject_id)
# seq_along()/seq_len() replace 1:length(...) so empty inputs do not
# accidentally iterate over c(1, 0).
for(b in seq_along(beta_tables)){
  beta_div <- beta_tables[[b]]
  beta_name <- beta_metrics[b]
  plot_list <- c()
  #make stats file: the empty sink()/sink() pair creates/truncates it so the
  #append writes below always have a file to go to
  file_name <- paste(pcoa_dir_baby, "Beta_Stats_", beta_name, ".txt", sep='')
  sink(file_name)
  sink()
  for(a in seq_along(Bodysites_B)){
    site <- Bodysites_B[[a]]
    beta_subset <- beta_div[site,site]
    #Set 1 will be one infant
    #Set 2 will be all other infants
    infant_ids <- unique(mapping[rownames(beta_subset),"subject_id"])
    within <- c()
    between <- c()
    for(n in seq_along(infant_ids)){
      set1 <- intersect(rownames(beta_subset), rownames(mapping[mapping$subject_id == infant_ids[n],]))
      set2 <- intersect(rownames(beta_subset), rownames(mapping[!mapping$subject_id == infant_ids[n],]))
      full_set <- c(set1, set2)
      set1_within <- c()
      between_sets <- c()
      # need >2 samples on both sides for a meaningful comparison
      if(length(set1) > 2 && length(set2) > 2){
        # Visit each unordered pair of samples (r > c) exactly once.
        # FIX: the original `(c+1):length(rownames(beta_div))` produced the
        # descending sequence c(n+1, n) when c reached the last column,
        # reading one row past the end of the table; the r > c guard below
        # keeps the same pairs without the out-of-range access.
        # NOTE(review): iteration is over the full beta_div rather than
        # beta_subset -- results are identical because membership in
        # set1/set2 filters the pairs, but beta_subset would be cheaper;
        # confirm before tightening.
        for(c in seq_len(ncol(beta_div))){
          for(r in seq_len(nrow(beta_div))){
            if(r <= c) next
            if(rownames(beta_div)[r] %in% set1 && colnames(beta_div)[c] %in% set1){
              set1_within <- c(set1_within, beta_div[r,c])
            } else {
              if(rownames(beta_div)[r] %in% set1 && colnames(beta_div)[c] %in% set2){
                between_sets <- c(between_sets, beta_div[r,c])
              }
            }
          }
        }
      }
      within <- c(within, set1_within)
      between <- c(between_sets, between)
    }
    sets_test <- list(within, between)
    names(sets_test) <- c("Within", "Between")
    wtest <- wilcox.test(within, between)
    pval <- wtest$p.value
    distances <- melt(sets_test)
    #print stats to screen
    cat(sprintf('\n%s,%s:\n',names(Bodysites_B)[a], beta_name))
    print(wtest)
    #write stats to file
    sink(file_name, append =TRUE)
    cat(sprintf('\n%s,%s:\n',names(Bodysites_B)[a], beta_name))
    print(wtest)
    sink()
    #assign pdf name for plot
    name1 <- paste(pcoa_dir_baby, "Beta_dists_", beta_name, "_", names(Bodysites_B)[a], ".pdf", sep='')
    pdf(name1, height=4,width=6)
    #make beta div box plots
    plot1 <- ggplot(distances, aes(x=L1, y=value, fill=L1)) +
      geom_boxplot(outlier.shape = NA) +
      #geom_jitter(position=position_jitter(0.1), shape=1, size=1) +
      theme_cowplot(font_size=7) +
      annotate("text", x="Between", y=1.0, label= paste("(wilcox)P=", round(pval, digits=3)), size=2) +
      guides(fill=FALSE) +
      labs(x="", y = paste(beta_name, "distance")) +
      scale_fill_manual(values= c("#E88D3F", "#5F8CA2"))
    plot(plot1)
    dev.off()
  }
}
| /bin/beta.div.r | no_license | TonyaWard/fungal_infant1 | R | false | false | 2,959 | r | #### Test differences for within and between group distances for beta
######################################################################
#For PCoA by body sites sep, color by infant
pcoa_dir_baby <- paste(main_fp, "beta_div/PCOA_baby/", sep='/')
names(baby_colors) <- unique(mapping$subject_id)
for(b in 1:length(beta_tables)){
beta_div <- beta_tables[[b]]
beta_name <- beta_metrics[b]
plot_list <- c()
#make stats file
file_name <- paste(pcoa_dir_baby, "Beta_Stats_", beta_name, ".txt", sep='')
sink(file_name)
sink()
for(a in 1:length(Bodysites_B)){
site <- Bodysites_B[[a]]
beta_subset <- beta_div[site,site]
#Set 1 will be one infant
#Set 2 will be all other infants
infant_ids <- unique(mapping[rownames(beta_subset),"subject_id"])
within <- c()
between <- c()
for(n in 1:length(infant_ids)){
set1 <- intersect(rownames(beta_subset), rownames(mapping[mapping$subject_id == infant_ids[n],]))
set2 <- intersect(rownames(beta_subset), rownames(mapping[!mapping$subject_id == infant_ids[n],]))
full_set <- c(set1, set2)
set1_within <- c()
between_sets <- c()
if(length(set1) > 2 && length(set2) > 2){
for(c in 1:length(colnames(beta_div))){
for(r in (c+1):length(rownames(beta_div))){
if(rownames(beta_div)[r] %in% set1 && colnames(beta_div)[c] %in% set1){
set1_within <- c(set1_within, beta_div[r,c])
} else {
if(rownames(beta_div)[r] %in% set1 && colnames(beta_div)[c] %in% set2){
between_sets <- c(between_sets, beta_div[r,c])
}
}
}
}
}
within <- c(within, set1_within)
between <- c(between_sets, between)
}
sets_test <- list(within, between)
names(sets_test) <- c("Within", "Between")
wtest <- wilcox.test(within, between)
pval <- wtest$p.value
distances <- melt(sets_test)
#print stats to screen
cat(sprintf('\n%s,%s:\n',names(Bodysites_B)[a], beta_name))
print(wtest)
#write stats to file
sink(file_name, append =TRUE)
cat(sprintf('\n%s,%s:\n',names(Bodysites_B)[a], beta_name))
print(wtest)
sink()
#assign pdf name for plot
name1 <- paste(pcoa_dir_baby, "Beta_dists_", beta_name, "_", names(Bodysites_B)[a], ".pdf", sep='')
pdf(name1, height=4,width=6)
#make beta div box plots
plot1 <- ggplot(distances, aes(x=L1, y=value, fill=L1)) +
geom_boxplot(outlier.shape = NA) +
#geom_jitter(position=position_jitter(0.1), shape=1, size=1) +
theme_cowplot(font_size=7) +
annotate("text", x="Between", y=1.0, label= paste("(wilcox)P=", round(pval, digits=3)), size=2) +
guides(fill=FALSE) +
labs(x="", y = paste(beta_name, "distance")) +
scale_fill_manual(values= c("#E88D3F", "#5F8CA2"))
plot(plot1)
dev.off()
}
}
|
# Exploratory script: load Washington Post fatal-police-shootings data and
# 2010 census state populations into an MS Access database via RODBC, wire up
# keys, and run practice SQL queries (per-state death rates).
# NOTE(review): the connection string is an absolute user-specific path, so
# the script only runs on the original author's machine as written.
require(pacman)
p_load(tidyverse,RODBC,tidycensus)
# create an empty database in MS access
# build connection to that database
# (must specify full file path, apparently)
dta <- odbcConnectAccess2007("C:/Users/Joem/Dropbox (University of Oregon)/My PC (DESKTOP-8RMGF3S)/Documents/howdb/wapo_shootings.accdb")
# get the names of tables in the database
sqlTables(dta)
# pull external data into R
wapo <- read.csv("https://github.com/washingtonpost/data-police-shootings/releases/download/v0.1/fatal-police-shootings-data.csv")
# do cool R things to that data if necessary
# push that data into the database
sqlSave(dta,wapo,tablename="fatal",rownames = F)
# prints character(0) on success
# set a primary key with a query
sqlQuery(dta,"ALTER TABLE fatal
ADD PRIMARY KEY (id);")
# get more external data and set it up in database
# NOTE(review): get_decennial() requires a Census API key to be configured.
vars <- load_variables(2010,"sf1")
pops <- get_decennial(geography = "state",
year=2010,
variables="P001001")
# some cleaning in R because I hate cleaning in SQL
# map full state names to two-letter abbreviations; DC is not in state.name,
# so it is patched in separately, and territories (NA abb) are dropped
pops$state <- state.abb[match(pops$NAME,state.name)]
pops$state <- ifelse(pops$NAME=="District of Columbia","DC",pops$state)
pops <- pops %>% dplyr::filter(!is.na(state)) %>% dplyr::select(-variable,-GEOID,-NAME)
names(pops)[which(names(pops)=="value")] <- "population"
# push cleaned up data into database
sqlSave(dta,pops,tablename="pops",rownames=F)
# add primary key
sqlQuery(dta,"ALTER TABLE pops
ADD PRIMARY KEY (state);")
# add foreign key to our main table
sqlQuery(dta,"ALTER TABLE fatal
ADD FOREIGN KEY (state) REFERENCES pops(state);")
# make a new table from existing tables:
# SELECT [var names] INTO [new table name] FROM [query or existing table]
sqlQuery(dta,"SELECT * INTO fatalwithpops FROM fatal LEFT JOIN pops ON fatal.state = pops.state;")
# stacked subqueries to get a table with states and police-deaths per 100,000 (since 2015)
# order that table by death rate
# working from the inside out:
# SELECT COUNT(id), [pops_state] FROM fatalwithpops GROUP BY [pops_state]
# groups fatalwithpops by state and gets the total deaths in each state
# AS dcount
# gives an alias (dcount) to the state totals table
# SELECT population, state, Expr1000 FROM pops LEFT JOIN (
# left joins the pops table (state abbs and populations) with dcount, by state abb
# SELECT state, 100000 * Expr1000/population as deathrate FROM (
# calculates rates per 100,000 and names this variable deathrate
# SELECT * FROM (
# outermost query is only necessary so that we can alias the table with the rates (I just called it a)
# and then sort that aliased table by death rate
# NOTE(review): Expr1000 is presumably Access's auto-generated name for the
# unnamed COUNT(id) column -- verify; giving it an explicit alias would be
# more robust.
sqlQuery(dta,"SELECT * FROM (
SELECT state, 100000 * Expr1000/population as deathrate FROM (
SELECT population, state, Expr1000 FROM pops LEFT JOIN (
(
SELECT COUNT(id), [pops_state] FROM fatalwithpops GROUP BY [pops_state]
)
AS dcount
)
ON pops.[state] = dcount.[pops_state]
)
) as a
ORDER BY a.deathrate
;")
sqlQuery(dta,"SELECT Expr1000, population, state FROM NEWTABLE LEFT JOIN pops ON NEWTABLE.pops_state = pops.state")
sqlQuery(dta,"SELECT a.id, a.state FROM fatal AS a")
############ other commands that work #############
sqlQuery(dta,"SELECT COUNT(id), pops_state INTO NEWTABLE FROM fatalwithpops GROUP BY pops_state")
sqlQuery(dta,"SELECT COUNT(id), pops_state FROM fatalwithpops GROUP BY pops_state")
sqlQuery(dta,"SELECT * FROM fatalwithpops ORDER BY population")
# create a useless new table (var names and types, no data)
sqlQuery(dta,"CREATE TABLE Persons (
PersonID int,
LastName varchar(255),
FirstName varchar(255),
Address varchar(255),
City varchar(255)
);")
odbcCloseAll()
| /practice1.R | no_license | JoeMitchellNelson/howdb | R | false | false | 3,883 | r | require(pacman)
p_load(tidyverse,RODBC,tidycensus)
# create an empty database in MS access
# build connection to that database
# (must specify full file path, apparently)
dta <- odbcConnectAccess2007("C:/Users/Joem/Dropbox (University of Oregon)/My PC (DESKTOP-8RMGF3S)/Documents/howdb/wapo_shootings.accdb")
# get the names of tables in the database
sqlTables(dta)
# pull external data into R
wapo <- read.csv("https://github.com/washingtonpost/data-police-shootings/releases/download/v0.1/fatal-police-shootings-data.csv")
# do cool R things to that data if necessary
# push that data into the database
sqlSave(dta,wapo,tablename="fatal",rownames = F)
# prints character(0) on success
# set a primary key with a query
sqlQuery(dta,"ALTER TABLE fatal
ADD PRIMARY KEY (id);")
# get more external data and set it up in database
vars <- load_variables(2010,"sf1")
pops <- get_decennial(geography = "state",
year=2010,
variables="P001001")
# some cleaning in R because I hate cleaning in SQL
pops$state <- state.abb[match(pops$NAME,state.name)]
pops$state <- ifelse(pops$NAME=="District of Columbia","DC",pops$state)
pops <- pops %>% dplyr::filter(!is.na(state)) %>% dplyr::select(-variable,-GEOID,-NAME)
names(pops)[which(names(pops)=="value")] <- "population"
# push cleaned up data into database
sqlSave(dta,pops,tablename="pops",rownames=F)
# add primary key
sqlQuery(dta,"ALTER TABLE pops
ADD PRIMARY KEY (state);")
# add foreign key to our main table
sqlQuery(dta,"ALTER TABLE fatal
ADD FOREIGN KEY (state) REFERENCES pops(state);")
# make a new table from existing tables:
# SELECT [var names] INTO [new table name] FROM [query or existing table]
sqlQuery(dta,"SELECT * INTO fatalwithpops FROM fatal LEFT JOIN pops ON fatal.state = pops.state;")
# stacked subqueries to get a table with states and police-deaths per 100,000 (since 2015)
# order that table by death rate
# working from the inside out:
# SELECT COUNT(id), [pops_state] FROM fatalwithpops GROUP BY [pops_state]
# groups fatalwithpops by state and gets the total deaths in each state
# AS dcount
# gives an alias (dcount) to the state totals table
# SELECT population, state, Expr1000 FROM pops LEFT JOIN (
# left joins the pops table (state abbs and populations) with dcount, by state abb
# SELECT state, 100000 * Expr1000/population as deathrate FROM (
# calculates rates per 100,000 and names this variable deathrate
# SELECT * FROM (
# outermost query is only necessary so that we can alias the table with the rates (I just called it a)
# and then sort that aliased table by death rate
sqlQuery(dta,"SELECT * FROM (
SELECT state, 100000 * Expr1000/population as deathrate FROM (
SELECT population, state, Expr1000 FROM pops LEFT JOIN (
(
SELECT COUNT(id), [pops_state] FROM fatalwithpops GROUP BY [pops_state]
)
AS dcount
)
ON pops.[state] = dcount.[pops_state]
)
) as a
ORDER BY a.deathrate
;")
sqlQuery(dta,"SELECT Expr1000, population, state FROM NEWTABLE LEFT JOIN pops ON NEWTABLE.pops_state = pops.state")
sqlQuery(dta,"SELECT a.id, a.state FROM fatal AS a")
############ other commands that work #############
sqlQuery(dta,"SELECT COUNT(id), pops_state INTO NEWTABLE FROM fatalwithpops GROUP BY pops_state")
sqlQuery(dta,"SELECT COUNT(id), pops_state FROM fatalwithpops GROUP BY pops_state")
sqlQuery(dta,"SELECT * FROM fatalwithpops ORDER BY population")
# create a useless new table (var names and types, no data)
sqlQuery(dta,"CREATE TABLE Persons (
PersonID int,
LastName varchar(255),
FirstName varchar(255),
Address varchar(255),
City varchar(255)
);")
odbcCloseAll()
|
## SIA-BRA_biomes
# Diniz-Reis et al 2021
# GEB
# MixSiar
# Stable-isotope mixing model: estimate the contribution of C3 vs C4 plant
# sources to consumer d13C across Brazilian biomes with MixSIAR, then plot
# the posterior source contributions.
# NOTE(review): installing packages on every run is usually undesirable;
# kept for reproducibility of the original workflow.
install.packages("MixSIAR")
install.packages("ggplot2")
library(MixSIAR)
library(mcmc)
library(ggplot2)
# Consumers: 'fish_consumer', 'mammal_consumer','bird_consumer'
# Mixture data: consumer d13C with biome as a random (non-nested) factor.
mix.filename <- system.file("extdata", "consumer.csv", package = "MixSIAR")
mix <- load_mix_data(filename=mix.filename,
                     iso_names=c("d13C"),
                     factors=c("biome"),
                     fac_random=c(TRUE),
                     fac_nested=c(FALSE),
                     cont_effects=NULL)
# Source: C3 and C4 plants
# NOTE(review): `source` shadows base::source() for the rest of the session.
source.filename <- system.file("extdata", "source_bra.csv", package = "MixSIAR")
source <- load_source_data(filename=source.filename, source_factors="biome",
                           conc_dep=FALSE, data_type="means", mix)
# Discrimination: zero = d13Cd and d15Nd
discr.filename <- system.file("extdata", "discriminatrion_bra.csv", package = "MixSIAR")
discr <- load_discr_data(filename=discr.filename, mix)
# Visualize the (uninformative, alpha = 1) Dirichlet prior on source proportions.
plot_prior(alpha.prior=1,source)
# model: include both residual and process error.
model_filename <- "MixSIAR_model.txt"
resid_err <- TRUE
process_err <- TRUE
write_JAGS_model(model_filename, resid_err, process_err, mix, source)
# "test" run length: quick MCMC for checking the pipeline, not inference.
jags.1 <- run_model(run="test", mix, source, discr, model_filename, alpha.prior = 1)
output_options <- list(summary_save = TRUE,
                       summary_name = "summary_statistics",
                       sup_post = FALSE,
                       plot_post_save_pdf = TRUE,
                       plot_post_name = "posterior_density",
                       sup_pairs = FALSE,
                       plot_pairs_save_pdf = TRUE,
                       plot_pairs_name = "pairs_plot",
                       sup_xy = TRUE,
                       plot_xy_save_pdf = FALSE,
                       plot_xy_name = "xy_plot",
                       gelman = TRUE,
                       heidel = FALSE,
                       geweke = TRUE,
                       diag_save = TRUE,
                       diag_name = "diagnostics",
                       indiv_effect = FALSE,
                       plot_post_save_png = FALSE,
                       plot_pairs_save_png = FALSE,
                       plot_xy_save_png = FALSE,
                       diag_save_ggmcmc = FALSE,
                       return_obj = TRUE)
# FIX: with return_obj = TRUE, output_JAGS() returns the summary/plot objects,
# but the original call discarded them, so the `g.post$fac1[[4]]` reference
# below failed with "object 'g.post' not found". Capture the return value.
g.post <- output_JAGS(jags.1, mix, source, output_options)
# Plot contributions - each consumer
# Restyle the 4th biome-factor posterior density plot: fixed palette,
# [0, 1] x-axis, clean theme.
grafico<-g.post$fac1[[4]]+ scale_x_continuous(breaks = seq(0,1,0.1), limits=c(0, 1))+
  scale_colour_manual(values=c("#000001","#009E73", "#D55E00", "#56B4E9")) +
  scale_fill_manual(values=c("#000001","#009E73", "#D55E00", "#56B4E9"))+
  theme(axis.line.x = element_line(size = 0.5, colour = "black"),
        axis.line.y = element_line(size = 0.5, colour = "black"),
        axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x=element_text(size=12, color="black"),
        axis.text.y=element_text(size=12, color="black"),
        axis.title.x=element_text(size=18, face="bold"),
        axis.title.y=element_text(size=16, face="bold"),
        legend.title = element_blank(),
        legend.text = element_text(size=14, face="bold"))
grafico
ggsave("mixsiar_class_consumer.jpeg", units="cm", width=14, height=12, dpi=300)
dev.off() | /SIA_BRA_biomes_MixSIAR.R | permissive | SIA-BRA/SIA-BRA | R | false | false | 3,583 | r | ## SIA-BRA_biomes
# Diniz-Reis et al 2021
# GEB
# MixSiar
install.packages("MixSIAR")
install.packages("ggplot2")
library(MixSIAR)
library(mcmc)
library(ggplot2)
# Consumers: 'fish_consumer', 'mammal_consumer','bird_consumer'
mix.filename <- system.file("extdata", "consumer.csv", package = "MixSIAR")
mix <- load_mix_data(filename=mix.filename,
iso_names=c("d13C"),
factors=c("biome"),
fac_random=c(TRUE),
fac_nested=c(FALSE),
cont_effects=NULL)
# Source: C3 and C4 plants
source.filename <- system.file("extdata", "source_bra.csv", package = "MixSIAR")
source <- load_source_data(filename=source.filename, source_factors="biome",
conc_dep=FALSE, data_type="means", mix)
# Discrimination: zero = d13Cd and d15Nd
discr.filename <- system.file("extdata", "discriminatrion_bra.csv", package = "MixSIAR")
discr <- load_discr_data(filename=discr.filename, mix)
plot_prior(alpha.prior=1,source)
# model
model_filename <- "MixSIAR_model.txt"
resid_err <- TRUE
process_err <- TRUE
write_JAGS_model(model_filename, resid_err, process_err, mix, source)
jags.1 <- run_model(run="test", mix, source, discr, model_filename, alpha.prior = 1)
output_options <- list(summary_save = TRUE,
summary_name = "summary_statistics",
sup_post = FALSE,
plot_post_save_pdf = TRUE,
plot_post_name = "posterior_density",
sup_pairs = FALSE,
plot_pairs_save_pdf = TRUE,
plot_pairs_name = "pairs_plot",
sup_xy = TRUE,
plot_xy_save_pdf = FALSE,
plot_xy_name = "xy_plot",
gelman = TRUE,
heidel = FALSE,
geweke = TRUE,
diag_save = TRUE,
diag_name = "diagnostics",
indiv_effect = FALSE,
plot_post_save_png = FALSE,
plot_pairs_save_png = FALSE,
plot_xy_save_png = FALSE,
diag_save_ggmcmc = FALSE,
return_obj = TRUE)
output_JAGS(jags.1, mix, source, output_options)
# Plot contributions - each consumer
grafico<-g.post$fac1[[4]]+ scale_x_continuous(breaks = seq(0,1,0.1), limits=c(0, 1))+
scale_colour_manual(values=c("#000001","#009E73", "#D55E00", "#56B4E9")) +
scale_fill_manual(values=c("#000001","#009E73", "#D55E00", "#56B4E9"))+
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(size=12, color="black"),
axis.text.y=element_text(size=12, color="black"),
axis.title.x=element_text(size=18, face="bold"),
axis.title.y=element_text(size=16, face="bold"),
legend.title = element_blank(),
legend.text = element_text(size=14, face="bold"))
grafico
ggsave("mixsiar_class_consumer.jpeg", units="cm", width=14, height=12, dpi=300)
dev.off() |
install.packages("e1071")
library(e1071)
regressor1=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=0.01)
regressor2=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=0.1)
regressor3=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=1)
regressor4=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=10)
regressor5=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=100)
plot_svr=ggplot(data=training_set)
plot_svr+geom_point(aes(x=R.D.Spend,y=Profit),color="Red",size=0.1)+
geom_line(aes(x=R.D.Spend,y=predict(regressor1,R.D.Spend=training_set$R.D.Spend)),color="Purple")+
geom_line(aes(x=R.D.Spend,y=predict(regressor2,R.D.Spend=training_set$R.D.Spend)),color="Blue")+
geom_line(aes(x=R.D.Spend,y=predict(regressor3,R.D.Spend=training_set$R.D.Spend)),color="Yellow")+
geom_line(aes(x=R.D.Spend,y=predict(regressor4,R.D.Spend=training_set$R.D.Spend)),color="Red")+
geom_line(aes(x=R.D.Spend,y=predict(regressor5,R.D.Spend=training_set$R.D.Spend)),color="Black")
| /svm_cost.R | no_license | F4rbod/Learning_ML | R | false | false | 1,134 | r | install.packages("e1071")
library(e1071)
regressor1=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=0.01)
regressor2=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=0.1)
regressor3=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=1)
regressor4=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=10)
regressor5=svm(formula=Profit~R.D.Spend,data=training_set,type="eps-regression",gamma=1,cost=100)
plot_svr=ggplot(data=training_set)
plot_svr+geom_point(aes(x=R.D.Spend,y=Profit),color="Red",size=0.1)+
geom_line(aes(x=R.D.Spend,y=predict(regressor1,R.D.Spend=training_set$R.D.Spend)),color="Purple")+
geom_line(aes(x=R.D.Spend,y=predict(regressor2,R.D.Spend=training_set$R.D.Spend)),color="Blue")+
geom_line(aes(x=R.D.Spend,y=predict(regressor3,R.D.Spend=training_set$R.D.Spend)),color="Yellow")+
geom_line(aes(x=R.D.Spend,y=predict(regressor4,R.D.Spend=training_set$R.D.Spend)),color="Red")+
geom_line(aes(x=R.D.Spend,y=predict(regressor5,R.D.Spend=training_set$R.D.Spend)),color="Black")
|
#@since 1.9.1
require rdoc/parser
require rdoc/parser/ruby
require rdoc/known_classes
#@else
require rdoc/code_objects
require rdoc/parsers/parserfactory
require rdoc/options
require rdoc/rdoc
#@end
C 언어 소스 코드에서 내장 클래스와 모듈의 문서를
분석하는 서브 라이브러리입니다.
C언어로 쓰여진 확장 라이브러를 분석하는 용도로 쓰입니다.
[[f:rb_define_class]]나 [[f:rb_define_method]] 등에 정의된 것들에@@@
대응되는 C 언어의 함수나 주석을 분석합니다.
예: Array#flattenrb_ary_flatten의 주석을 분석합니다.
/*
* Returns a new array that is a one-dimensional flattening of this
* array (recursively). That is, for every element that is an array,
* extract its elements into the new array.
*
* s = [ 1, 2, 3 ] #=> [1, 2, 3]
* t = [ 4, 5, 6, [7, 8] ] #=> [4, 5, 6, [7, 8]]
* a = [ s, t, 9, 10 ] #=> [[1, 2, 3], [4, 5, 6, [7, 8]], 9, 10]
* a.flatten #=> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
*/
static VALUE
rb_ary_flatten(ary)
VALUE ary;
{
ary = rb_obj_dup(ary);
rb_ary_flatten_bang(ary);
return ary;
}
...
void
Init_Array()
{
...
rb_define_method(rb_cArray, "flatten", rb_ary_flatten, 0);
위의 예에서 rb_ary_flatten 함수와 Init_Array 함수는 같은 파일에
있어야 합니다.
또한 Ruyb 소스코드와는 별개로 주석에 몇가지 명령을 사용할 수 있습니다.
: Document-class: name
내용을 name에 지정한 Ruby 클래스에 포함시킵니다.예를 들어 같은
.c 파일에 여러개의 클래스가 지정할 필요가 있을 때와 같이 Init_xxx 함수의
xxx 부분이 클래스명과 같지 않을 때 사용합니다.
: Document-method: name
내용을 name에 지정한 Ruby 메소드에 포함시킵니다.
RDoc이 자동적으로 대응되는 메소드를 찾지 못할 때 사용합니다.
: call-seq:
이 명령이 사용된 다음행부터 빈 행이 나올 때까지를 메소드 호출 예로 처리합니다.
또한 RDoc에선 rb_define_method의 정의와 C 언어의 함수 구현이 같은 파일에 있어야합니다.그렇지 않으면 @@@
rb_define_method(....); // in 파일명
예:
/*
* Document-class: MyClass
*
* Encapsulate the writing and reading of the configuration
* file. ...
*/
/*
* Document-method: read_value
*
* call-seq:
* cfg.read_value(key) -> value
* cfg.read_value(key} { |key| } -> value
*
* Return the value corresponding to +key+ from the configuration.
* In the second form, if the key isn't found, invoke the
* block and return its value.
*/
#@since 1.9.1
= class RDoc::Parser::C < RDoc::Parser
#@else
#@include(../RDoc__KNOWN_CLASSES)
= class RDoc::C_Parser
extend RDoc::ParserFactory
#@end
C 言語で記述されたソースコードから組み込みクラス/モジュールのドキュメン
トを解析するためのクラスです。
== Class Methods
#@since 1.9.1
--- new(top_level, file_name, body, options, stats) -> RDoc::Parser::C
#@else
--- new(top_level, file_name, body, options, stats) -> RDoc::C_Parser
#@end
객체를 초기화합니다.
@param top_level [[c:RDoc::TopLevel]] 객체를 지정합니다.
@param file_name 파일명을 나타내는 문자열을 지정합니다.
@param body 소스 코드를 문자열로 지정합니다.
#@since 1.9.1
@param options [[c:RDoc::Options]] 객체를 지정합니다.
#@else
@param options [[c:Options]] 객체를 지정합니다.
#@end
@param stats [[c:RDoc::Stats]] 객체를 지정합니다.
== Instance Methods
--- progress=(val)
진행상황을 출력할 [[c:IO]] 객체를 지정합니다.
@param val 진행상황을 출력할 [[c:IO]]를 지정합니다.지정하지 않으면
[[m:$stderr]]을 사용합니다.
--- scan -> RDoc::TopLevel
C 언어 소스 코드에서 내장 클래스와 모듈의 문서를
분석합니다.
@return [[c:RDoc::TopLevel]] 객체를 리턴합니다.
| /target/rubydoc/refm/api/src/rdoc/parsers/parse_c.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 4,094 | rd | #@since 1.9.1
require rdoc/parser
require rdoc/parser/ruby
require rdoc/known_classes
#@else
require rdoc/code_objects
require rdoc/parsers/parserfactory
require rdoc/options
require rdoc/rdoc
#@end
C 언어 소스 코드에서 내장 클래스와 모듈의 문서를
분석하는 서브 라이브러리입니다.
C언어로 쓰여진 확장 라이브러를 분석하는 용도로 쓰입니다.
[[f:rb_define_class]]나 [[f:rb_define_method]] 등에 정의된 것들에@@@
대응되는 C 언어의 함수나 주석을 분석합니다.
예: Array#flattenrb_ary_flatten의 주석을 분석합니다.
/*
* Returns a new array that is a one-dimensional flattening of this
* array (recursively). That is, for every element that is an array,
* extract its elements into the new array.
*
* s = [ 1, 2, 3 ] #=> [1, 2, 3]
* t = [ 4, 5, 6, [7, 8] ] #=> [4, 5, 6, [7, 8]]
* a = [ s, t, 9, 10 ] #=> [[1, 2, 3], [4, 5, 6, [7, 8]], 9, 10]
* a.flatten #=> [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
*/
static VALUE
rb_ary_flatten(ary)
VALUE ary;
{
ary = rb_obj_dup(ary);
rb_ary_flatten_bang(ary);
return ary;
}
...
void
Init_Array()
{
...
rb_define_method(rb_cArray, "flatten", rb_ary_flatten, 0);
위의 예에서 rb_ary_flatten 함수와 Init_Array 함수는 같은 파일에
있어야 합니다.
또한 Ruyb 소스코드와는 별개로 주석에 몇가지 명령을 사용할 수 있습니다.
: Document-class: name
내용을 name에 지정한 Ruby 클래스에 포함시킵니다.예를 들어 같은
.c 파일에 여러개의 클래스가 지정할 필요가 있을 때와 같이 Init_xxx 함수의
xxx 부분이 클래스명과 같지 않을 때 사용합니다.
: Document-method: name
내용을 name에 지정한 Ruby 메소드에 포함시킵니다.
RDoc이 자동적으로 대응되는 메소드를 찾지 못할 때 사용합니다.
: call-seq:
이 명령이 사용된 다음행부터 빈 행이 나올 때까지를 메소드 호출 예로 처리합니다.
또한 RDoc에선 rb_define_method의 정의와 C 언어의 함수 구현이 같은 파일에 있어야합니다.그렇지 않으면 @@@
rb_define_method(....); // in 파일명
예:
/*
* Document-class: MyClass
*
* Encapsulate the writing and reading of the configuration
* file. ...
*/
/*
* Document-method: read_value
*
* call-seq:
* cfg.read_value(key) -> value
* cfg.read_value(key} { |key| } -> value
*
* Return the value corresponding to +key+ from the configuration.
* In the second form, if the key isn't found, invoke the
* block and return its value.
*/
#@since 1.9.1
= class RDoc::Parser::C < RDoc::Parser
#@else
#@include(../RDoc__KNOWN_CLASSES)
= class RDoc::C_Parser
extend RDoc::ParserFactory
#@end
C 言語で記述されたソースコードから組み込みクラス/モジュールのドキュメン
トを解析するためのクラスです。
== Class Methods
#@since 1.9.1
--- new(top_level, file_name, body, options, stats) -> RDoc::Parser::C
#@else
--- new(top_level, file_name, body, options, stats) -> RDoc::C_Parser
#@end
객체를 초기화합니다.
@param top_level [[c:RDoc::TopLevel]] 객체를 지정합니다.
@param file_name 파일명을 나타내는 문자열을 지정합니다.
@param body 소스 코드를 문자열로 지정합니다.
#@since 1.9.1
@param options [[c:RDoc::Options]] 객체를 지정합니다.
#@else
@param options [[c:Options]] 객체를 지정합니다.
#@end
@param stats [[c:RDoc::Stats]] 객체를 지정합니다.
== Instance Methods
--- progress=(val)
진행상황을 출력할 [[c:IO]] 객체를 지정합니다.
@param val 진행상황을 출력할 [[c:IO]]를 지정합니다.지정하지 않으면
[[m:$stderr]]을 사용합니다.
--- scan -> RDoc::TopLevel
C 언어 소스 코드에서 내장 클래스와 모듈의 문서를
분석합니다.
@return [[c:RDoc::TopLevel]] 객체를 리턴합니다.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_feature_parameters.R
\name{calc_df_eff}
\alias{calc_df_eff}
\title{Calculate the effective degrees of freedom}
\usage{
calc_df_eff(X, experimental_design)
}
\description{
The effective degrees of freedom are the sum of all non NA observation
minus the number of non-empty conditions.
}
\keyword{internal}
| /man/calc_df_eff.Rd | no_license | const-ae/proDD | R | false | true | 387 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_feature_parameters.R
\name{calc_df_eff}
\alias{calc_df_eff}
\title{Calculate the effective degrees of freedom}
\usage{
calc_df_eff(X, experimental_design)
}
\description{
The effective degrees of freedom are the sum of all non NA observation
minus the number of non-empty conditions.
}
\keyword{internal}
|
## preparation of success value
purchase_volume <- curve(curve31,
sales_performance_of_all_companies_previous_period)
##
success_value <- list(average_customer_relationship = curve(curve45,
average_customer_relationship),
average_motivation = curve(curve46,average_motivation),
average_product_knowledge = curve(curve57,average_product_knowledge),
average_sales_skills = curve(curve48,average_sales_skills),
contribution_margin = curve(curve49,contribution_margin),
total_revenue = curve(curve51,total_revenue)) | /Codes/useless/cal_success_value.R | no_license | RachelAnqi/Sales_training_programme | R | false | false | 707 | r | ## preparation of success value
purchase_volume <- curve(curve31,
sales_performance_of_all_companies_previous_period)
##
success_value <- list(average_customer_relationship = curve(curve45,
average_customer_relationship),
average_motivation = curve(curve46,average_motivation),
average_product_knowledge = curve(curve57,average_product_knowledge),
average_sales_skills = curve(curve48,average_sales_skills),
contribution_margin = curve(curve49,contribution_margin),
total_revenue = curve(curve51,total_revenue)) |
#' Box plot of the water quality data by month
#'
#' @description
#' Data come from named list, which contains a Sample dataframe with the sample data,
#' and an INFO dataframe with metadata.
#'
#' Box widths are proportional to the square root of the number of samples in the month.
#'
#' Although there are a lot of optional arguments to this function, most are set to a logical default.
#'
#' @param eList named list with at least the Sample and INFO dataframes
#' @param printTitle logical variable if TRUE title is printed, if FALSE not printed (this is best for a multi-plot figure)
#' @param cex numerical value giving the amount by which plotting symbols should be magnified
#' @param cex.axis magnification to be used for axis annotation relative to the current setting of cex
#' @param cex.main magnification to be used for main titles relative to the current setting of cex
#' @param tcl number defaults to 0.5, specifies length of tick marks as fraction of height of a line of text
#' @param tinyPlot logical variable, if TRUE plot is designed to be plotted small as part of a multi-plot figure, default is FALSE.
#' @param logScale logical if TRUE y plotted in log axis
#' @param customPar logical defaults to FALSE. If TRUE, par() should be set by user before calling this function
#' @param las numeric in {0,1,2,3}; the style of axis labels, see ?par
#' @param showXLabels logical defaults to TRUE. If FALSE, the x axis label is not plotted
#' @param showYLabels logical defaults to TRUE. If FALSE, the y axis label is not plotted
#' @param showXAxis logical defaults to TRUE. If FALSE, the x axis is not plotted
#' @param showYAxis logical defaults to TRUE. If FALSE, the y axis is not plotted
#' @param concLab object of concUnit class, or numeric represented the short code,
#' or character representing the descriptive name. By default, this argument sets
#' concentration labels to use either Concentration or Conc (for tiny plots). Units
#' are taken from the eList$INFO$param.units. To use any other words than
#' "Concentration" see \code{vignette(topic = "units", package = "EGRET")}.
#' @param monthLab object of monthLabel class, or numeric represented the short code,
#' or character representing the descriptive name.
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics water-quality statistics
#' @seealso \code{\link[graphics]{boxplot}}
#' @export
#' @import methods
#' @examples
#' eList <- Choptank_eList
#' # Water year:
#' boxConcMonth(eList)
#' # Graphs consisting of Jun-Aug
#' eList <- setPA(eList, paStart=6,paLong=3)
#' boxConcMonth(eList)
#' spanish_month <- new("monthLabel",
#' monthAbbrev = c("enero", "feb", "marzo", "abr",
#' "mayo", "jun", "jul", "agosto", "set",
#' "oct", "nov", "dic"),
#' monthFull = c("enero", "febrero", "marzo", "abril",
#' "mayo", "junio", "julio", "agosto", "septiembre",
#' "octubre", "noviembre", "diciembre"),
#' monthSingle = c("E", "F", "M", "A", "M", "J", "J",
#' "A", "S", "O", "N", "D"))
#' boxConcMonth(eList, monthLab = spanish_month,
#' showXLabels = FALSE, printTitle = FALSE)
boxConcMonth <- function(eList, printTitle = TRUE,
cex = 0.8, cex.axis = 1.1,
cex.main = 1.1, las = 1,
logScale = FALSE, tcl = 0.5,
tinyPlot = FALSE, customPar = FALSE,
showYLabels = TRUE, concLab = 1,
showXLabels = TRUE, showXAxis = TRUE,
showYAxis = TRUE,
monthLab = 1, ...) {
localINFO <- getInfo(eList)
localSample <- getSample(eList)
if(sum(c("paStart","paLong") %in% names(localINFO)) == 2){
paLong <- localINFO$paLong
paStart <- localINFO$paStart
} else {
paLong <- 12
paStart <- 10
}
localSample <- if(paLong == 12) localSample else selectDays(localSample,paLong,paStart)
title2 <- if(paLong == 12) "" else setSeasonLabelByUser(paStartInput = paStart,paLongInput=paLong)
#This function makes a boxplot of log concentration by month
#Box width is proportional to the square root of the sample size
plotTitle <- if(printTitle) paste(localINFO$shortName, "\n", localINFO$paramShortName,"\nBoxplots of sample values by month") else ""
# nameList <- sapply(c(1:12),function(x){monthINFO[[x]]@monthSingle})
if (is.numeric(monthLab)){
monthInfo <- monthInfo[shortCode=monthLab][[1]]
} else if (is.character(monthLab)){
monthInfo <- monthInfo[monthLab][[1]]
} else {
monthInfo <- monthLab
}
nameList <- monthInfo@monthAbbrev
namesListFactor <- factor(nameList, levels = nameList)
monthList <- nameList[localSample$Month]
monthList <- factor(monthList, levels = nameList)
tempDF <- data.frame(month = monthList,
conc = localSample$ConcAve)
maxY <- 1.02*max(localSample$ConcHigh, na.rm = TRUE)
ySpan <- c(0,maxY)
if(logScale){
logScaleText <- "y"
} else {
logScaleText <- ""
}
if (tinyPlot) {
if (!customPar) par(mar = c(4,5,1,0.1),cex.lab = cex.axis, tcl = 0.5)
names <- monthInfo@monthSingle
} else {
if (!customPar) par(mar=c(5,6,4,2)+0.1,cex.lab=cex.axis,tcl=0.5)
names <- monthInfo@monthAbbrev
}
yInfo <- generalAxis(x = tempDF$conc,
maxVal = maxY,
minVal = min(localSample$ConcHigh, na.rm=TRUE),
tinyPlot = tinyPlot,
logScale = logScale,
concLab = concLab,
units = localINFO$param.units)
yTicksLab <- prettyNum(yInfo$ticks)
boxplot(tempDF$conc ~ tempDF$month,
ylim = c(yInfo$bottom,yInfo$top),
yaxs = "i",
yTicks = yInfo$ticks,
varwidth = TRUE, yaxt = "n",
names = names,
xlab = if(showXLabels) "Month" else "",
ylab = if(showYLabels) yInfo$label else "",
main = plotTitle,
cex = cex, cex.axis = cex.axis, cex.main = cex.main,
las = las, tcl = tcl,
log = logScaleText,
...)
if(showYAxis){
axis(2,tcl=tcl,las=las,at=yInfo$ticks,cex.axis=cex.axis,labels=yTicksLab)
} else {
axis(2,tcl=tcl,las=las,at=yInfo$ticks,cex.axis=cex.axis,labels=FALSE)
}
if (!tinyPlot) mtext(title2,side=3,line=-1.5)
} | /R/boxConcMonth.R | permissive | ldecicco-USGS/EGRET | R | false | false | 6,670 | r | #' Box plot of the water quality data by month
#'
#' @description
#' Data come from named list, which contains a Sample dataframe with the sample data,
#' and an INFO dataframe with metadata.
#'
#' Box widths are proportional to the square root of the number of samples in the month.
#'
#' Although there are a lot of optional arguments to this function, most are set to a logical default.
#'
#' @param eList named list with at least the Sample and INFO dataframes
#' @param printTitle logical variable if TRUE title is printed, if FALSE not printed (this is best for a multi-plot figure)
#' @param cex numerical value giving the amount by which plotting symbols should be magnified
#' @param cex.axis magnification to be used for axis annotation relative to the current setting of cex
#' @param cex.main magnification to be used for main titles relative to the current setting of cex
#' @param tcl number defaults to 0.5, specifies length of tick marks as fraction of height of a line of text
#' @param tinyPlot logical variable, if TRUE plot is designed to be plotted small as part of a multi-plot figure, default is FALSE.
#' @param logScale logical if TRUE y plotted in log axis
#' @param customPar logical defaults to FALSE. If TRUE, par() should be set by user before calling this function
#' @param las numeric in {0,1,2,3}; the style of axis labels, see ?par
#' @param showXLabels logical defaults to TRUE. If FALSE, the x axis label is not plotted
#' @param showYLabels logical defaults to TRUE. If FALSE, the y axis label is not plotted
#' @param showXAxis logical defaults to TRUE. If FALSE, the x axis is not plotted
#' @param showYAxis logical defaults to TRUE. If FALSE, the y axis is not plotted
#' @param concLab object of concUnit class, or numeric represented the short code,
#' or character representing the descriptive name. By default, this argument sets
#' concentration labels to use either Concentration or Conc (for tiny plots). Units
#' are taken from the eList$INFO$param.units. To use any other words than
#' "Concentration" see \code{vignette(topic = "units", package = "EGRET")}.
#' @param monthLab object of monthLabel class, or numeric represented the short code,
#' or character representing the descriptive name.
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics water-quality statistics
#' @seealso \code{\link[graphics]{boxplot}}
#' @export
#' @import methods
#' @examples
#' eList <- Choptank_eList
#' # Water year:
#' boxConcMonth(eList)
#' # Graphs consisting of Jun-Aug
#' eList <- setPA(eList, paStart=6,paLong=3)
#' boxConcMonth(eList)
#' spanish_month <- new("monthLabel",
#' monthAbbrev = c("enero", "feb", "marzo", "abr",
#' "mayo", "jun", "jul", "agosto", "set",
#' "oct", "nov", "dic"),
#' monthFull = c("enero", "febrero", "marzo", "abril",
#' "mayo", "junio", "julio", "agosto", "septiembre",
#' "octubre", "noviembre", "diciembre"),
#' monthSingle = c("E", "F", "M", "A", "M", "J", "J",
#' "A", "S", "O", "N", "D"))
#' boxConcMonth(eList, monthLab = spanish_month,
#' showXLabels = FALSE, printTitle = FALSE)
boxConcMonth <- function(eList, printTitle = TRUE,
cex = 0.8, cex.axis = 1.1,
cex.main = 1.1, las = 1,
logScale = FALSE, tcl = 0.5,
tinyPlot = FALSE, customPar = FALSE,
showYLabels = TRUE, concLab = 1,
showXLabels = TRUE, showXAxis = TRUE,
showYAxis = TRUE,
monthLab = 1, ...) {
localINFO <- getInfo(eList)
localSample <- getSample(eList)
if(sum(c("paStart","paLong") %in% names(localINFO)) == 2){
paLong <- localINFO$paLong
paStart <- localINFO$paStart
} else {
paLong <- 12
paStart <- 10
}
localSample <- if(paLong == 12) localSample else selectDays(localSample,paLong,paStart)
title2 <- if(paLong == 12) "" else setSeasonLabelByUser(paStartInput = paStart,paLongInput=paLong)
#This function makes a boxplot of log concentration by month
#Box width is proportional to the square root of the sample size
plotTitle <- if(printTitle) paste(localINFO$shortName, "\n", localINFO$paramShortName,"\nBoxplots of sample values by month") else ""
# nameList <- sapply(c(1:12),function(x){monthINFO[[x]]@monthSingle})
if (is.numeric(monthLab)){
monthInfo <- monthInfo[shortCode=monthLab][[1]]
} else if (is.character(monthLab)){
monthInfo <- monthInfo[monthLab][[1]]
} else {
monthInfo <- monthLab
}
nameList <- monthInfo@monthAbbrev
namesListFactor <- factor(nameList, levels = nameList)
monthList <- nameList[localSample$Month]
monthList <- factor(monthList, levels = nameList)
tempDF <- data.frame(month = monthList,
conc = localSample$ConcAve)
maxY <- 1.02*max(localSample$ConcHigh, na.rm = TRUE)
ySpan <- c(0,maxY)
if(logScale){
logScaleText <- "y"
} else {
logScaleText <- ""
}
if (tinyPlot) {
if (!customPar) par(mar = c(4,5,1,0.1),cex.lab = cex.axis, tcl = 0.5)
names <- monthInfo@monthSingle
} else {
if (!customPar) par(mar=c(5,6,4,2)+0.1,cex.lab=cex.axis,tcl=0.5)
names <- monthInfo@monthAbbrev
}
yInfo <- generalAxis(x = tempDF$conc,
maxVal = maxY,
minVal = min(localSample$ConcHigh, na.rm=TRUE),
tinyPlot = tinyPlot,
logScale = logScale,
concLab = concLab,
units = localINFO$param.units)
yTicksLab <- prettyNum(yInfo$ticks)
boxplot(tempDF$conc ~ tempDF$month,
ylim = c(yInfo$bottom,yInfo$top),
yaxs = "i",
yTicks = yInfo$ticks,
varwidth = TRUE, yaxt = "n",
names = names,
xlab = if(showXLabels) "Month" else "",
ylab = if(showYLabels) yInfo$label else "",
main = plotTitle,
cex = cex, cex.axis = cex.axis, cex.main = cex.main,
las = las, tcl = tcl,
log = logScaleText,
...)
if(showYAxis){
axis(2,tcl=tcl,las=las,at=yInfo$ticks,cex.axis=cex.axis,labels=yTicksLab)
} else {
axis(2,tcl=tcl,las=las,at=yInfo$ticks,cex.axis=cex.axis,labels=FALSE)
}
if (!tinyPlot) mtext(title2,side=3,line=-1.5)
} |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @describeIn imager.colourspaces RGB to HSL conversion
#' @export
RGBtoHSL <- function(im) {
.Call(`_imager_RGBtoHSL`, im)
}
#' @describeIn imager.colourspaces CIE RGB to CIE XYZ (1931) conversion, D65 white point
#' @export
RGBtoXYZ <- function(im) {
.Call(`_imager_RGBtoXYZ`, im)
}
#' @describeIn imager.colourspaces CIE XYZ to CIE RGB (1931) conversion, D65 white point
#' @export
XYZtoRGB <- function(im) {
.Call(`_imager_XYZtoRGB`, im)
}
#' @describeIn imager.colourspaces HSL to RGB conversion
#' @export
HSLtoRGB <- function(im) {
.Call(`_imager_HSLtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to HSV conversion
#' @export
RGBtoHSV <- function(im) {
.Call(`_imager_RGBtoHSV`, im)
}
#' @describeIn imager.colourspaces HSV to RGB conversion
#' @export
HSVtoRGB <- function(im) {
.Call(`_imager_HSVtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to HSI conversion
#' @export
RGBtoHSI <- function(im) {
.Call(`_imager_RGBtoHSI`, im)
}
#' @describeIn imager.colourspaces HSI to RGB conversion
#' @export
HSItoRGB <- function(im) {
.Call(`_imager_HSItoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to sRGB conversion
#' @export
RGBtosRGB <- function(im) {
.Call(`_imager_RGBtosRGB`, im)
}
#' @describeIn imager.colourspaces sRGB to RGB conversion
#' @export
sRGBtoRGB <- function(im) {
.Call(`_imager_sRGBtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to YCbCr conversion
#' @export
RGBtoYCbCr <- function(im) {
.Call(`_imager_RGBtoYCbCr`, im)
}
#' @describeIn imager.colourspaces YCbCr to RGB conversion
#' @export
YCbCrtoRGB <- function(im) {
.Call(`_imager_YCbCrtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to YUV conversion
#' @export
RGBtoYUV <- function(im) {
.Call(`_imager_RGBtoYUV`, im)
}
#' @describeIn imager.colourspaces YUV to RGB conversion
#' @export
YUVtoRGB <- function(im) {
.Call(`_imager_YUVtoRGB`, im)
}
#' @describeIn imager.colourspaces Lab to RGB (linear)
#' @export
LabtoRGB <- function(im) {
.Call(`_imager_LabtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB (linear) to Lab
#' @export
RGBtoLab <- function(im) {
.Call(`_imager_RGBtoLab`, im)
}
#' @describeIn imager.colourspaces Lab to XYZ
#' @export
LabtoXYZ <- function(im) {
.Call(`_imager_LabtoXYZ`, im)
}
#' @describeIn imager.colourspaces XYZ to Lab
#' @export
XYZtoLab <- function(im) {
.Call(`_imager_XYZtoLab`, im)
}
#' @describeIn imager.colourspaces Lab to sRGB
#' @export
LabtosRGB <- function(im) {
.Call(`_imager_LabtosRGB`, im)
}
#' @describeIn imager.colourspaces sRGB to Lab
#' @export
sRGBtoLab <- function(im) {
.Call(`_imager_sRGBtoLab`, im)
}
getXc <- function(x, y, z, c) {
.Call(`_imager_getXc`, x, y, z, c)
}
getYc <- function(x, y, z, c) {
.Call(`_imager_getYc`, x, y, z, c)
}
getZc <- function(x, y, z, c) {
.Call(`_imager_getZc`, x, y, z, c)
}
getCc <- function(x, y, z, c) {
.Call(`_imager_getCc`, x, y, z, c)
}
display_ <- function(im, rescale = TRUE) {
invisible(.Call(`_imager_display_`, im, rescale))
}
display_list <- function(imlist) {
invisible(.Call(`_imager_display_list`, imlist))
}
#' Play a video
#'
#' A very basic video player. Press the space bar to pause and ESC to close.
#' @param vid A cimg object, to be played as video
#' @param loop loop the video (default false)
#' @param delay delay between frames, in ms. Default 30.
#' @param normalise if true pixel values are rescaled to 0...255 (default TRUE). The normalisation is based on the *first frame*. If you don't want the default behaviour you can normalise by hand. Default TRUE.
#' @export
play <- function(vid, loop = FALSE, delay = 30L, normalise = TRUE) {
invisible(.Call(`_imager_play`, vid, loop, delay, normalise))
}
select <- function(im, type = 2L) {
.Call(`_imager_select`, im, type)
}
bucket_fill <- function(im, x, y, z, color, opacity = 1, sigma = 0, high_connexity = FALSE) {
.Call(`_imager_bucket_fill`, im, x, y, z, color, opacity, sigma, high_connexity)
}
bucket_select <- function(im, x, y, z, sigma = 0, high_connexity = FALSE) {
.Call(`_imager_bucket_select`, im, x, y, z, sigma, high_connexity)
}
draw_circle <- function(im, x, y, radius, color, opacity = 1, filled = TRUE) {
.Call(`_imager_draw_circle`, im, x, y, radius, color, opacity, filled)
}
draw_circle_ <- function(im, x, y, radius, color, opacity = 1L, filled = TRUE) {
.Call(`_imager_draw_circle_`, im, x, y, radius, color, opacity, filled)
}
draw_rect_ <- function(im, x0, y0, x1, y1, color, opacity = 1, filled = TRUE) {
.Call(`_imager_draw_rect_`, im, x0, y0, x1, y1, color, opacity, filled)
}
draw_poly_ <- function(im, points, color, opacity = 1) {
.Call(`_imager_draw_poly_`, im, points, color, opacity)
}
draw_text_ <- function(im, x, y, text, color, opacity = 1, fsize = 20L) {
.Call(`_imager_draw_text_`, im, x, y, text, color, opacity, fsize)
}
#' Apply recursive Deriche filter.
#'
#' The Deriche filter is a fast approximation to a Gaussian filter (order = 0), or Gaussian derivatives (order = 1 or 2).
#'
#' @param im an image
#' @param sigma Standard deviation of the filter.
#' @param order Order of the filter. 0 for a smoothing filter, 1 for first-derivative, 2 for second.
#' @param axis Axis along which the filter is computed ( 'x' , 'y', 'z' or 'c').
#' @param neumann If true, use Neumann boundary conditions (default false, Dirichlet)
#' @export
#' @examples
#' deriche(boats,sigma=2,order=0) %>% plot("Zeroth-order Deriche along x")
#' deriche(boats,sigma=2,order=1) %>% plot("First-order Deriche along x")
#' deriche(boats,sigma=2,order=1) %>% plot("Second-order Deriche along x")
#' deriche(boats,sigma=2,order=1,axis="y") %>% plot("Second-order Deriche along y")
deriche <- function(im, sigma, order = 0L, axis = 'x', neumann = FALSE) {
.Call(`_imager_deriche`, im, sigma, order, axis, neumann)
}
#' Young-Van Vliet recursive Gaussian filter.
#'
#' The Young-van Vliet filter is a fast approximation to a Gaussian filter (order = 0), or Gaussian derivatives (order = 1 or 2).
#'
#' @param im an image
#' @param sigma standard deviation of the Gaussian filter
#' @param order the order of the filter 0,1,2,3
#' @param axis Axis along which the filter is computed. One of 'x', 'y', 'z', 'c'
#' @param neumann If true, use Neumann boundary conditions (default false, Dirichlet)
#' @references
#' From: I.T. Young, L.J. van Vliet, M. van Ginkel, Recursive Gabor filtering.
#' IEEE Trans. Sig. Proc., vol. 50, pp. 2799-2805, 2002.
#' (this is an improvement over Young-Van Vliet, Sig. Proc. 44, 1995)
#'
#' Boundary conditions (only for order 0) using Triggs matrix, from
#' B. Triggs and M. Sdika. Boundary conditions for Young-van Vliet
#' recursive filtering. IEEE Trans. Signal Processing,
#' vol. 54, pp. 2365-2367, 2006.
#' @examples
#' vanvliet(boats,sigma=2,order=0) %>% plot("Zeroth-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=1) %>% plot("First-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=1) %>% plot("Second-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=1,axis="y") %>% plot("Second-order Young-van Vliet along y")
#' @export
vanvliet <- function(im, sigma, order = 0L, axis = 'x', neumann = FALSE) {
.Call(`_imager_vanvliet`, im, sigma, order, axis, neumann)
}
isoblur_ <- function(im, sigma, neumann = TRUE, gaussian = FALSE) {
.Call(`_imager_isoblur_`, im, sigma, neumann, gaussian)
}
#' Blur image with the median filter.
#'
#' In a window of size n x n centered at pixel (x,y), compute median pixel value over the window. Optionally, ignore values that are too far from the value at current pixel.
#'
#' @param im an image
#' @param n Size of the median filter.
#' @param threshold Threshold used to discard pixels too far from the current pixel value in the median computation. Can be used for edge-preserving smoothing. Default 0 (include all pixels in window).
#' @export
#' @examples
#' medianblur(boats,5) %>% plot(main="Median blur, 5 pixels")
#' medianblur(boats,10) %>% plot(main="Median blur, 10 pixels")
#' medianblur(boats,10,8) %>% plot(main="Median blur, 10 pixels, threshold = 8")
#' @seealso isoblur, boxblur
medianblur <- function(im, n, threshold = 0) {
.Call(`_imager_medianblur`, im, n, threshold)
}
#' Blur image with a box filter (square window)
#' @param im an image
#' @param boxsize Size of the box window (can be subpixel).
#' @param neumann If true, use Neumann boundary conditions, Dirichlet otherwise (default true, Neumann)
#' @seealso deriche(), vanvliet().
#' @examples
#' boxblur(boats,5) %>% plot(main="Dirichlet boundary")
#' boxblur(boats,5,TRUE) %>% plot(main="Neumann boundary")
#' @export
boxblur <- function(im, boxsize, neumann = TRUE) {
.Call(`_imager_boxblur`, im, boxsize, neumann)
}
#' Compute image Laplacian
#'
#' The Laplacian is the sum of second derivatives, approximated here using finite differences.
#' @param im an image
#' @examples
#' imlap(boats) %>% plot
#' @export
imlap <- function(im) {
.Call(`_imager_imlap`, im)
}
#' Blur image with a box filter.
#'
#' This is a recursive algorithm, not depending on the values of the box kernel size.
#'
#' @param im an image
#' @param sx Size of the box window, along the X-axis.
#' @param sy Size of the box window, along the Y-axis.
#' @param neumann If true, use Neumann boundary conditions, Dirichlet otherwise (default true, Neumann)
#' @seealso blur().
#'
#' @export
#' @examples
#' boxblur_xy(boats,20,5) %>% plot(main="Anisotropic blur")
boxblur_xy <- function(im, sx, sy, neumann = TRUE) {
# Auto-generated Rcpp wrapper; anisotropic box blur (docs above).
.Call(`_imager_boxblur_xy`, im, sx, sy, neumann)
}
#' Correlation/convolution of image by filter
#'
#' The correlation of image im by filter flt is defined as:
#' \eqn{res(x,y,z) = sum_{i,j,k} im(x + i,y + j,z + k)*flt(i,j,k).}
#' The convolution of an image img by filter flt is defined to be:
#' \eqn{res(x,y,z) = sum_{i,j,k} img(x-i,y-j,z-k)*flt(i,j,k)}
#'
#' @param im an image
#' @param filter the correlation kernel.
#' @param dirichlet boundary condition. Dirichlet if true, Neumann if false (default TRUE, Dirichlet)
#' @param normalise compute a normalised correlation (ie. local cosine similarity)
#'
#'
#' @export
#' @examples
#' #Edge filter
#' filter <- as.cimg(function(x,y) sign(x-5),10,10)
#' layout(t(1:2))
#' #Convolution vs. correlation
#' correlate(boats,filter) %>% plot(main="Correlation")
#' convolve(boats,filter) %>% plot(main="Convolution")
correlate <- function(im, filter, dirichlet = TRUE, normalise = FALSE) {
# Auto-generated Rcpp wrapper; correlation kernel semantics in docs above.
.Call(`_imager_correlate`, im, filter, dirichlet, normalise)
}
#' @describeIn correlate convolve image with filter
#' @export
convolve <- function(im, filter, dirichlet = TRUE, normalise = FALSE) {
# NOTE: this masks stats::convolve when the package is attached.
.Call(`_imager_convolve`, im, filter, dirichlet, normalise)
}
# Internal, undocumented wrapper; parameter semantics (sharpen_type, edge,
# alpha, sigma) are defined on the C++/CImg side — consult CImg docs.
sharpen <- function(im, amplitude, sharpen_type = FALSE, edge = 1, alpha = 0, sigma = 0) {
.Call(`_imager_sharpen`, im, amplitude, sharpen_type, edge, alpha, sigma)
}
#' Compute image gradient.
#'
#' @param im an image
#' @param axes Axes considered for the gradient computation, as a C-string (e.g "xy").
#' @param scheme Numerical scheme used for the gradient computation:
#' -1 = Backward finite differences
#' 0 = Centered finite differences
#' 1 = Forward finite differences
#' 2 = Using Sobel masks
#' 3 = Using rotation invariant masks
#' 4 = Using Deriche recursive filter.
#' 5 = Using Van Vliet recursive filter.
#' @return a list of images (corresponding to the different directions)
#' @export
#' @seealso imgradient
get_gradient <- function(im, axes = "", scheme = 3L) {
# Auto-generated Rcpp wrapper; returns a list of gradient images (docs above).
.Call(`_imager_get_gradient`, im, axes, scheme)
}
#' Return image hessian.
#' @param im an image
#' @param axes Axes considered for the hessian computation, as a character string (e.g "xy").
get_hessian <- function(im, axes = "") {
# Auto-generated Rcpp wrapper; note the roxygen block above has no @export.
.Call(`_imager_get_hessian`, im, axes)
}
#' Compute field of diffusion tensors for edge-preserving smoothing.
#'
#' @param im an image
#' @param sharpness Sharpness
#' @param anisotropy Anisotropy
#' @param alpha Standard deviation of the gradient blur.
#' @param sigma Standard deviation of the structure tensor blur.
#' @param is_sqrt Tells if the square root of the tensor field is computed instead.
#' @export
diffusion_tensors <- function(im, sharpness = 0.7, anisotropy = 0.6, alpha = 0.6, sigma = 1.1, is_sqrt = FALSE) {
# Auto-generated Rcpp wrapper; parameter semantics in the roxygen docs above.
.Call(`_imager_diffusion_tensors`, im, sharpness, anisotropy, alpha, sigma, is_sqrt)
}
#' Compute Haar multiscale wavelet transform.
#'
#' @param im an image
#' @param inverse Compute inverse transform (default FALSE)
#' @param nb_scales Number of scales used for the transform.
#' @export
#' @examples
#' #Image compression: set small Haar coefficients to 0
#' hr <- haar(boats,nb=3)
#' mask.low <- threshold(abs(hr),"75%")
#' mask.high <- threshold(abs(hr),"95%")
#' haar(hr*mask.low,inverse=TRUE,nb=3) %>% plot(main="75% compression")
#' haar(hr*mask.high,inverse=TRUE,nb=3) %>% plot(main="95% compression")
haar <- function(im, inverse = FALSE, nb_scales = 1L) {
# Auto-generated Rcpp wrapper; Haar wavelet transform (docs above).
.Call(`_imager_haar`, im, inverse, nb_scales)
}
# Internal FFT backends (complex input, real input, real output variants).
# Undocumented; not part of the public API. nb_threads = 0L presumably lets
# the backend choose the thread count — confirm on the C++ side.
FFT_complex <- function(real, imag, inverse = FALSE, nb_threads = 0L) {
.Call(`_imager_FFT_complex`, real, imag, inverse, nb_threads)
}
FFT_realim <- function(real, inverse = FALSE, nb_threads = 0L) {
.Call(`_imager_FFT_realim`, real, inverse, nb_threads)
}
FFT_realout <- function(real, imag, inverse = FALSE, nb_threads = 0L) {
.Call(`_imager_FFT_realout`, real, imag, inverse, nb_threads)
}
#' Estimate displacement field between two images.
#'
#' @param sourceIm Reference image.
#' @param destIm Destination image.
#' @param smoothness Smoothness of estimated displacement field.
#' @param precision Precision required for algorithm convergence.
#' @param nb_scales Number of scales used to estimate the displacement field.
#' @param iteration_max Maximum number of iterations allowed for one scale.
#' @param is_backward If false, match I2(X + U(X)) = I1(X), else match I2(X) = I1(X - U(X)).
#' @export
displacement <- function(sourceIm, destIm, smoothness = 0.1, precision = 5.0, nb_scales = 0L, iteration_max = 10000L, is_backward = FALSE) {
# Auto-generated Rcpp wrapper; estimates displacement field (docs above).
.Call(`_imager_displacement`, sourceIm, destIm, smoothness, precision, nb_scales, iteration_max, is_backward)
}
#' Blur image anisotropically, in an edge-preserving way.
#'
#' Standard blurring removes noise from images, but tends to smooth away edges in the process. This anisotropic filter preserves edges better.
#'
#' @param im an image
#' @param amplitude Amplitude of the smoothing.
#' @param sharpness Sharpness.
#' @param anisotropy Anisotropy.
#' @param alpha Standard deviation of the gradient blur.
#' @param sigma Standard deviation of the structure tensor blur.
#' @param dl Spatial discretization.
#' @param da Angular discretization.
#' @param gauss_prec Precision of the diffusion process.
#' @param interpolation_type Interpolation scheme.
#' Can be 0=nearest-neighbor | 1=linear | 2=Runge-Kutta
#' @param fast_approx If true, use fast approximation (default TRUE)
#' @export
#' @examples
#' im <- load.image(system.file('extdata/Leonardo_Birds.jpg',package='imager'))
#' im.noisy <- (im + 80*rnorm(prod(dim(im))))
#' blur_anisotropic(im.noisy,ampl=1e4,sharp=1) %>% plot
blur_anisotropic <- function(im, amplitude, sharpness = 0.7, anisotropy = 0.6, alpha = 0.6, sigma = 1.1, dl = 0.8, da = 30, gauss_prec = 2, interpolation_type = 0L, fast_approx = TRUE) {
# Auto-generated Rcpp wrapper; parameter semantics in the roxygen docs above.
.Call(`_imager_blur_anisotropic`, im, amplitude, sharpness, anisotropy, alpha, sigma, dl, da, gauss_prec, interpolation_type, fast_approx)
}
# --- Internal, undocumented wrappers (Hough transforms, interaction,
# --- interpolation). Semantics are defined on the C++ side.
periodic_part <- function(im) {
.Call(`_imager_periodic_part`, im)
}
hough_line_px <- function(px, theta) {
.Call(`_imager_hough_line_px`, px, theta)
}
hough_line_grad <- function(im, ntheta, alpha = 1.5) {
.Call(`_imager_hough_line_grad`, im, ntheta, alpha)
}
hough_circle_ <- function(px, radius) {
.Call(`_imager_hough_circle_`, px, radius)
}
bgraph <- function(px) {
.Call(`_imager_bgraph`, px)
}
interact_ <- function(fun, init, title = "") {
.Call(`_imager_interact_`, fun, init, title)
}
# Interpolation at arbitrary coordinates; `cubic` switches cubic vs. the
# non-cubic default. Variants differ in which axes are interpolated.
interp_xy <- function(inp, ix, iy, z = 0L, c = 0L, cubic = FALSE) {
.Call(`_imager_interp_xy`, inp, ix, iy, z, c, cubic)
}
interp_xyz <- function(inp, ix, iy, iz, c = 0L, cubic = FALSE) {
.Call(`_imager_interp_xyz`, inp, ix, iy, iz, c, cubic)
}
interp_xyzc <- function(inp, ix, iy, iz, ic, cubic = FALSE) {
.Call(`_imager_interp_xyzc`, inp, ix, iy, iz, ic, cubic)
}
interp_xyc <- function(inp, ix, iy, z, ic, cubic = FALSE) {
.Call(`_imager_interp_xyc`, inp, ix, iy, z, ic, cubic)
}
#' Label connected components.
#'
#' The algorithm of connected components computation has been primarily done
#' by A. Meijster, according to the publication:
#' 'W.H. Hesselink, A. Meijster, C. Bron, "Concurrent Determination of Connected Components.",
#' In: Science of Computer Programming 41 (2001), pp. 173--194'.
#'
#' @param im an image
#' @param high_connectivity 4(false)- or 8(true)-connectivity
#' in 2d case, and between 6(false)- or 26(true)-connectivity in 3d case. Default FALSE
#' @param tolerance Tolerance used to determine if two neighboring pixels belong to the same region.
#' @export
#' @examples
#' imname <- system.file('extdata/parrots.png',package='imager')
#' im <- load.image(imname) %>% grayscale
#' #Thresholding yields different discrete regions of high intensity
#' regions <- isoblur(im,10) %>% threshold("97%")
#' labels <- label(regions)
#' layout(t(1:2))
#' plot(regions,"Regions")
#' plot(labels,"Labels")
#'
label <- function(im, high_connectivity = FALSE, tolerance = 0) {
# Auto-generated Rcpp wrapper; connected-component labelling (docs above).
.Call(`_imager_label`, im, high_connectivity, tolerance)
}
# Internal, undocumented variant; the "b" prefix presumably marks the
# boolean/pixset version (cf. berode/bdilate below) — confirm on C++ side.
blabel <- function(im, high_connectivity = FALSE) {
.Call(`_imager_blabel`, im, high_connectivity)
}
#' Erode/dilate image by a structuring element.
#'
#' @param im an image
#' @param size size of the structuring element.
#' @param mask Structuring element.
#' @param boundary_conditions Boundary conditions. If FALSE, pixels beyond image boundaries are considered to be 0, if TRUE one. Default: TRUE.
#' @param real_mode If TRUE, perform erosion as defined on the reals. If FALSE, perform binary erosion (default FALSE).
#' @export
#' @examples
#' fname <- system.file('extdata/Leonardo_Birds.jpg',package='imager')
#' im <- load.image(fname) %>% grayscale
#' outline <- threshold(-im,"95%")
#' plot(outline)
#' mask <- imfill(5,10,val=1) #Rectangular mask
#' plot(erode(outline,mask))
#' plot(erode_rect(outline,5,10)) #Same thing
#' plot(erode_square(outline,5))
#' plot(dilate(outline,mask))
#' plot(dilate_rect(outline,5,10))
#' plot(dilate_square(outline,5))
erode <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
# Auto-generated Rcpp wrapper; morphological erosion (docs above).
.Call(`_imager_erode`, im, mask, boundary_conditions, real_mode)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
berode <- function(im, mask, boundary_conditions = TRUE) {
.Call(`_imager_berode`, im, mask, boundary_conditions)
}
#' @describeIn erode Erode image by a rectangular structuring element of specified size.
#' @param sx Width of the structuring element.
#' @param sy Height of the structuring element.
#' @param sz Depth of the structuring element.
#' @export
erode_rect <- function(im, sx, sy, sz = 1L) {
# Auto-generated Rcpp wrapper; rectangular structuring element (docs above).
.Call(`_imager_erode_rect`, im, sx, sy, sz)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
berode_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_berode_rect`, im, sx, sy, sz)
}
#' @describeIn erode Erode image by a square structuring element of specified size.
#'
#' @export
erode_square <- function(im, size) {
# Auto-generated Rcpp wrapper; square structuring element (docs above).
.Call(`_imager_erode_square`, im, size)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
berode_square <- function(im, size) {
.Call(`_imager_berode_square`, im, size)
}
#' @describeIn erode Dilate image by a structuring element.
#' @export
dilate <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
# Auto-generated Rcpp wrapper; morphological dilation (docs above on erode).
.Call(`_imager_dilate`, im, mask, boundary_conditions, real_mode)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
bdilate <- function(im, mask, boundary_conditions = TRUE) {
.Call(`_imager_bdilate`, im, mask, boundary_conditions)
}
#' @describeIn erode Dilate image by a rectangular structuring element of specified size
#' @export
dilate_rect <- function(im, sx, sy, sz = 1L) {
# Auto-generated Rcpp wrapper; rectangular structuring element (docs on erode).
.Call(`_imager_dilate_rect`, im, sx, sy, sz)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
bdilate_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_bdilate_rect`, im, sx, sy, sz)
}
#' @describeIn erode Dilate image by a square structuring element of specified size
#' @export
dilate_square <- function(im, size) {
# Auto-generated Rcpp wrapper; square structuring element (docs on erode).
.Call(`_imager_dilate_square`, im, size)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
bdilate_square <- function(im, size) {
.Call(`_imager_bdilate_square`, im, size)
}
#' Compute watershed transform.
#'
#' The watershed transform is a label propagation algorithm. The value of non-zero pixels will get propagated to their zero-value neighbours. The propagation is controlled by a priority map. See examples.
#' @param im an image
#' @param priority Priority map.
#' @param fill_lines Sets if watershed lines must be filled or not.
#' @examples
#' #In our initial image we'll place three seeds
#' #(non-zero pixels) at various locations, with values 1, 2 and 3.
#' #We'll use the watershed algorithm to propagate these values
#' imd <- function(x,y) imdirac(c(100,100,1,1),x,y)
#' im <- imd(20,20)+2*imd(40,40)+3*imd(80,80)
#' layout(t(1:3))
#' plot(im,main="Seed image")
#' #Now we build an priority map: neighbours of our seeds
#' #should get high priority.
#' #We'll use a distance map for that
#' p <- 1-distance_transform(sign(im),1)
#' plot(p,main="Priority map")
#' watershed(im,p) %>% plot(main="Watershed transform")
#' @export
watershed <- function(im, priority, fill_lines = TRUE) {
# Auto-generated Rcpp wrapper; label-propagation watershed (docs above).
.Call(`_imager_watershed`, im, priority, fill_lines)
}
#' Compute Euclidean distance function to a specified value.
#'
#' The distance transform implementation has been submitted by A. Meijster, and implements
#' the article 'W.H. Hesselink, A. Meijster, J.B.T.M. Roerdink,
#' "A general algorithm for computing distance transforms in linear time.",
#' In: Mathematical Morphology and its Applications to Image and Signal Processing,
#' J. Goutsias, L. Vincent, and D.S. Bloomberg (eds.), Kluwer, 2000, pp. 331-340.'
#' The submitted code has then been modified to fit CImg coding style and constraints.
#' @param im an image
#' @param value Reference value.
#' @param metric Type of metric. Can be <tt>{ 0=Chebyshev | 1=Manhattan | 2=Euclidean | 3=Squared-euclidean }</tt>.
#' @export
#' @examples
#' imd <- function(x,y) imdirac(c(100,100,1,1),x,y)
#' #Image is three white dots
#' im <- imd(20,20)+imd(40,40)+imd(80,80)
#' plot(im)
#' #How far are we from the nearest white dot?
#' distance_transform(im,1) %>% plot
distance_transform <- function(im, value, metric = 2L) {
# Auto-generated Rcpp wrapper; distance transform (docs above).
.Call(`_imager_distance_transform`, im, value, metric)
}
# Internal, undocumented "b"-prefixed variant (presumably pixset input).
bdistance_transform <- function(im, value = TRUE, metric = 2L) {
.Call(`_imager_bdistance_transform`, im, value, metric)
}
#' @describeIn erode Morphological opening (erosion followed by dilation)
#' @export
mopening <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
# Auto-generated Rcpp wrapper; morphological opening (docs on erode).
.Call(`_imager_mopening`, im, mask, boundary_conditions, real_mode)
}
#' @describeIn erode Morphological opening by a square element (erosion followed by dilation)
#' @export
mopening_square <- function(im, size) {
# Auto-generated Rcpp wrapper; opening by a square element (docs on erode).
.Call(`_imager_mopening_square`, im, size)
}
#' @describeIn erode Morphological closing by a square element (dilation followed by erosion)
#' @export
mclosing_square <- function(im, size) {
# Auto-generated Rcpp wrapper; closing by a square element (docs on erode).
.Call(`_imager_mclosing_square`, im, size)
}
#' @describeIn erode Morphological closing (dilation followed by erosion)
#' @export
mclosing <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
# Auto-generated Rcpp wrapper; morphological closing (docs on erode).
.Call(`_imager_mclosing`, im, mask, boundary_conditions, real_mode)
}
# --- Internal, undocumented reduction helpers. Judging by the names they
# --- reduce a collection `x` element-wise (weighted sum, average, product,
# --- min/max, median) — exact semantics are on the C++ side; confirm there.
reduce_wsum <- function(x, w, na_rm = FALSE) {
.Call(`_imager_reduce_wsum`, x, w, na_rm)
}
reduce_average <- function(x, na_rm = FALSE) {
.Call(`_imager_reduce_average`, x, na_rm)
}
reduce_prod <- function(x, na_rm = FALSE) {
.Call(`_imager_reduce_prod`, x, na_rm)
}
reduce_minmax <- function(x, na_rm = FALSE, max = TRUE) {
.Call(`_imager_reduce_minmax`, x, na_rm, max)
}
reduce_list <- function(x, summary = 0L) {
.Call(`_imager_reduce_list`, x, summary)
}
reduce_list2 <- function(x, summary = 0L) {
.Call(`_imager_reduce_list2`, x, summary)
}
reduce_med <- function(x, na_rm = FALSE) {
.Call(`_imager_reduce_med`, x, na_rm)
}
# Internal sort/order/rank helpers; undocumented.
psort <- function(x, increasing = TRUE) {
.Call(`_imager_psort`, x, increasing)
}
porder <- function(x, increasing = TRUE) {
.Call(`_imager_porder`, x, increasing)
}
prank <- function(x, increasing = TRUE) {
.Call(`_imager_prank`, x, increasing)
}
# Internal backends for cropping and rotation; undocumented here.
autocrop_ <- function(im, color, axes = "zyx") {
.Call(`_imager_autocrop_`, im, color, axes)
}
rotate <- function(im, angle, interpolation = 1L, boundary = 0L) {
.Call(`_imager_rotate`, im, angle, interpolation, boundary)
}
#' Rotate image by an arbitrary angle, around a center point.
#'
#' @param im an image
#' @param angle Rotation angle, in degrees.
#' @param cx X-coordinate of the rotation center.
#' @param cy Y-coordinate of the rotation center.
#' @param interpolation Interpolation type. 0=nearest | 1=linear | 2=cubic
#' @param boundary_conditions Boundary conditions. 0=dirichlet | 1=neumann | 2=periodic
#' @examples
#' rotate_xy(boats,30,200,400) %>% plot
#' rotate_xy(boats,30,200,400,boundary=2) %>% plot
#' @export
rotate_xy <- function(im, angle, cx, cy, interpolation = 1L, boundary_conditions = 0L) {
# Auto-generated Rcpp wrapper; rotation about (cx, cy) (docs above).
.Call(`_imager_rotate_xy`, im, angle, cx, cy, interpolation, boundary_conditions)
}
#' Mirror image content along specified axis
#' @param im an image
#' @param axis Mirror axis ("x","y","z","c")
#' @export
#' @examples
#' mirror(boats,"x") %>% plot
#' mirror(boats,"y") %>% plot
mirror <- function(im, axis) {
# Auto-generated Rcpp wrapper; mirrors along one of "x","y","z","c".
.Call(`_imager_mirror`, im, axis)
}
#' Permute image axes
#'
#' By default images are stored in xyzc order. Use permute_axes to change that order.
#' @param im an image
#' @param perm a character string, e.g., "zxyc" to have the z-axis come first
#' @export
#' @examples
#' im <- array(0,c(10,30,40,3)) %>% as.cimg
#' permute_axes(im,"zxyc")
permute_axes <- function(im, perm) {
# Auto-generated Rcpp wrapper; reorders the xyzc axes per `perm` (docs above).
.Call(`_imager_permute_axes`, im, perm)
}
#' @describeIn resize_uniform Double size
#' @export
resize_doubleXY <- function(im) {
# Auto-generated Rcpp wrapper; see resize_uniform docs.
.Call(`_imager_resize_doubleXY`, im)
}
#' @describeIn resize_uniform Half size
#' @export
resize_halfXY <- function(im) {
# Auto-generated Rcpp wrapper; see resize_uniform docs.
.Call(`_imager_resize_halfXY`, im)
}
#' @describeIn resize_uniform Triple size
#' @export
resize_tripleXY <- function(im) {
# Auto-generated Rcpp wrapper; see resize_uniform docs.
.Call(`_imager_resize_tripleXY`, im)
}
#' Shift image content.
#'
#' @param im an image
#' @param delta_x Amount of displacement along the X-axis.
#' @param delta_y Amount of displacement along the Y-axis.
#' @param delta_z Amount of displacement along the Z-axis.
#' @param delta_c Amount of displacement along the C-axis.
#' @param boundary_conditions can be:
#' - 0: Zero border condition (Dirichlet).
#' - 1: Nearest neighbors (Neumann).
#' - 2: Repeat Pattern (Fourier style).
#' @export
#' @examples
#' imshift(boats,10,50) %>% plot
imshift <- function(im, delta_x = 0L, delta_y = 0L, delta_z = 0L, delta_c = 0L, boundary_conditions = 0L) {
# Auto-generated Rcpp wrapper; shifts content along each axis (docs above).
.Call(`_imager_imshift`, im, delta_x, delta_y, delta_z, delta_c, boundary_conditions)
}
#' Resize image
#'
#' If the dimension arguments are negative, they are interpreted as a proportion of the original image.
#' @param im an image
#' @param size_x Number of columns (new size along the X-axis).
#' @param size_y Number of rows (new size along the Y-axis).
#' @param size_z Number of slices (new size along the Z-axis).
#' @param size_c Number of vector-channels (new size along the C-axis).
#' @param interpolation_type Method of interpolation:
#' -1 = no interpolation: raw memory resizing.
#' 0 = no interpolation: additional space is filled according to boundary_conditions.
#' 1 = nearest-neighbor interpolation.
#' 2 = moving average interpolation.
#' 3 = linear interpolation.
#' 4 = grid interpolation.
#' 5 = cubic interpolation.
#' 6 = lanczos interpolation.
#' @param boundary_conditions Border condition type.
#' @param centering_x Set centering type (only if interpolation_type=0).
#' @param centering_y Set centering type (only if interpolation_type=0).
#' @param centering_z Set centering type (only if interpolation_type=0).
#' @param centering_c Set centering type (only if interpolation_type=0).
#' @seealso See imresize for an easier interface.
#' @export
resize <- function(im, size_x = -100L, size_y = -100L, size_z = -100L, size_c = -100L, interpolation_type = 1L, boundary_conditions = 0L, centering_x = 0, centering_y = 0, centering_z = 0, centering_c = 0) {
# Auto-generated Rcpp wrapper. Negative sizes mean "percent of original"
# (so the -100L defaults keep each dimension unchanged); see docs above.
.Call(`_imager_resize`, im, size_x, size_y, size_z, size_c, interpolation_type, boundary_conditions, centering_x, centering_y, centering_z, centering_c)
}
#' Warp image
#'
#' @param im an image
#' @param warpfield Warping field. The (x,y,z) fields should be stacked along the colour coordinate.
#' @param mode Can be { 0=backward-absolute | 1=backward-relative | 2=forward-absolute | 3=forward-relative }
#' @param interpolation Can be <tt>{ 0=nearest | 1=linear | 2=cubic }</tt>.
#' @param boundary_conditions Boundary conditions. Can be <tt>{ 0=dirichlet | 1=neumann | 2=periodic }</tt>.
#' @seealso imwarp for a user-friendly interface
#' @export
#' @examples
#' #Shift image via warp
#' warp.x <- imfill(width(boats),height(boats),val=5)
#' warp.y <- imfill(width(boats),height(boats),val=20)
#' warpfield <- list(warp.x,warp.y) %>% imappend("c")
#' warp(boats,warpfield,mode=1) %>% plot
warp <- function(im, warpfield, mode = 0L, interpolation = 1L, boundary_conditions = 0L) {
# Auto-generated Rcpp wrapper; warps im by a displacement field (docs above).
.Call(`_imager_warp`, im, warpfield, mode, interpolation, boundary_conditions)
}
# Internal image file I/O backends; undocumented.
load_image <- function(fname) {
.Call(`_imager_load_image`, fname)
}
save_image <- function(im, fname) {
# invisible(): called only for its file-writing side effect.
invisible(.Call(`_imager_save_image`, im, fname))
}
#' Split an image along a certain axis (producing a list)
#'
#' @param im an image
#' @param axis the axis along which to split (for example 'c')
#' @param nb number of objects to split into.
#' if nb=-1 (the default) the maximum number of splits is used ie. split(im,"c") produces a list containing all individual colour channels
#' @seealso imappend (the reverse operation)
im_split <- function(im, axis, nb = -1L) {
# Auto-generated Rcpp wrapper; splits an image along an axis (docs above).
# Note the roxygen block above carries no @export tag.
.Call(`_imager_im_split`, im, axis, nb)
}
# Internal concatenation backends for lists of images/pixsets; undocumented.
im_append <- function(imlist, axis) {
.Call(`_imager_im_append`, imlist, axis)
}
px_append <- function(imlist, axis) {
.Call(`_imager_px_append`, imlist, axis)
}
#' Extract a numerical summary from image patches, using CImg's mini-language
#' Experimental feature.
#' @param im an image
#' @param expr a CImg expression (as a string)
#' @param cx vector of x coordinates for patch centers
#' @param cy vector of y coordinates for patch centers
#' @param wx vector of coordinates for patch width
#' @param wy vector of coordinates for patch height
#' @examples
#' #Example: median filtering using patch_summary_cimg
#' #Center a patch at each pixel
#' im <- grayscale(boats)
#' patches <- pixel.grid(im) %>% dplyr::mutate(w=3,h=3)
#' #Extract patch summary
#' out <- dplyr::mutate(patches,med=patch_summary_cimg(im,"ic",x,y,w,h))
#' as.cimg(out,v.name="med") %>% plot
#' @export
patch_summary_cimg <- function(im, expr, cx, cy, wx, wy) {
# Auto-generated Rcpp wrapper; evaluates a CImg expression per patch (docs above).
.Call(`_imager_patch_summary_cimg`, im, expr, cx, cy, wx, wy)
}
# Internal, undocumented patch-extraction helper; `fun` semantics are
# defined on the C++ side.
extract_fast <- function(im, fun, cx, cy, wx, wy) {
.Call(`_imager_extract_fast`, im, fun, cx, cy, wx, wy)
}
#' Extract image patches and return a list
#'
#' Patches are rectangular (cubic) image regions centered at cx,cy (cz) with width wx and height wy (opt. depth wz)
#' WARNINGS:
#' - values outside of the image region are subject to boundary conditions. The default is to set them to 0 (Dirichlet), other boundary conditions are listed below.
#' - widths and heights should be odd integers (they're rounded up otherwise).
#' @param im an image
#' @param cx vector of x coordinates for patch centers
#' @param cy vector of y coordinates for patch centers
#' @param wx vector of patch widths (or single value)
#' @param wy vector of patch heights (or single value)
#' @param boundary_conditions integer. Can be 0 (Dirichlet, default), 1 (Neumann) 2 (Periodic) 3 (mirror).
#' @return a list of image patches (cimg objects)
#' @export
#' @examples
#' #2 patches of size 5x5 located at (10,10) and (10,20)
#' extract_patches(boats,c(10,10),c(10,20),5,5)
extract_patches <- function(im, cx, cy, wx, wy, boundary_conditions = 0L) {
# Auto-generated Rcpp wrapper; returns a list of patches (docs above).
.Call(`_imager_extract_patches`, im, cx, cy, wx, wy, boundary_conditions)
}
#' @param cz vector of z coordinates for patch centers
#' @param wz vector of coordinates for patch depth
#' @describeIn extract_patches Extract 3D patches
#' @export
extract_patches3D <- function(im, cx, cy, cz, wx, wy, wz, boundary_conditions = 0L) {
# Auto-generated Rcpp wrapper; 3D variant of extract_patches (docs above).
.Call(`_imager_extract_patches3D`, im, cx, cy, cz, wx, wy, wz, boundary_conditions)
}
# --- Internal, undocumented wrappers below; semantics on the C++ side. ---
draw_image <- function(im, sprite, x = 0L, y = 0L, z = 0L, opacity = 1) {
.Call(`_imager_draw_image`, im, sprite, x, y, z, opacity)
}
do_patchmatch <- function(im1, im2, patch_width, patch_height, patch_depth, nb_iterations, nb_randoms, guide) {
.Call(`_imager_do_patchmatch`, im1, im2, patch_width, patch_height, patch_depth, nb_iterations, nb_randoms, guide)
}
checkcoords <- function(x, y, z, c, d) {
.Call(`_imager_checkcoords`, x, y, z, c, d)
}
# OpenMP query/control helpers; exact meaning of `mode` is on the C++ side.
cimg_omp <- function() {
.Call(`_imager_cimg_omp`)
}
set_cimg_omp <- function(mode) {
.Call(`_imager_set_cimg_omp`, mode)
}
has_omp <- function() {
.Call(`_imager_has_omp`)
}
px_split <- function(im, axis, nb = -1L) {
.Call(`_imager_px_split`, im, axis, nb)
}
# Video I/O; `vpipe` is presumably an external pipe/command — confirm on C++ side.
read_video <- function(vpipe, cimg_array, nframes, width, height, block_size) {
.Call(`_imager_read_video`, vpipe, cimg_array, nframes, width, height, block_size)
}
save_video <- function(vpipe, cimg_array, nframes, width, height, block_size) {
.Call(`_imager_save_video`, vpipe, cimg_array, nframes, width, height, block_size)
}
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @describeIn imager.colourspaces RGB to HSL conversion
#' @export
RGBtoHSL <- function(im) {
# All colourspace conversions in this section are auto-generated Rcpp
# wrappers that delegate to the corresponding compiled routine; see the
# imager.colourspaces roxygen topic for usage.
.Call(`_imager_RGBtoHSL`, im)
}
#' @describeIn imager.colourspaces CIE RGB to CIE XYZ (1931) conversion, D65 white point
#' @export
RGBtoXYZ <- function(im) {
.Call(`_imager_RGBtoXYZ`, im)
}
#' @describeIn imager.colourspaces CIE XYZ to CIE RGB (1931) conversion, D65 white point
#' @export
XYZtoRGB <- function(im) {
.Call(`_imager_XYZtoRGB`, im)
}
#' @describeIn imager.colourspaces HSL to RGB conversion
#' @export
HSLtoRGB <- function(im) {
.Call(`_imager_HSLtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to HSV conversion
#' @export
RGBtoHSV <- function(im) {
.Call(`_imager_RGBtoHSV`, im)
}
#' @describeIn imager.colourspaces HSV to RGB conversion
#' @export
HSVtoRGB <- function(im) {
.Call(`_imager_HSVtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to HSI conversion
#' @export
RGBtoHSI <- function(im) {
.Call(`_imager_RGBtoHSI`, im)
}
#' @describeIn imager.colourspaces HSI to RGB conversion
#' @export
HSItoRGB <- function(im) {
.Call(`_imager_HSItoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to sRGB conversion
#' @export
RGBtosRGB <- function(im) {
.Call(`_imager_RGBtosRGB`, im)
}
#' @describeIn imager.colourspaces sRGB to RGB conversion
#' @export
sRGBtoRGB <- function(im) {
.Call(`_imager_sRGBtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to YCbCr conversion
#' @export
RGBtoYCbCr <- function(im) {
.Call(`_imager_RGBtoYCbCr`, im)
}
#' @describeIn imager.colourspaces YCbCr to RGB conversion
#' @export
YCbCrtoRGB <- function(im) {
.Call(`_imager_YCbCrtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB to YUV conversion
#' @export
RGBtoYUV <- function(im) {
.Call(`_imager_RGBtoYUV`, im)
}
#' @describeIn imager.colourspaces YUV to RGB conversion
#' @export
YUVtoRGB <- function(im) {
.Call(`_imager_YUVtoRGB`, im)
}
#' @describeIn imager.colourspaces Lab to RGB (linear)
#' @export
LabtoRGB <- function(im) {
.Call(`_imager_LabtoRGB`, im)
}
#' @describeIn imager.colourspaces RGB (linear) to Lab
#' @export
RGBtoLab <- function(im) {
.Call(`_imager_RGBtoLab`, im)
}
#' @describeIn imager.colourspaces Lab to XYZ
#' @export
LabtoXYZ <- function(im) {
.Call(`_imager_LabtoXYZ`, im)
}
#' @describeIn imager.colourspaces XYZ to Lab
#' @export
XYZtoLab <- function(im) {
.Call(`_imager_XYZtoLab`, im)
}
#' @describeIn imager.colourspaces Lab to sRGB
#' @export
LabtosRGB <- function(im) {
.Call(`_imager_LabtosRGB`, im)
}
#' @describeIn imager.colourspaces sRGB to Lab
#' @export
sRGBtoLab <- function(im) {
.Call(`_imager_sRGBtoLab`, im)
}
# Internal, undocumented coordinate helpers; semantics on the C++ side.
getXc <- function(x, y, z, c) {
.Call(`_imager_getXc`, x, y, z, c)
}
getYc <- function(x, y, z, c) {
.Call(`_imager_getYc`, x, y, z, c)
}
getZc <- function(x, y, z, c) {
.Call(`_imager_getZc`, x, y, z, c)
}
getCc <- function(x, y, z, c) {
.Call(`_imager_getCc`, x, y, z, c)
}
# Internal display backends; invisible() because they are called only for
# their window-opening side effect.
display_ <- function(im, rescale = TRUE) {
invisible(.Call(`_imager_display_`, im, rescale))
}
display_list <- function(imlist) {
invisible(.Call(`_imager_display_list`, imlist))
}
#' Play a video
#'
#' A very basic video player. Press the space bar to pause and ESC to close.
#' @param vid A cimg object, to be played as video
#' @param loop loop the video (default false)
#' @param delay delay between frames, in ms. Default 30.
#' @param normalise if true pixel values are rescaled to 0...255 (default TRUE). The normalisation is based on the *first frame*. If you don't want the default behaviour you can normalise by hand. Default TRUE.
#' @export
play <- function(vid, loop = FALSE, delay = 30L, normalise = TRUE) {
# Auto-generated Rcpp wrapper; invisible() because play() is called only
# for its display side effect (docs above).
invisible(.Call(`_imager_play`, vid, loop, delay, normalise))
}
# --- Internal, undocumented interactive-selection and drawing backends.
# --- Argument semantics are defined on the C++ side. Note `select` will
# --- mask dplyr::select if both namespaces are attached.
select <- function(im, type = 2L) {
.Call(`_imager_select`, im, type)
}
bucket_fill <- function(im, x, y, z, color, opacity = 1, sigma = 0, high_connexity = FALSE) {
.Call(`_imager_bucket_fill`, im, x, y, z, color, opacity, sigma, high_connexity)
}
bucket_select <- function(im, x, y, z, sigma = 0, high_connexity = FALSE) {
.Call(`_imager_bucket_select`, im, x, y, z, sigma, high_connexity)
}
draw_circle <- function(im, x, y, radius, color, opacity = 1, filled = TRUE) {
.Call(`_imager_draw_circle`, im, x, y, radius, color, opacity, filled)
}
draw_circle_ <- function(im, x, y, radius, color, opacity = 1L, filled = TRUE) {
.Call(`_imager_draw_circle_`, im, x, y, radius, color, opacity, filled)
}
draw_rect_ <- function(im, x0, y0, x1, y1, color, opacity = 1, filled = TRUE) {
.Call(`_imager_draw_rect_`, im, x0, y0, x1, y1, color, opacity, filled)
}
draw_poly_ <- function(im, points, color, opacity = 1) {
.Call(`_imager_draw_poly_`, im, points, color, opacity)
}
draw_text_ <- function(im, x, y, text, color, opacity = 1, fsize = 20L) {
.Call(`_imager_draw_text_`, im, x, y, text, color, opacity, fsize)
}
#' Apply recursive Deriche filter.
#'
#' The Deriche filter is a fast approximation to a Gaussian filter (order = 0), or Gaussian derivatives (order = 1 or 2).
#'
#' @param im an image
#' @param sigma Standard deviation of the filter.
#' @param order Order of the filter. 0 for a smoothing filter, 1 for first-derivative, 2 for second.
#' @param axis Axis along which the filter is computed ( 'x' , 'y', 'z' or 'c').
#' @param neumann If true, use Neumann boundary conditions (default false, Dirichlet)
#' @export
#' @examples
#' deriche(boats,sigma=2,order=0) %>% plot("Zeroth-order Deriche along x")
#' deriche(boats,sigma=2,order=1) %>% plot("First-order Deriche along x")
#' deriche(boats,sigma=2,order=2) %>% plot("Second-order Deriche along x")
#' deriche(boats,sigma=2,order=2,axis="y") %>% plot("Second-order Deriche along y")
deriche <- function(im, sigma, order = 0L, axis = 'x', neumann = FALSE) {
# Auto-generated Rcpp wrapper; recursive Deriche filter (docs above).
.Call(`_imager_deriche`, im, sigma, order, axis, neumann)
}
#' Young-Van Vliet recursive Gaussian filter.
#'
#' The Young-van Vliet filter is a fast approximation to a Gaussian filter (order = 0), or Gaussian derivatives (order = 1 or 2).
#'
#' @param im an image
#' @param sigma standard deviation of the Gaussian filter
#' @param order the order of the filter 0,1,2,3
#' @param axis Axis along which the filter is computed. One of 'x', 'y', 'z', 'c'
#' @param neumann If true, use Neumann boundary conditions (default false, Dirichlet)
#' @references
#' From: I.T. Young, L.J. van Vliet, M. van Ginkel, Recursive Gabor filtering.
#' IEEE Trans. Sig. Proc., vol. 50, pp. 2799-2805, 2002.
#' (this is an improvement over Young-Van Vliet, Sig. Proc. 44, 1995)
#'
#' Boundary conditions (only for order 0) using Triggs matrix, from
#' B. Triggs and M. Sdika. Boundary conditions for Young-van Vliet
#' recursive filtering. IEEE Trans. Signal Processing,
#' vol. 54, pp. 2365-2367, 2006.
#' @examples
#' vanvliet(boats,sigma=2,order=0) %>% plot("Zeroth-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=1) %>% plot("First-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=2) %>% plot("Second-order Young-van Vliet along x")
#' vanvliet(boats,sigma=2,order=2,axis="y") %>% plot("Second-order Young-van Vliet along y")
#' @export
vanvliet <- function(im, sigma, order = 0L, axis = 'x', neumann = FALSE) {
# Auto-generated Rcpp wrapper: all computation happens in the compiled routine.
.Call(`_imager_vanvliet`, im, sigma, order, axis, neumann)
}
# Internal backend (trailing underscore): not part of the documented API;
# presumably wrapped by a user-facing isoblur() elsewhere — confirm in package.
isoblur_ <- function(im, sigma, neumann = TRUE, gaussian = FALSE) {
.Call(`_imager_isoblur_`, im, sigma, neumann, gaussian)
}
#' Blur image with the median filter.
#'
#' In a window of size n x n centered at pixel (x,y), compute median pixel value over the window. Optionally, ignore values that are too far from the value at current pixel.
#'
#' @param im an image
#' @param n Size of the median filter.
#' @param threshold Threshold used to discard pixels too far from the current pixel value in the median computation. Can be used for edge-preserving smoothing. Default 0 (include all pixels in window).
#' @export
#' @examples
#' medianblur(boats,5) %>% plot(main="Median blur, 5 pixels")
#' medianblur(boats,10) %>% plot(main="Median blur, 10 pixels")
#' medianblur(boats,10,8) %>% plot(main="Median blur, 10 pixels, threshold = 8")
#' @seealso isoblur, boxblur
medianblur <- function(im, n, threshold = 0) {
.Call(`_imager_medianblur`, im, n, threshold)
}
#' Blur image with a box filter (square window)
#' @param im an image
#' @param boxsize Size of the box window (can be subpixel).
#' @param neumann If true, use Neumann boundary conditions, Dirichlet otherwise (default true, Neumann)
#' @seealso deriche(), vanvliet().
#' @examples
#' boxblur(boats,5,FALSE) %>% plot(main="Dirichlet boundary")
#' boxblur(boats,5,TRUE) %>% plot(main="Neumann boundary")
#' @export
boxblur <- function(im, boxsize, neumann = TRUE) {
    .Call(`_imager_boxblur`, im, boxsize, neumann)
}
#' Compute image Laplacian
#'
#' The Laplacian is the sum of second derivatives, approximated here using finite differences.
#' @param im an image
#' @examples
#' imlap(boats) %>% plot
#' @export
imlap <- function(im) {
.Call(`_imager_imlap`, im)
}
#' Blur image with a box filter.
#'
#' This is a recursive algorithm, not depending on the values of the box kernel size.
#'
#' @param im an image
#' @param sx Size of the box window, along the X-axis.
#' @param sy Size of the box window, along the Y-axis.
#' @param neumann If true, use Neumann boundary conditions, Dirichlet otherwise (default true, Neumann)
#' @seealso blur().
#'
#' @export
#' @examples
#' boxblur_xy(boats,20,5) %>% plot(main="Anisotropic blur")
boxblur_xy <- function(im, sx, sy, neumann = TRUE) {
.Call(`_imager_boxblur_xy`, im, sx, sy, neumann)
}
#' Correlation/convolution of image by filter
#'
#' The correlation of image im by filter flt is defined as:
#' \eqn{res(x,y,z) = sum_{i,j,k} im(x + i,y + j,z + k)*flt(i,j,k).}
#' The convolution of an image img by filter flt is defined to be:
#' \eqn{res(x,y,z) = sum_{i,j,k} img(x-i,y-j,z-k)*flt(i,j,k)}
#'
#' @param im an image
#' @param filter the correlation kernel.
#' @param dirichlet boundary condition. Dirichlet if true, Neumann if false (default TRUE, Dirichlet)
#' @param normalise compute a normalised correlation (ie. local cosine similarity)
#'
#'
#' @export
#' @examples
#' #Edge filter
#' filter <- as.cimg(function(x,y) sign(x-5),10,10)
#' layout(t(1:2))
#' #Convolution vs. correlation
#' correlate(boats,filter) %>% plot(main="Correlation")
#' convolve(boats,filter) %>% plot(main="Convolution")
correlate <- function(im, filter, dirichlet = TRUE, normalise = FALSE) {
.Call(`_imager_correlate`, im, filter, dirichlet, normalise)
}
#' @describeIn correlate convolve image with filter
#' @export
convolve <- function(im, filter, dirichlet = TRUE, normalise = FALSE) {
.Call(`_imager_convolve`, im, filter, dirichlet, normalise)
}
# Internal wrapper for the compiled sharpening routine; all parameters are
# forwarded verbatim. NOTE(review): the semantics of `sharpen_type`, `edge`,
# `alpha` and `sigma` are defined on the C++ side -- confirm there.
sharpen <- function(im, amplitude, sharpen_type = FALSE, edge = 1, alpha = 0, sigma = 0) {
    .Call(`_imager_sharpen`, im, amplitude, sharpen_type, edge, alpha, sigma)
}
#' Compute image gradient.
#'
#' @param im an image
#' @param axes Axes considered for the gradient computation, as a C-string (e.g "xy").
#' @param scheme Numerical scheme used for the gradient computation:
#' -1 = Backward finite differences
#' 0 = Centered finite differences
#' 1 = Forward finite differences
#' 2 = Using Sobel masks
#' 3 = Using rotation invariant masks
#' 4 = Using Deriche recursive filter.
#' 5 = Using Van Vliet recursive filter.
#' @return a list of images (corresponding to the different directions)
#' @export
#' @seealso imgradient
get_gradient <- function(im, axes = "", scheme = 3L) {
    .Call(`_imager_get_gradient`, im, axes, scheme)
}
#' Return image hessian.
#' @param im an image
#' @param axes Axes considered for the hessian computation, as a character string (e.g "xy").
get_hessian <- function(im, axes = "") {
.Call(`_imager_get_hessian`, im, axes)
}
#' Compute field of diffusion tensors for edge-preserving smoothing.
#'
#' @param im an image
#' @param sharpness Sharpness
#' @param anisotropy Anisotropy
#' @param alpha Standard deviation of the gradient blur.
#' @param sigma Standard deviation of the structure tensor blur.
#' @param is_sqrt Tells if the square root of the tensor field is computed instead.
#' @export
diffusion_tensors <- function(im, sharpness = 0.7, anisotropy = 0.6, alpha = 0.6, sigma = 1.1, is_sqrt = FALSE) {
.Call(`_imager_diffusion_tensors`, im, sharpness, anisotropy, alpha, sigma, is_sqrt)
}
#' Compute Haar multiscale wavelet transform.
#'
#' @param im an image
#' @param inverse Compute inverse transform (default FALSE)
#' @param nb_scales Number of scales used for the transform.
#' @export
#' @examples
#' #Image compression: set small Haar coefficients to 0
#' hr <- haar(boats,nb=3)
#' mask.low <- threshold(abs(hr),"75%")
#' mask.high <- threshold(abs(hr),"95%")
#' haar(hr*mask.low,inverse=TRUE,nb=3) %>% plot(main="75% compression")
#' haar(hr*mask.high,inverse=TRUE,nb=3) %>% plot(main="95% compression")
haar <- function(im, inverse = FALSE, nb_scales = 1L) {
.Call(`_imager_haar`, im, inverse, nb_scales)
}
# Internal wrappers around the compiled FFT routines. `inverse` selects the
# inverse transform; `nb_threads = 0` presumably lets the backend choose the
# thread count -- TODO confirm against the C++ implementation.
FFT_complex <- function(real, imag, inverse = FALSE, nb_threads = 0L) {
    .Call(`_imager_FFT_complex`, real, imag, inverse, nb_threads)
}
# FFT of a single real-valued image.
FFT_realim <- function(real, inverse = FALSE, nb_threads = 0L) {
    .Call(`_imager_FFT_realim`, real, inverse, nb_threads)
}
# FFT variant taking separate real/imaginary inputs; the name suggests a
# real-valued output -- confirm against the C++ side.
FFT_realout <- function(real, imag, inverse = FALSE, nb_threads = 0L) {
    .Call(`_imager_FFT_realout`, real, imag, inverse, nb_threads)
}
#' Estimate displacement field between two images.
#'
#' @param sourceIm Source (reference) image I1.
#' @param destIm Destination image I2 that \code{sourceIm} is matched against.
#' @param smoothness Smoothness of estimated displacement field.
#' @param precision Precision required for algorithm convergence.
#' @param nb_scales Number of scales used to estimate the displacement field.
#' @param iteration_max Maximum number of iterations allowed for one scale.
#' @param is_backward If false, match I2(X + U(X)) = I1(X), else match I2(X) = I1(X - U(X)).
#' @export
displacement <- function(sourceIm, destIm, smoothness = 0.1, precision = 5.0, nb_scales = 0L, iteration_max = 10000L, is_backward = FALSE) {
    .Call(`_imager_displacement`, sourceIm, destIm, smoothness, precision, nb_scales, iteration_max, is_backward)
}
#' Blur image anisotropically, in an edge-preserving way.
#'
#' Standard blurring removes noise from images, but tends to smooth away edges in the process. This anisotropic filter preserves edges better.
#'
#' @param im an image
#' @param amplitude Amplitude of the smoothing.
#' @param sharpness Sharpness.
#' @param anisotropy Anisotropy.
#' @param alpha Standard deviation of the gradient blur.
#' @param sigma Standard deviation of the structure tensor blur.
#' @param dl Spatial discretization.
#' @param da Angular discretization.
#' @param gauss_prec Precision of the diffusion process.
#' @param interpolation_type Interpolation scheme.
#' Can be 0=nearest-neighbor | 1=linear | 2=Runge-Kutta
#' @param fast_approx If true, use fast approximation (default TRUE)
#' @export
#' @examples
#' im <- load.image(system.file('extdata/Leonardo_Birds.jpg',package='imager'))
#' im.noisy <- (im + 80*rnorm(prod(dim(im))))
#' blur_anisotropic(im.noisy,ampl=1e4,sharp=1) %>% plot
blur_anisotropic <- function(im, amplitude, sharpness = 0.7, anisotropy = 0.6, alpha = 0.6, sigma = 1.1, dl = 0.8, da = 30, gauss_prec = 2, interpolation_type = 0L, fast_approx = TRUE) {
.Call(`_imager_blur_anisotropic`, im, amplitude, sharpness, anisotropy, alpha, sigma, dl, da, gauss_prec, interpolation_type, fast_approx)
}
# Internal .Call wrappers: each forwards its arguments verbatim to the
# compiled routine of the same name; the exported user-facing functions
# are presumably built on top of these.
periodic_part <- function(im) {
    .Call(`_imager_periodic_part`, im)
}
# Hough-transform helpers: lines over a pixset, lines via image gradients,
# and circles of a given radius.
hough_line_px <- function(px, theta) {
    .Call(`_imager_hough_line_px`, px, theta)
}
hough_line_grad <- function(im, ntheta, alpha = 1.5) {
    .Call(`_imager_hough_line_grad`, im, ntheta, alpha)
}
hough_circle_ <- function(px, radius) {
    .Call(`_imager_hough_circle_`, px, radius)
}
bgraph <- function(px) {
    .Call(`_imager_bgraph`, px)
}
# Interactive display backend: repeatedly applies `fun` starting from `init`.
# NOTE(review): exact interaction contract is defined on the C++ side.
interact_ <- function(fun, init, title = "") {
    .Call(`_imager_interact_`, fun, init, title)
}
# Interpolation at (possibly fractional) coordinates; the suffix names the
# axes that vary, and `cubic` presumably selects cubic over linear
# interpolation -- confirm against the C++ side.
interp_xy <- function(inp, ix, iy, z = 0L, c = 0L, cubic = FALSE) {
    .Call(`_imager_interp_xy`, inp, ix, iy, z, c, cubic)
}
interp_xyz <- function(inp, ix, iy, iz, c = 0L, cubic = FALSE) {
    .Call(`_imager_interp_xyz`, inp, ix, iy, iz, c, cubic)
}
interp_xyzc <- function(inp, ix, iy, iz, ic, cubic = FALSE) {
    .Call(`_imager_interp_xyzc`, inp, ix, iy, iz, ic, cubic)
}
interp_xyc <- function(inp, ix, iy, z, ic, cubic = FALSE) {
    .Call(`_imager_interp_xyc`, inp, ix, iy, z, ic, cubic)
}
#' Label connected components.
#'
#' The algorithm of connected components computation has been primarily done
#'by A. Meijster, according to the publication:
#''W.H. Hesselink, A. Meijster, C. Bron, "Concurrent Determination of Connected Components.",
#' In: Science of Computer Programming 41 (2001), pp. 173--194'.
#'
#' @param im an image
#' @param high_connectivity 4(false)- or 8(true)-connectivity
#' in 2d case, and between 6(false)- or 26(true)-connectivity in 3d case. Default FALSE
#' @param tolerance Tolerance used to determine if two neighboring pixels belong to the same region.
#' @export
#' @examples
#' imname <- system.file('extdata/parrots.png',package='imager')
#' im <- load.image(imname) %>% grayscale
#' #Thresholding yields different discrete regions of high intensity
#' regions <- isoblur(im,10) %>% threshold("97%")
#' labels <- label(regions)
#' layout(t(1:2))
#' plot(regions,"Regions")
#' plot(labels,"Labels")
#'
label <- function(im, high_connectivity = FALSE, tolerance = 0) {
.Call(`_imager_label`, im, high_connectivity, tolerance)
}
blabel <- function(im, high_connectivity = FALSE) {
.Call(`_imager_blabel`, im, high_connectivity)
}
#' Erode/dilate image by a structuring element.
#'
#' @param im an image
#' @param size size of the structuring element.
#' @param mask Structuring element.
#' @param boundary_conditions Boundary conditions. If FALSE, pixels beyond image boundaries are considered to be 0, if TRUE one. Default: TRUE.
#' @param real_mode If TRUE, perform erosion as defined on the reals. If FALSE, perform binary erosion (default FALSE).
#' @export
#' @examples
#' fname <- system.file('extdata/Leonardo_Birds.jpg',package='imager')
#' im <- load.image(fname) %>% grayscale
#' outline <- threshold(-im,"95%")
#' plot(outline)
#' mask <- imfill(5,10,val=1) #Rectangular mask
#' plot(erode(outline,mask))
#' plot(erode_rect(outline,5,10)) #Same thing
#' plot(erode_square(outline,5))
#' plot(dilate(outline,mask))
#' plot(dilate_rect(outline,5,10))
#' plot(dilate_square(outline,5))
erode <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
.Call(`_imager_erode`, im, mask, boundary_conditions, real_mode)
}
berode <- function(im, mask, boundary_conditions = TRUE) {
.Call(`_imager_berode`, im, mask, boundary_conditions)
}
#' @describeIn erode Erode image by a rectangular structuring element of specified size.
#' @param sx Width of the structuring element.
#' @param sy Height of the structuring element.
#' @param sz Depth of the structuring element.
#' @export
erode_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_erode_rect`, im, sx, sy, sz)
}
berode_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_berode_rect`, im, sx, sy, sz)
}
#' @describeIn erode Erode image by a square structuring element of specified size.
#'
#' @export
erode_square <- function(im, size) {
.Call(`_imager_erode_square`, im, size)
}
berode_square <- function(im, size) {
.Call(`_imager_berode_square`, im, size)
}
#' @describeIn erode Dilate image by a structuring element.
#' @export
dilate <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
.Call(`_imager_dilate`, im, mask, boundary_conditions, real_mode)
}
bdilate <- function(im, mask, boundary_conditions = TRUE) {
.Call(`_imager_bdilate`, im, mask, boundary_conditions)
}
#' @describeIn erode Dilate image by a rectangular structuring element of specified size
#' @export
dilate_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_dilate_rect`, im, sx, sy, sz)
}
bdilate_rect <- function(im, sx, sy, sz = 1L) {
.Call(`_imager_bdilate_rect`, im, sx, sy, sz)
}
#' @describeIn erode Dilate image by a square structuring element of specified size
#' @export
dilate_square <- function(im, size) {
.Call(`_imager_dilate_square`, im, size)
}
bdilate_square <- function(im, size) {
.Call(`_imager_bdilate_square`, im, size)
}
#' Compute watershed transform.
#'
#' The watershed transform is a label propagation algorithm. The value of non-zero pixels will get propagated to their zero-value neighbours. The propagation is controlled by a priority map. See examples.
#' @param im an image
#' @param priority Priority map.
#' @param fill_lines Sets if watershed lines must be filled or not.
#' @examples
#' #In our initial image we'll place three seeds
#' #(non-zero pixels) at various locations, with values 1, 2 and 3.
#' #We'll use the watershed algorithm to propagate these values
#' imd <- function(x,y) imdirac(c(100,100,1,1),x,y)
#' im <- imd(20,20)+2*imd(40,40)+3*imd(80,80)
#' layout(t(1:3))
#' plot(im,main="Seed image")
#' #Now we build an priority map: neighbours of our seeds
#' #should get high priority.
#' #We'll use a distance map for that
#' p <- 1-distance_transform(sign(im),1)
#' plot(p,main="Priority map")
#' watershed(im,p) %>% plot(main="Watershed transform")
#' @export
watershed <- function(im, priority, fill_lines = TRUE) {
.Call(`_imager_watershed`, im, priority, fill_lines)
}
#' Compute Euclidean distance function to a specified value.
#'
#' The distance transform implementation has been submitted by A. Meijster, and implements
#' the article 'W.H. Hesselink, A. Meijster, J.B.T.M. Roerdink,
#' "A general algorithm for computing distance transforms in linear time.",
#' In: Mathematical Morphology and its Applications to Image and Signal Processing,
#' J. Goutsias, L. Vincent, and D.S. Bloomberg (eds.), Kluwer, 2000, pp. 331-340.'
#' The submitted code has then been modified to fit CImg coding style and constraints.
#' @param im an image
#' @param value Reference value.
#' @param metric Type of metric. Can be <tt>{ 0=Chebyshev | 1=Manhattan | 2=Euclidean | 3=Squared-euclidean }</tt>.
#' @export
#' @examples
#' imd <- function(x,y) imdirac(c(100,100,1,1),x,y)
#' #Image is three white dots
#' im <- imd(20,20)+imd(40,40)+imd(80,80)
#' plot(im)
#' #How far are we from the nearest white dot?
#' distance_transform(im,1) %>% plot
distance_transform <- function(im, value, metric = 2L) {
.Call(`_imager_distance_transform`, im, value, metric)
}
bdistance_transform <- function(im, value = TRUE, metric = 2L) {
.Call(`_imager_bdistance_transform`, im, value, metric)
}
#' @describeIn erode Morphological opening (erosion followed by dilation)
#' @export
mopening <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
.Call(`_imager_mopening`, im, mask, boundary_conditions, real_mode)
}
#' @describeIn erode Morphological opening by a square element (erosion followed by dilation)
#' @export
mopening_square <- function(im, size) {
.Call(`_imager_mopening_square`, im, size)
}
#' @describeIn erode Morphological closing by a square element (dilation followed by erosion)
#' @export
mclosing_square <- function(im, size) {
.Call(`_imager_mclosing_square`, im, size)
}
#' @describeIn erode Morphological closing (dilation followed by erosion)
#' @export
mclosing <- function(im, mask, boundary_conditions = TRUE, real_mode = FALSE) {
.Call(`_imager_mclosing`, im, mask, boundary_conditions, real_mode)
}
# Internal .Call wrappers around the compiled list-reduction routines:
# each takes a list of images `x` and reduces it to a single image.
# `na_rm` mirrors R's `na.rm` convention.
reduce_wsum <- function(x, w, na_rm = FALSE) {
    .Call(`_imager_reduce_wsum`, x, w, na_rm)
}
reduce_average <- function(x, na_rm = FALSE) {
    .Call(`_imager_reduce_average`, x, na_rm)
}
reduce_prod <- function(x, na_rm = FALSE) {
    .Call(`_imager_reduce_prod`, x, na_rm)
}
# `max = TRUE` reduces with max, otherwise with min.
reduce_minmax <- function(x, na_rm = FALSE, max = TRUE) {
    .Call(`_imager_reduce_minmax`, x, na_rm, max)
}
# `summary` selects the summary statistic; its coding is defined on the
# C++ side -- TODO confirm there.
reduce_list <- function(x, summary = 0L) {
    .Call(`_imager_reduce_list`, x, summary)
}
reduce_list2 <- function(x, summary = 0L) {
    .Call(`_imager_reduce_list2`, x, summary)
}
reduce_med <- function(x, na_rm = FALSE) {
    .Call(`_imager_reduce_med`, x, na_rm)
}
# Parallel sort / order / rank across a list of images (names suggest
# pixel-wise operation -- confirm against the C++ implementations).
psort <- function(x, increasing = TRUE) {
    .Call(`_imager_psort`, x, increasing)
}
porder <- function(x, increasing = TRUE) {
    .Call(`_imager_porder`, x, increasing)
}
prank <- function(x, increasing = TRUE) {
    .Call(`_imager_prank`, x, increasing)
}
autocrop_ <- function(im, color, axes = "zyx") {
.Call(`_imager_autocrop_`, im, color, axes)
}
rotate <- function(im, angle, interpolation = 1L, boundary = 0L) {
.Call(`_imager_rotate`, im, angle, interpolation, boundary)
}
#' Rotate image by an arbitrary angle, around a center point.
#'
#' @param im an image
#' @param angle Rotation angle, in degrees.
#' @param cx X-coordinate of the rotation center.
#' @param cy Y-coordinate of the rotation center.
#' @param interpolation Interpolation type. 0=nearest | 1=linear | 2=cubic
#' @param boundary_conditions Boundary conditions. 0=dirichlet | 1=neumann | 2=periodic
#' @examples
#' rotate_xy(boats,30,200,400) %>% plot
#' rotate_xy(boats,30,200,400,boundary=2) %>% plot
#' @export
rotate_xy <- function(im, angle, cx, cy, interpolation = 1L, boundary_conditions = 0L) {
.Call(`_imager_rotate_xy`, im, angle, cx, cy, interpolation, boundary_conditions)
}
#' Mirror image content along specified axis
#' @param im an image
#' @param axis Mirror axis ("x","y","z","c")
#' @export
#' @examples
#' mirror(boats,"x") %>% plot
#' mirror(boats,"y") %>% plot
mirror <- function(im, axis) {
.Call(`_imager_mirror`, im, axis)
}
#' Permute image axes
#'
#' By default images are stored in xyzc order. Use permute_axes to change that order.
#' @param im an image
#' @param perm a character string, e.g., "zxyc" to have the z-axis come first
#' @export
#' @examples
#' im <- array(0,c(10,30,40,3)) %>% as.cimg
#' permute_axes(im,"zxyc")
permute_axes <- function(im, perm) {
.Call(`_imager_permute_axes`, im, perm)
}
#' @describeIn resize_uniform Double size
#' @export
resize_doubleXY <- function(im) {
.Call(`_imager_resize_doubleXY`, im)
}
#' @describeIn resize_uniform Half size
#' @export
resize_halfXY <- function(im) {
.Call(`_imager_resize_halfXY`, im)
}
#' @describeIn resize_uniform Triple size
#' @export
resize_tripleXY <- function(im) {
.Call(`_imager_resize_tripleXY`, im)
}
#' Shift image content.
#'
#' @param im an image
#' @param delta_x Amount of displacement along the X-axis.
#' @param delta_y Amount of displacement along the Y-axis.
#' @param delta_z Amount of displacement along the Z-axis.
#' @param delta_c Amount of displacement along the C-axis.
#' @param boundary_conditions can be:
#' - 0: Zero border condition (Dirichlet).
#' - 1: Nearest neighbors (Neumann).
#' - 2: Repeat Pattern (Fourier style).
#' @export
#' @examples
#' imshift(boats,10,50) %>% plot
imshift <- function(im, delta_x = 0L, delta_y = 0L, delta_z = 0L, delta_c = 0L, boundary_conditions = 0L) {
.Call(`_imager_imshift`, im, delta_x, delta_y, delta_z, delta_c, boundary_conditions)
}
#' Resize image
#'
#' If the dimension arguments are negative, they are interpreted as a proportion of the original image.
#' @param im an image
#' @param size_x Number of columns (new size along the X-axis).
#' @param size_y Number of rows (new size along the Y-axis).
#' @param size_z Number of slices (new size along the Z-axis).
#' @param size_c Number of vector-channels (new size along the C-axis).
#' @param interpolation_type Method of interpolation:
#' -1 = no interpolation: raw memory resizing.
#' 0 = no interpolation: additional space is filled according to boundary_conditions.
#' 1 = nearest-neighbor interpolation.
#' 2 = moving average interpolation.
#' 3 = linear interpolation.
#' 4 = grid interpolation.
#' 5 = cubic interpolation.
#' 6 = lanczos interpolation.
#' @param boundary_conditions Border condition type.
#' @param centering_x Set centering type (only if interpolation_type=0).
#' @param centering_y Set centering type (only if interpolation_type=0).
#' @param centering_z Set centering type (only if interpolation_type=0).
#' @param centering_c Set centering type (only if interpolation_type=0).
#' @seealso See imresize for an easier interface.
#' @export
resize <- function(im, size_x = -100L, size_y = -100L, size_z = -100L, size_c = -100L, interpolation_type = 1L, boundary_conditions = 0L, centering_x = 0, centering_y = 0, centering_z = 0, centering_c = 0) {
.Call(`_imager_resize`, im, size_x, size_y, size_z, size_c, interpolation_type, boundary_conditions, centering_x, centering_y, centering_z, centering_c)
}
#' Warp image
#'
#' @param im an image
#' @param warpfield Warping field. The (x,y,z) fields should be stacked along the colour coordinate.
#' @param mode Can be { 0=backward-absolute | 1=backward-relative | 2=forward-absolute | 3=forward-relative }
#' @param interpolation Can be <tt>{ 0=nearest | 1=linear | 2=cubic }</tt>.
#' @param boundary_conditions Boundary conditions. Can be <tt>{ 0=dirichlet | 1=neumann | 2=periodic }</tt>.
#' @seealso imwarp for a user-friendly interface
#' @export
#' @examples
#' #Shift image via warp
#' warp.x <- imfill(width(boats),height(boats),val=5)
#' warp.y <- imfill(width(boats),height(boats),val=20)
#' warpfield <- list(warp.x,warp.y) %>% imappend("c")
#' warp(boats,warpfield,mode=1) %>% plot
warp <- function(im, warpfield, mode = 0L, interpolation = 1L, boundary_conditions = 0L) {
.Call(`_imager_warp`, im, warpfield, mode, interpolation, boundary_conditions)
}
load_image <- function(fname) {
.Call(`_imager_load_image`, fname)
}
save_image <- function(im, fname) {
invisible(.Call(`_imager_save_image`, im, fname))
}
#' Split an image along a certain axis (producing a list)
#'
#' @param im an image
#' @param axis the axis along which to split (for example 'c')
#' @param nb number of objects to split into.
#' if nb=-1 (the default) the maximum number of splits is used ie. split(im,"c") produces a list containing all individual colour channels
#' @seealso imappend (the reverse operation)
im_split <- function(im, axis, nb = -1L) {
.Call(`_imager_im_split`, im, axis, nb)
}
im_append <- function(imlist, axis) {
.Call(`_imager_im_append`, imlist, axis)
}
px_append <- function(imlist, axis) {
.Call(`_imager_px_append`, imlist, axis)
}
#' Extract a numerical summary from image patches, using CImg's mini-language
#' Experimental feature.
#' @param im an image
#' @param expr a CImg expression (as a string)
#' @param cx vector of x coordinates for patch centers
#' @param cy vector of y coordinates for patch centers
#' @param wx vector of coordinates for patch width
#' @param wy vector of coordinates for patch height
#' @examples
#' #Example: median filtering using patch_summary_cimg
#' #Center a patch at each pixel
#' im <- grayscale(boats)
#' patches <- pixel.grid(im) %>% dplyr::mutate(w=3,h=3)
#' #Extract patch summary
#' out <- dplyr::mutate(patches,med=patch_summary_cimg(im,"ic",x,y,w,h))
#' as.cimg(out,v.name="med") %>% plot
#' @export
patch_summary_cimg <- function(im, expr, cx, cy, wx, wy) {
.Call(`_imager_patch_summary_cimg`, im, expr, cx, cy, wx, wy)
}
extract_fast <- function(im, fun, cx, cy, wx, wy) {
.Call(`_imager_extract_fast`, im, fun, cx, cy, wx, wy)
}
#' Extract image patches and return a list
#'
#' Patches are rectangular (cubic) image regions centered at cx,cy (cz) with width wx and height wy (opt. depth wz)
#' WARNINGS:
#' - values outside of the image region are subject to boundary conditions. The default is to set them to 0 (Dirichlet), other boundary conditions are listed below.
#' - widths and heights should be odd integers (they're rounded up otherwise).
#' @param im an image
#' @param cx vector of x coordinates for patch centers
#' @param cy vector of y coordinates for patch centers
#' @param wx vector of patch widths (or single value)
#' @param wy vector of patch heights (or single value)
#' @param boundary_conditions integer. Can be 0 (Dirichlet, default), 1 (Neumann) 2 (Periodic) 3 (mirror).
#' @return a list of image patches (cimg objects)
#' @export
#' @examples
#' #2 patches of size 5x5 located at (10,10) and (10,20)
#' extract_patches(boats,c(10,10),c(10,20),5,5)
extract_patches <- function(im, cx, cy, wx, wy, boundary_conditions = 0L) {
.Call(`_imager_extract_patches`, im, cx, cy, wx, wy, boundary_conditions)
}
#' @param cz vector of z coordinates for patch centers
#' @param wz vector of coordinates for patch depth
#' @describeIn extract_patches Extract 3D patches
#' @export
extract_patches3D <- function(im, cx, cy, cz, wx, wy, wz, boundary_conditions = 0L) {
.Call(`_imager_extract_patches3D`, im, cx, cy, cz, wx, wy, wz, boundary_conditions)
}
draw_image <- function(im, sprite, x = 0L, y = 0L, z = 0L, opacity = 1) {
.Call(`_imager_draw_image`, im, sprite, x, y, z, opacity)
}
do_patchmatch <- function(im1, im2, patch_width, patch_height, patch_depth, nb_iterations, nb_randoms, guide) {
.Call(`_imager_do_patchmatch`, im1, im2, patch_width, patch_height, patch_depth, nb_iterations, nb_randoms, guide)
}
# Internal: presumably validates coordinate vectors (x,y,z,c) against
# image dimensions `d` -- confirm against the C++ side.
checkcoords <- function(x, y, z, c, d) {
    .Call(`_imager_checkcoords`, x, y, z, c, d)
}
# OpenMP controls: query the current mode, set it, and test whether the
# C++ backend was built with OpenMP support.
cimg_omp <- function() {
    .Call(`_imager_cimg_omp`)
}
set_cimg_omp <- function(mode) {
    .Call(`_imager_set_cimg_omp`, mode)
}
has_omp <- function() {
    .Call(`_imager_has_omp`)
}
# Pixset analogue of im_split: split along an axis into at most `nb` parts
# (nb = -1 for the maximum number of splits).
px_split <- function(im, axis, nb = -1L) {
    .Call(`_imager_px_split`, im, axis, nb)
}
# Stream video frames through an external pipe `vpipe`; the remaining
# arguments describe the frame geometry and the read/write block size.
read_video <- function(vpipe, cimg_array, nframes, width, height, block_size) {
    .Call(`_imager_read_video`, vpipe, cimg_array, nframes, width, height, block_size)
}
save_video <- function(vpipe, cimg_array, nframes, width, height, block_size) {
    .Call(`_imager_save_video`, vpipe, cimg_array, nframes, width, height, block_size)
}
|
# Decision Making
# Demonstrates if/else branching: type-checking with is.list() and
# membership testing with %in%.
a_matrix = matrix(data = 1:9, nrow = 3, byrow = TRUE)
a_list = list(c(1:9), a_matrix)
# is.list() is TRUE for any list, whatever the element types
if(is.list(a_list)){
  print("A pretty list")
}else{
  print("Pity!!")
}
cat("\n")
words = c("This", "is", "R Programming")
# %in% tests membership and (unlike ==) never returns NA
if("R Programming" %in% words){
  print("Happy!!")
}else{
  print("Woe!!")
} | /2_Decision_Making_Loops_and_Functions/1_Decision_Making_in_R.R | no_license | officialPrasanta/R_basics | R | false | false | 308 | r | # Decision Making
a_matrix = matrix(data = 1:9, nrow = 3, byrow = TRUE)
a_list = list(c(1:9), a_matrix)
if(is.list(a_list)){
print("A pretty list")
}else{
print("Pity!!")
}
cat("\n")
words = c("This", "is", "R Programming")
if("R Programming" %in% words){
print("Happy!!")
}else{
print("Woe!!")
} |
# Zadanie1 (Exercise 1)
# Empirical size of Welch's t-test under H0 (equal means, unequal
# variances and unequal sample sizes).
set.seed(10) # seed for reproducibility
n1 = 5   # sample size of group 1
n2 = 10  # sample size of group 2
s1 = 1   # standard deviation of group 1
s2 = 5   # standard deviation of group 2
u1 = 0   # mean of group 1
u2 = 0   # mean of group 2 (equal to u1, so H0 holds)
# Monte-Carlo estimate of the rejection rate of the two-sided Welch t-test.
#
# a      -- significance level
# n1, n2 -- sample sizes of the two groups
# u1, u2 -- group means
# s1, s2 -- group standard deviations
# Returns the fraction of 10000 simulated experiments in which
# H0: u1 == u2 is rejected.
Welch <- function(a, n1, n2, u1, u2, s1, s2) {
  n_sim = 10000 # number of repetitions of the experiment
  # simulate every experiment at once: one column per experiment
  X = matrix(rnorm(n1*n_sim, u1, s1), n1, n_sim)
  Y = matrix(rnorm(n2*n_sim, u2, s2), n2, n_sim)
  mean_x = apply(X, 2, mean)
  mean_y = apply(Y, 2, mean)
  # unbiased sample variances, column by column
  var_x = vapply(seq_len(n_sim),
                 function(i) 1/(n1-1)*sum((X[,i]-mean_x[i])^2),
                 numeric(1))
  var_y = vapply(seq_len(n_sim),
                 function(i) 1/(n2-1)*sum((Y[,i]-mean_y[i])^2),
                 numeric(1))
  # Welch statistic and Satterthwaite degrees of freedom
  Z = (mean_x-mean_y)/sqrt((var_x)/n1+var_y/n2)
  v = (var_x/n1 + var_y/n2)^2/((var_x^2/n1^2)/(n1-1)+(var_y^2/n2^2)/(n2-1))
  # two-sided critical values, one per experiment
  crit = qt((1-a/2), v)
  # empirical rejection frequency
  sum(Z < -crit | Z > crit)/n_sim
}
# Normal-approximation (Wald) confidence interval for a proportion.
#
# p    -- observed proportion
# N    -- number of trials
# alfa -- significance level (confidence level is 1 - alfa)
# Returns c(lower, upper).
przedzial <- function(p, N, alfa) {
  margin <- qnorm(1 - alfa/2) * sqrt(p * (1 - p)/N)
  c(p - margin, p + margin)
}
# Empirical rejection rates and their 95% Wald intervals at two nominal
# levels (10000 is the number of simulations inside Welch()).
Welch(0.05, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.05, n1, n2, u1, u2, s1, s2),10000,0.05)
Welch(0.1, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.1, n1, n2, u1, u2, s1, s2),10000,0.05)
# NOTE(review): `wek`, `spr2`, `Zw` and `bladteo` are not defined anywhere
# in this script -- this plotting section presumably belongs to a later
# exercise; confirm before running.
plot(wek,spr2, type="p", col="black", ylab = "Moc testu",
     xlab="Parametr przesuniecia")
lines(wek,Zw,col="green",type="l")
arrows(wek, Zw-bladteo, wek, Zw+bladteo,
       length=0.05, col="green", angle=90, code=3)
| /Lista2/z1.R | no_license | majsylw/statistical-methods-in-biology | R | false | false | 1,271 | r | # Zadanie1
set.seed(10) # sianie nasionka
n1 = 5
n2 = 10
s1 = 1
s2 = 5
u1 = 0
u2 = 0
Welch <- function(a,n1,n2,u1,u2,s1,s2){
symNum = 10000 #ilosc powtorzen eksperymentu
Xe=matrix(rnorm(n1*symNum,u1,s1),n1,symNum)
Ye=matrix(rnorm(n2*symNum,u2,s2),n2,symNum)
Xme=apply(Xe,2,mean)
Xs=c(1:symNum)
for (i in 1:symNum){
Xs[i] = 1/(n1-1)*sum((Xe[,i]-Xme[i])^2)}
Yme=apply(Ye,2,mean)
Ys=c(1:symNum)
for (k in 1:symNum){
Ys[k] = 1/(n2-1)*sum((Ye[,k]-Yme[k])^2)}
Ze=(Xme-Yme)/sqrt((Xs)/n1+Ys/n2)
v = (Xs/n1 + Ys/n2)^2/((Xs^2/n1^2)/(n1-1)+(Ys^2/n2^2)/(n2-1))
wynik=0
for(j in 1:symNum){
if(Ze[j] < (-qt((1-a/2),v[j])) || Ze[j] > qt((1-a/2),v[j])){
wynik = wynik + 1
}
}
return (wynik/symNum)
}
przedzial<- function(p,N,alfa){
return(c(p - qnorm(1-alfa/2)*sqrt(p*(1-p)/N),p + qnorm(1-alfa/2)*sqrt(p*(1-p)/N)))
}
Welch(0.05, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.05, n1, n2, u1, u2, s1, s2),10000,0.05)
Welch(0.1, n1, n2, u1, u2, s1, s2)
przedzial(Welch(0.1, n1, n2, u1, u2, s1, s2),10000,0.05)
plot(wek,spr2, type="p", col="black", ylab = "Moc testu",
xlab="Parametr przesuniecia")
lines(wek,Zw,col="green",type="l")
arrows(wek, Zw-bladteo, wek, Zw+bladteo,
length=0.05, col="green", angle=90, code=3)
|
# Load a package, installing it from CRAN first if it is not available.
# The package is passed unquoted: its name is recovered from the call via
# match.call(), so packages(arm) loads (installing if needed) "arm".
# NOTE(review): require() inside if() is deliberate here -- unlike
# library(), it returns FALSE instead of erroring when the package is
# missing, which is what triggers the install.
packages<-function(x,
                   repos="https://cloud.r-project.org/", ...){
  x<-as.character(match.call()[[2]])
  if (!require(x,character.only=TRUE)){
    install.packages(pkgs=x, repos=repos, ...)
    require(x,character.only=TRUE)
  }
}
# Attach (installing first if necessary) the packages used by the analysis.
packages(arm)
packages(dplyr)
packages(lattice)
packages(rv)
packages(tikzDevice)
packages(maptools)
packages(maps)
packages(mapproj)
packages(rpart)
# Directory layout: data and figure paths relative to the working directory.
base <- getwd()
dataDIR <- paste(base, "Data", sep="/")
plotDIR <- paste(base, "manuscript", "figures", sep="/")
# Stan setup: cache compiled models and parallelise over up to 8 cores.
packages(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = min(c(parallel::detectCores(), 8)))
nchains <- min(c(parallel::detectCores(), 8))
# MCMC settings: `nthin` thins the post-warmup draws down to `nkeep` total
# across all chains.
niters <- 100000
nkeep <- 2500
nthin <- ceiling((niters/2)*nchains/nkeep)
# Build an empty random-variable object: a list of `length` zeros carrying
# class "rv". Errors when `length` is not numeric.
rv0 <- function(length = 0) {
  if (!is.numeric(length)) {
    stop("length must be numeric")
  }
  out <- as.list(rep.int(0, length))
  class(out) <- "rv"
  out
}
# Zero-pad a number to (at least) three characters: 7 -> "007",
# 42 -> "042", 123 -> "123". Vectorised via nested ifelse().
to3 <- function(x) {
  ifelse(x < 10, paste0("00", x),
         ifelse(x < 100, paste0("0", x), as.character(x)))
}
# Zero-pad a number to (at least) two characters: 7 -> "07", 42 -> "42".
to2 <- function(x) {
  ifelse(x < 10, paste0("0", x), as.character(x))
}
# Smooth hockey-stick (piecewise-linear with a soft bend) function:
# slope beta1 below the change point phi, slope beta1 + delta above it,
# with log1p(exp(.)) smoothing the transition over a width `theta`
# (defaults to 1% of the range of x).
hockey_smooth <- function(x, beta0, beta1 = 0, delta, phi, theta = NULL) {
  if (is.null(theta)) {
    theta <- 0.01 * diff(range(x))
  }
  centered <- x - phi
  beta0 + beta1 * centered + delta * theta * log1p(exp(centered/theta))
}
| /R_code/FrontMatter.R | no_license | songsqian/mc-chla | R | false | false | 1,484 | r | packages<-function(x,
repos="https://cloud.r-project.org/", ...){
x<-as.character(match.call()[[2]])
if (!require(x,character.only=TRUE)){
install.packages(pkgs=x, repos=repos, ...)
require(x,character.only=TRUE)
}
}
packages(arm)
packages(dplyr)
packages(lattice)
packages(rv)
packages(tikzDevice)
packages(maptools)
packages(maps)
packages(mapproj)
packages(rpart)
base <- getwd()
dataDIR <- paste(base, "Data", sep="/")
plotDIR <- paste(base, "manuscript", "figures", sep="/")
packages(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = min(c(parallel::detectCores(), 8)))
nchains <- min(c(parallel::detectCores(), 8))
niters <- 100000
nkeep <- 2500
nthin <- ceiling((niters/2)*nchains/nkeep)
rv0 <- function (length = 0)
{
if (is.numeric(length)) {
x <- as.list(rep.int(0, length))
}
else {
stop("length must be numeric")
}
class(x) <- "rv"
return(x)
}
to3 <- function(x){
ifelse (x < 10, paste("00", x, sep=""),
ifelse(x < 100, paste("0", x, sep=""),
as.character(x)))
}
to2 <- function(x)
return(ifelse(x<10, paste("0", x, sep=""), as.character(x)))
hockey_smooth <- function(x, beta0, beta1 = 0, delta, phi, theta = NULL) {
  # Smooth "hockey stick": a line of slope beta1 with a soft change of slope
  # (by delta) at the breakpoint phi; log1p(exp(.)) approaches a sharp hinge
  # as theta -> 0.
  if (is.null(theta)) {
    # default bend sharpness: 1% of the span of x
    theta <- 0.01 * diff(range(x))
  }
  beta0 + beta1 * (x - phi) + delta * theta * log1p(exp((x - phi) / theta))
}
|
#=============================================================================#
# Authors: Alex Perkins, Sean Cavany, Sean Moore, Rachel Oidtman, Anita Lerch, Marya Poterek
# project: Estimating unobserved SARS-CoV-2 infections in the United States
# Year: 2020
#
# Code to generate all figures and results from main text
#
#=============================================================================#
# set up workspace
#=============================================================================#
# load libraries
library(extraDistr)
library(doParallel)
library(mc2d)
library(MASS)
library(boot)
# load function to simulate autochthonous transmission
source('simOutbreak.R')
# set random number seed
set.seed(1234)
#=============================================================================#
# load in and process data
#=============================================================================#
# read in line list data for US
# updated 20200312
# data from https://github.com/midas-network/COVID-19/tree/master/data/cases/global/line_listings_nihfogarty
# line list of US cases; used to estimate the symptom-onset-to-report delay
linelist = read.csv('../data/2020_03_12_1800EST_linelist_NIHFogarty.csv')
yesUS = subset(linelist, country=='USA')
# remove Diamond Princess repatriated cases
yesUS = yesUS[grep("Diamond",yesUS$summary,invert=T),]
# fit gamma parameters for symptom to report delay
data.delay = as.Date(yesUS$reporting.date) - as.Date(yesUS$symptom_onset)
data.delay = as.numeric(data.delay[which(!is.na(data.delay))])
# fit once and reuse both estimates; the original ran the identical
# fitdistr optimization twice, once per extracted parameter
delay.fit = MASS::fitdistr(data.delay, dgamma, start = list(shape = 0.5, rate = 0.5))
delay.shape.baseline = delay.fit$estimate[1]
delay.rate = delay.fit$estimate[2]
# number of travelers that were cases or died
trav.cases = subset(yesUS, international_traveler > 0)
trav.deaths = sum(trav.cases$death > 0, na.rm = TRUE)
num.CF = c(nrow(trav.cases) - trav.deaths,  # surviving traveler cases
           trav.deaths)                     # traveler deaths
# read in case data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
ts = read.csv('../data/time_series_19-covid-Confirmed.csv')
# one row per country, one column per day; starts as cumulative case counts
ts.natl = matrix(0, length(unique(ts$Country.Region)), ncol(ts) - 4)
for (ii in seq_len(ncol(ts.natl))) {  # seq_len: safe even if ncol were 0
  ts.natl[, ii] = aggregate(ts[, 4 + ii], by = list(ts$Country.Region), FUN = sum)[, 2]
}
row.names(ts.natl) = aggregate(ts[, 4 + ii], by = list(ts$Country.Region), FUN = sum)[, 1]
# convert cumulative counts to daily incidence, clamped at zero (data corrections)
for (ii in seq_len(nrow(ts.natl))) {
  ts.natl[ii, -1] = pmax(0, diff(ts.natl[ii, ]))
}
# correct for travel ban from China (for non-US citizens starting 2/2 at 5pm) - so 0 out starting 2/3
colnames(ts.natl) = 22:(ncol(ts.natl) + 21)  # columns labeled by day of year (Jan 22 = day 22)
which(colnames(ts.natl) == 34)  # NOTE(review): value unused; leftover interactive check
ts.natl['China', which(colnames(ts.natl) == 34):ncol(ts.natl)] = 0
# read in death data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
tsd = read.csv('../data/time_series_19-covid-Deaths.csv')
tsd.natl = matrix(0, length(unique(tsd$Country.Region)), ncol(tsd) - 4)
# BUG FIX: the loop bound was ncol(ts.natl) (the CASE matrix); use the death
# matrix's own column count in case the two files cover different spans
for (ii in seq_len(ncol(tsd.natl))) {
  tsd.natl[, ii] = aggregate(tsd[, 4 + ii], by = list(tsd$Country.Region), FUN = sum)[, 2]
}
row.names(tsd.natl) = aggregate(tsd[, 4 + ii], by = list(tsd$Country.Region), FUN = sum)[, 1]
# convert cumulative deaths to daily deaths, clamped at zero
for (ii in seq_len(nrow(tsd.natl))) {
  tsd.natl[ii, -1] = pmax(0, diff(tsd.natl[ii, ]))
}
colnames(tsd.natl) = 22:(ncol(tsd.natl) + 21)  # day-of-year column labels
# count up local cases by day in the US
cases.US.total = c(rep(0, 21), ts.natl['US', ])
# tally imported (traveler) cases by day of year since 2019-12-31
import.counts = table(
  as.Date(as.character(subset(yesUS, international_traveler > 0)$reporting.date)) -
    as.Date('2019-12-31'))
cases.US.imported = rep(0, length(cases.US.total))
cases.US.imported[as.numeric(names(import.counts))] = import.counts
rm(import.counts)
# locally acquired = total minus imported, clamped at zero
cases.US.local = pmax(0, cases.US.total - cases.US.imported)
# count up local deaths by day in the US
deaths.US.total = c(rep(0, 21), tsd.natl['US', ])
# tally imported (traveler) deaths by day of year since 2019-12-31
death.counts = table(
  as.Date(as.character(subset(yesUS, international_traveler > 0 & death > 0)$reporting.date)) -
    as.Date('2019-12-31'))
deaths.US.imported = rep(0, length(deaths.US.total))
deaths.US.imported[as.numeric(names(death.counts))] = death.counts
rm(death.counts)
# locally acquired = total minus imported, clamped at zero
deaths.US.local = pmax(0, deaths.US.total - deaths.US.imported)
#=============================================================================#
# simulate imported infections
#=============================================================================#
# sample replicates of how many infections have been imported into the US
maxUS = 2e4
rangeUS = sum(yesUS$international_traveler>0,na.rm=T):maxUS
# estimate for asymptomatic proportion based on
# https://www.medrxiv.org/content/10.1101/2020.02.20.20025866v2
# beta shape parameters (optimized on the log scale for positivity) chosen so
# the beta quantiles match the published median and 95% CI (0.179, 0.155-0.202)
PrAsymptomatic = exp(optim(par=c(0,0),fn=function(par){
sum((
qbeta(c(0.5,0.025,0.975),exp(par[1]),exp(par[2])) -
c(0.179,0.155,0.202)) ^ 2)})$par)
# estimate for proportion of symptomatic infections resulting in death based on
# http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
# beta(1+deaths, 1+survivors) from 1023 deaths among 44672 symptomatic cases
PrDeathSymptom = c(1+1023,1+44672-1023)
# set values of unknown parameters
# note that these values seem to maximize the probability of the cumulative
# deaths in the US as of March 8, 2020 predicted by the model
replicates = 1000
# posterior draws for the traveler symptomatic-case fraction and the relative
# infectiousness of asymptomatics, estimated elsewhere in the project
load("../results/sensitivity/param_estimates_posterior_1.rda", verbose=T)
indices = sample(1:length(PrCaseSymptom.trav_posterior), replicates, replace=TRUE)
PrCaseSymptom.trav = PrCaseSymptom.trav_posterior[indices]
asympRFraction = asympRFraction_posterior[indices]
# sample from uncertainty about proportions of infection outcomes
propns.ASCF = cbind(
rbeta(replicates,PrAsymptomatic[1],PrAsymptomatic[2]),
rbeta(replicates,PrDeathSymptom[1],PrDeathSymptom[2]))
# partition into columns: asymptomatic, symptomatic-but-undetected,
# detected case, fatal ("ASCF") -- the four rows must sum to 1
propns.ASCF = cbind(
propns.ASCF[,1],
(1-propns.ASCF[,1]) * (1-PrCaseSymptom.trav) * (1-propns.ASCF[,2]),
(1-propns.ASCF[,1]) * PrCaseSymptom.trav * (1-propns.ASCF[,2]),
(1-propns.ASCF[,1]) * propns.ASCF[,2])
# draw samples of the number of imported infections
imports = numeric(length=replicates)
for(ii in 1:replicates){
# likelihood of each candidate total, given the observed split of travelers
# into surviving cases and deaths (num.CF); unobserved = A + S outcomes
PrImportedInfections =
dmultinomial(
x = cbind(
0:(maxUS-sum(num.CF)),
num.CF[1],num.CF[2]),
prob = c(sum(propns.ASCF[ii,1:2]),propns.ASCF[ii,3:4]))
imports[ii] =
sample(
sum(num.CF):maxUS,
1,
prob=PrImportedInfections,
replace=T)
}
# draw samples of the day on which imported infections arrived:
# each day-of-year index repeated once per imported case reported that day.
# rep() with a 'times' vector replaces the original grow-in-a-loop
# construction, which copied the whole vector on every append (O(n^2)).
case.days = rep(seq_along(cases.US.imported), cases.US.imported)
# empirical density of import timing, evaluated on each day of the record
import.case.density = density(
  case.days,
  from = 1,
  to = length(cases.US.imported),
  n = length(cases.US.imported))$y
# estimate the day of the year on which each replicate's imports occur
# (lapply preserves the original list structure and RNG call order)
import.doy = lapply(seq_len(replicates), function(ii) {
  sample(
    seq_along(cases.US.imported),
    imports[ii],
    prob = import.case.density,
    replace = TRUE)
})
#=============================================================================#
# simulate local transmission
#=============================================================================#
# simulate local transmission for each draw of imported infections
# (%do% runs sequentially; simOutbreak comes from the sourced simOutbreak.R)
local = foreach(ii = 1:replicates) %do% {
simOutbreak(
timeImport = import.doy[[ii]], # timing of each imported infection
R = 1.97, # reproduction number
k = 1e3, # dispersion parameter
si_mean = 4.56, # mean of serial interval distribution
si_sd = 0.95, # standard deviation of serial interval distribution
inc_shape = 1.88, # shape parameter of incubation period distribution
inc_scale = 7.97, # scale parameter of incubation period distribution
symp_to_death_mean = 14, # mean of time between symptom onset and death
symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
stopSimulationDay = length(cases.US.imported), # day of year since Jan 1 when simulation stops
asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
lnormFlag = F # toggles whether serial interval distribution is lognormal
)
}
#simulate deaths out but turn transmission off on 12 March
# simOutbreakR0Change presumably also comes from simOutbreak.R -- verify
local.predict = foreach(ii = 1:replicates) %do% {
simOutbreakR0Change(
timeImport = import.doy[[ii]], # timing of each imported infection
R = 1.97, # reproduction number
k = 1e3, # dispersion parameter
si_mean = 4.56, # mean of serial interval distribution
si_sd = 0.95, # standard deviation of serial interval distribution
inc_shape = 1.88, # shape parameter of incubation period distribution
inc_scale = 7.97, # scale parameter of incubation period distribution
symp_to_death_mean = 14, # mean of time between symptom onset and death
symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
stopSimulationDay = 180, # day of year since Jan 1 when simulation stops
asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
lnormFlag = F, # toggles whether serial interval distribution is lognormal
RChangeDay = length(cases.US.imported), # determines when R changes
RChange = 0 # determines what R drops to at R0ChangeDay
)
}
# load the following to generate the objects used to generate the figures in the paper
# NOTE(review): this load() overwrites the freshly simulated objects above with
# the archived ones -- confirm that is the intent before relying on new runs
load("../results/objects_used_in_paper.RData",verbose=T)
#=============================================================================#
# produce plots and results for all main text figures
#=============================================================================#
# set figure margins
par(mar=c(4,5,1,1))
# Infections and imports results - processing
# replicate x day matrix of daily incidence across simulations
local.mat = t(matrix(
unlist(lapply(local, function(x) x$daily)),
length(local[[1]]$daily),
replicates))
# Infections and imports results - quantities (printed when run via Rscript)
quantile(PrCaseSymptom.trav_posterior,c(0.025, 0.5, 0.975))
quantile(unlist(lapply(local,function(ll)ll$cum)),c(0.025,0.5,0.975))
quantile(local.mat[,ncol(local.mat)],c(0.025,0.5,0.975))
# Figure 1
# plot distribution of cumulative infections
# plot all locally acquired infections over time
# panel A: histogram of cumulative infections; panel B: daily median + 95% band
pdf('../plots/figure_1_cumulative_infections_and_infections_daily.pdf',
width=9,height=5, pointsize=14)
par(mfrow=c(1,2))
hist(
unlist(lapply(local,function(ll)ll$cum)),
col='gray',xlab='Cumulative infections',
ylab='Number of simulations',main='',las=1)
mtext("A",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
plot(
as.Date('2019-12-31') + 1:ncol(local.mat),
apply(local.mat,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,quantile(local.mat[,ncol(local.mat)],0.975)),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(local.mat)),
xlab='Date',ylab='Infections',main='')
# 95% envelope across replicates
polygon(
c(as.Date('2019-12-31') + 1:ncol(local.mat),
rev(as.Date('2019-12-31') + 1:ncol(local.mat))),
c(apply(local.mat,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(local.mat,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
mtext("B",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
dev.off()
# Cases results - processing
updateDaily = FALSE # turn on Bayesian daily updating
smoothSpline = TRUE # turn on smoothing spline
# replicate x day matrix of simulated symptomatic infections
cases.mat = t(matrix(
unlist(lapply(local, function(x) x$cases)),
length(local[[1]]$cases),
replicates))
# p.mat[ii,jj]: draw of the proportion of symptomatic infections reported
# on day jj under replicate ii (NA on days with no simulated cases)
p.mat = matrix(NA,nrow(cases.mat),ncol(cases.mat))
# thin infections to those that would be symptomatic-or-detected (cols 2:3)
cases.mat.obs = rbinom(length(cases.mat), as.vector(cases.mat), rowSums(propns.ASCF[,2:3]))
cases.mat.obs = matrix(cases.mat.obs, replicates, ncol(cases.mat))
for(ii in 1:nrow(cases.mat)){
alpha.old=1
beta.old=1
for(jj in 1:ncol(cases.mat)){
if(cases.mat[ii,jj]){
actual.cases = rbinom(1,cases.mat[ii,jj], sum(propns.ASCF[ii,2:3]))
alpha.new = alpha.old+cases.US.local[jj]
# NOTE(review): beta.new can go <= 0 when reported cases exceed simulated;
# the max(1, .) below guards rbeta but the raw value would carry forward
# if updateDaily were TRUE -- verify that is intended
beta.new = beta.old+actual.cases-cases.US.local[jj]
p.mat[ii,jj] =
rbeta(1,alpha.new,max(1,beta.new))
if (updateDaily) {
alpha.old=alpha.new
beta.old=beta.new
}
}
}
# smooth the per-day detection draws on the logit scale (logit/inv.logit
# come from the boot package loaded above)
if (smoothSpline) {
non.NA.indices = which(!is.na(p.mat[ii,]))
if(length(non.NA.indices) > ncol(p.mat) / 3){
temp.sp = smooth.spline((1:ncol(p.mat))[non.NA.indices],
logit(p.mat[ii,non.NA.indices]),
nknots=floor((ncol(p.mat) - non.NA.indices[1])/7 + 0.5))
p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = inv.logit(predict(temp.sp, non.NA.indices[1]:ncol(p.mat))$y)
} else {
p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = NA
}
}
}
# daily counts of tests administered in the US; Complete flags full-report days
testing = read.csv("../data/testing_ts.csv")
incomplete.days = testing$Day[!testing$Complete]
# day with the lowest median detection probability
low.p = which.min(apply(p.mat,2,function(ii)median(ii,na.rm=T)))
# Cases results - quantities (printed when run via Rscript)
low.p+as.Date('2019-12-31')
apply(p.mat,2,function(x)quantile(x,c(0.025,0.5,0.975),na.rm=T))[,low.p]
apply(p.mat,2,function(x)quantile(x,c(0.025,0.5,0.975),na.rm=T))[,length(cases.US.local)]
quantile(rowSums(ifelse(p.mat < 0.2,1,0),na.rm=T), c(0.025,0.5,0.975))
# obtain correlations of p.mat and testing
totalTestsPerDay = c(rep(0, min(testing$Day)-1),
testing$Total[1:which(testing$Day==ncol(p.mat))])
start.day.cor = low.p
end.day.cor = max(testing$Day[testing$Complete])
correlation.reduced = cor(t(p.mat[,start.day.cor:end.day.cor]),
totalTestsPerDay[start.day.cor:end.day.cor], use="pairwise")
quantile(correlation.reduced,c(0.025,0.5,0.975))
# Figure 2
# plot locally acquired symptomatic infections over time, alongside number of cases reported in the US
# plot proportion of locally acquired symptomatic infections reported over time
# alongside numbers of tests administered in the US
pdf('../plots/figure_2_symptomatic_daily_and_symptomatic_detected.pdf',
width=9,height=4.8, pointsize=14)
par(mfrow=c(1,2))
par(mar = c(5, 4, 4, 4) + 0.3)
# panel A: model median symptomatic infections (black) with 95% band
plot(
as.Date('2019-12-31') + 1:ncol(cases.mat),
apply(cases.mat.obs,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,quantile(cases.mat.obs[,ncol(cases.mat)],0.975)),
col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)),
xlab='Date',ylab='Symptomatic infections',main='')
polygon(
c(as.Date('2019-12-31') + 1:ncol(cases.mat),
rev(as.Date('2019-12-31') + 1:ncol(cases.mat))),
c(apply(cases.mat.obs,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(cases.mat.obs,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
mtext("A",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
# overlay observed reported cases (red) on a secondary right-hand axis
par(new = TRUE)
plot(as.Date('2019-12-31') + 1:ncol(cases.mat),
cases.US.local, type="l", col="red",
axes=F, bty = "n", xlab = "", ylab = "",
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)), lwd=2,
xaxs='i',yaxs='i')
axis(side=4, at = pretty(range(cases.US.local)), col="red", col.axis="red",las=1)
mtext("Reported cases", side=4, line=3, lwd=2, col="red")
legend("topleft", col=c("red", "black"), lty="solid",
legend=rev(c("Model", "Data")),
bty="n", lwd=2)
# panel B: median reporting proportion (black) with 95% band
plot(
as.Date('2019-12-31') + 1:ncol(p.mat),
apply(p.mat,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,1),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(p.mat)),
xlab='Date',ylab='Symptomatics reporting',
main='')
polygon(
c(as.Date('2019-12-31') + 1:ncol(p.mat),
rev(as.Date('2019-12-31') + 1:ncol(p.mat))),
c(apply(p.mat,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(p.mat,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
# overlay tests administered (red, complete-report days only) on right axis
par(new = TRUE)
plot(as.Date('2019-12-31') + testing$Day[testing$Complete],
testing$Total[testing$Complete], type="l", col="red",
axes=F, bty = "n", xlab = "", ylab = "",
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)), lwd=2,
xaxs='i',yaxs='i')
axis(side=4, at = pretty(range(testing$Total)), col="red", col.axis="red",las=1)
mtext("Tests administered", side=4, line=3, lwd=2, col="red")
legend("top", col=c("red", "black"), lty="solid",
legend=c("Data", "Model"),
bty="n", lwd=2)
mtext("B",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
dev.off()
# Deaths results - processing
# replicate x day matrix of simulated deaths with transmission stopped Mar 12,
# thinned by each replicate's fatal fraction (propns.ASCF column 4)
local.predict.death = t(matrix(unlist(lapply(local.predict, function(x) x$death)), length(local.predict[[1]]$death), replicates))
local.predict.death = rbinom(length(local.predict.death), as.vector(local.predict.death), propns.ASCF[,4])
local.predict.death = matrix(local.predict.death, replicates, length(local.predict[[1]]$death))
# deaths realized within the observation window
death.mat = t(matrix(
unlist(lapply(local, function(x) x$death)),
length(local[[1]]$death),
replicates))
death.mat.obs = rbinom(length(death.mat), as.vector(death.mat), propns.ASCF[,4])
death.mat = matrix(death.mat.obs, replicates, ncol(death.mat))
# eventual deaths among ALL infections to date (x$daily thinned by the fatal
# fraction); name notwithstanding, this starts from daily incidence.
# NOTE(review): second dim uses length(local[[1]]$cases) -- presumably equal
# in length to $daily; verify in simOutbreak.R
all.death.mat = t(matrix(
unlist(lapply(local, function(x) x$daily)),
length(local[[1]]$cases),
replicates))
all.death.mat.obs = rbinom(length(all.death.mat), as.vector(all.death.mat), propns.ASCF[,4])
all.death.mat = matrix(all.death.mat.obs, replicates, ncol(all.death.mat))
# deaths still to come from infections already acquired
future.death.mat = all.death.mat-death.mat
death.day.min = which(deaths.US.local!=0)[1]
death.day.max = length(deaths.US.local)
# summary quantities (printed when run via Rscript)
quantile(rowSums(death.mat),c(0.025,0.5,0.975))
sum(deaths.US.local)
sum(death.mat[,death.day.min:death.day.max])/sum(death.mat)
quantile(rowSums(future.death.mat),c(0.025,0.5,0.975))
quantile(rowSums(future.death.mat)/rowSums(death.mat),c(0.025,0.5,0.975))
# Figure 3
# plot future deaths, assuming transmission stops on March 12
pdf('../plots/figure_3_deaths_forecast.pdf',width=9,height=6,pointsize=14)
time.max = 150#dim(local.predict.death)[2]
# per-day forecast summaries across replicates
death.predict.median = apply(local.predict.death[,], 2, median)
death.predict.025 = apply(local.predict.death[,], 2, function(x)quantile(x,0.025))
death.predict.975 = apply(local.predict.death[,], 2, function(x)quantile(x,0.975))
times = seq(from=as.Date("2020-01-01"), by="1 day", length.out=time.max)
# observed deaths (red), padded with NA beyond the data
plot(times, c(deaths.US.local, rep(NA, time.max-length(deaths.US.local))),
xlim = c(as.Date("2020-02-01"),as.Date("2020-05-15")), ylim = c(0,max(death.predict.975)),
col="red", type="l", xlab="Month", ylab="Deaths", lwd=2, main="",xaxs='i',yaxs='i',las=1, xaxt = 'n')
# custom month axis: ticks at month starts, labels centered mid-month
month_starts = c(1, 30, 61, 91)
axis(side = 1, at = times[which(times %in% as.Date("2020-02-01"): as.Date("2020-05-15"))][month_starts], labels = F)
axis(side = 1, at = times[which(times %in% as.Date("2020-02-01"): as.Date("2020-05-15"))][month_starts + 15], tick = F,
labels = c('Feb', 'Mar', 'Apr', 'May'))
lines(times, death.predict.median[1:time.max], col="black", lwd=2)
# 95% forecast envelope
polygon(
c(times, rev(times)),
c(death.predict.975[1:time.max],
rev(death.predict.025[1:time.max])),
border=NA,col=rgb(0,0,0,alpha=0.25))
# date transmission is assumed to stop
abline(v=as.Date("2020-03-12"), lty="dashed")
legend("topleft",lty=rep("solid",2),lwd=2,
legend=c("Data", "Model"),col=c("red","black"),
bty='n')
dev.off()
| /code/script_main.R | permissive | kmfolgar/sarscov2_unobserved | R | false | false | 19,559 | r | #=============================================================================#
# Authors: Alex Perkins, Sean Cavany, Sean Moore, Rachel Oidtman, Anita Lerch, Marya Poterek
# project: Estimating unobserved SARS-CoV-2 infections in the United States
# Year: 2020
#
# Code to generate all figures and results from main text
#
#=============================================================================#
# set up workspace
#=============================================================================#
# load libraries
library(extraDistr)
library(doParallel)
library(mc2d)
library(MASS)
library(boot)
# load function to simulate autochthonous transmission
source('simOutbreak.R')
# set random number seed
set.seed(1234)
#=============================================================================#
# load in and process data
#=============================================================================#
# read in line list data for US
# updated 20200312
# data from https://github.com/midas-network/COVID-19/tree/master/data/cases/global/line_listings_nihfogarty
# line list of US cases; used to estimate the symptom-onset-to-report delay
linelist = read.csv('../data/2020_03_12_1800EST_linelist_NIHFogarty.csv')
yesUS = subset(linelist, country=='USA')
# remove Diamond Princess repatriated cases
yesUS = yesUS[grep("Diamond",yesUS$summary,invert=T),]
# fit gamma parameters for symptom to report delay
data.delay = as.Date(yesUS$reporting.date) - as.Date(yesUS$symptom_onset)
data.delay = as.numeric(data.delay[which(!is.na(data.delay))])
# fit once and reuse both estimates; the original ran the identical
# fitdistr optimization twice, once per extracted parameter
delay.fit = MASS::fitdistr(data.delay, dgamma, start = list(shape = 0.5, rate = 0.5))
delay.shape.baseline = delay.fit$estimate[1]
delay.rate = delay.fit$estimate[2]
# number of travelers that were cases or died
trav.cases = subset(yesUS, international_traveler > 0)
trav.deaths = sum(trav.cases$death > 0, na.rm = TRUE)
num.CF = c(nrow(trav.cases) - trav.deaths,  # surviving traveler cases
           trav.deaths)                     # traveler deaths
# read in case data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
ts = read.csv('../data/time_series_19-covid-Confirmed.csv')
# one row per country, one column per day; starts as cumulative case counts
ts.natl = matrix(0, length(unique(ts$Country.Region)), ncol(ts) - 4)
for (ii in seq_len(ncol(ts.natl))) {  # seq_len: safe even if ncol were 0
  ts.natl[, ii] = aggregate(ts[, 4 + ii], by = list(ts$Country.Region), FUN = sum)[, 2]
}
row.names(ts.natl) = aggregate(ts[, 4 + ii], by = list(ts$Country.Region), FUN = sum)[, 1]
# convert cumulative counts to daily incidence, clamped at zero (data corrections)
for (ii in seq_len(nrow(ts.natl))) {
  ts.natl[ii, -1] = pmax(0, diff(ts.natl[ii, ]))
}
# correct for travel ban from China (for non-US citizens starting 2/2 at 5pm) - so 0 out starting 2/3
colnames(ts.natl) = 22:(ncol(ts.natl) + 21)  # columns labeled by day of year (Jan 22 = day 22)
which(colnames(ts.natl) == 34)  # NOTE(review): value unused; leftover interactive check
ts.natl['China', which(colnames(ts.natl) == 34):ncol(ts.natl)] = 0
# read in death data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
tsd = read.csv('../data/time_series_19-covid-Deaths.csv')
tsd.natl = matrix(0, length(unique(tsd$Country.Region)), ncol(tsd) - 4)
# BUG FIX: the loop bound was ncol(ts.natl) (the CASE matrix); use the death
# matrix's own column count in case the two files cover different spans
for (ii in seq_len(ncol(tsd.natl))) {
  tsd.natl[, ii] = aggregate(tsd[, 4 + ii], by = list(tsd$Country.Region), FUN = sum)[, 2]
}
row.names(tsd.natl) = aggregate(tsd[, 4 + ii], by = list(tsd$Country.Region), FUN = sum)[, 1]
# convert cumulative deaths to daily deaths, clamped at zero
for (ii in seq_len(nrow(tsd.natl))) {
  tsd.natl[ii, -1] = pmax(0, diff(tsd.natl[ii, ]))
}
colnames(tsd.natl) = 22:(ncol(tsd.natl) + 21)  # day-of-year column labels
# count up local cases by day in the US
cases.US.total = c(rep(0, 21), ts.natl['US', ])
# tally imported (traveler) cases by day of year since 2019-12-31
import.counts = table(
  as.Date(as.character(subset(yesUS, international_traveler > 0)$reporting.date)) -
    as.Date('2019-12-31'))
cases.US.imported = rep(0, length(cases.US.total))
cases.US.imported[as.numeric(names(import.counts))] = import.counts
rm(import.counts)
# locally acquired = total minus imported, clamped at zero
cases.US.local = pmax(0, cases.US.total - cases.US.imported)
# count up local deaths by day in the US
deaths.US.total = c(rep(0, 21), tsd.natl['US', ])
# tally imported (traveler) deaths by day of year since 2019-12-31
death.counts = table(
  as.Date(as.character(subset(yesUS, international_traveler > 0 & death > 0)$reporting.date)) -
    as.Date('2019-12-31'))
deaths.US.imported = rep(0, length(deaths.US.total))
deaths.US.imported[as.numeric(names(death.counts))] = death.counts
rm(death.counts)
# locally acquired = total minus imported, clamped at zero
deaths.US.local = pmax(0, deaths.US.total - deaths.US.imported)
#=============================================================================#
# simulate imported infections
#=============================================================================#
# sample replicates of how many infections have been imported into the US
maxUS = 2e4
rangeUS = sum(yesUS$international_traveler>0,na.rm=T):maxUS
# estimate for asymptomatic proportion based on
# https://www.medrxiv.org/content/10.1101/2020.02.20.20025866v2
# beta shape parameters (optimized on the log scale for positivity) chosen so
# the beta quantiles match the published median and 95% CI (0.179, 0.155-0.202)
PrAsymptomatic = exp(optim(par=c(0,0),fn=function(par){
sum((
qbeta(c(0.5,0.025,0.975),exp(par[1]),exp(par[2])) -
c(0.179,0.155,0.202)) ^ 2)})$par)
# estimate for proportion of symptomatic infections resulting in death based on
# http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
# beta(1+deaths, 1+survivors) from 1023 deaths among 44672 symptomatic cases
PrDeathSymptom = c(1+1023,1+44672-1023)
# set values of unknown parameters
# note that these values seem to maximize the probability of the cumulative
# deaths in the US as of March 8, 2020 predicted by the model
replicates = 1000
# posterior draws for the traveler symptomatic-case fraction and the relative
# infectiousness of asymptomatics, estimated elsewhere in the project
load("../results/sensitivity/param_estimates_posterior_1.rda", verbose=T)
indices = sample(1:length(PrCaseSymptom.trav_posterior), replicates, replace=TRUE)
PrCaseSymptom.trav = PrCaseSymptom.trav_posterior[indices]
asympRFraction = asympRFraction_posterior[indices]
# sample from uncertainty about proportions of infection outcomes
propns.ASCF = cbind(
rbeta(replicates,PrAsymptomatic[1],PrAsymptomatic[2]),
rbeta(replicates,PrDeathSymptom[1],PrDeathSymptom[2]))
# partition into columns: asymptomatic, symptomatic-but-undetected,
# detected case, fatal ("ASCF") -- the four rows must sum to 1
propns.ASCF = cbind(
propns.ASCF[,1],
(1-propns.ASCF[,1]) * (1-PrCaseSymptom.trav) * (1-propns.ASCF[,2]),
(1-propns.ASCF[,1]) * PrCaseSymptom.trav * (1-propns.ASCF[,2]),
(1-propns.ASCF[,1]) * propns.ASCF[,2])
# draw samples of the number of imported infections
imports = numeric(length=replicates)
for(ii in 1:replicates){
# likelihood of each candidate total, given the observed split of travelers
# into surviving cases and deaths (num.CF); unobserved = A + S outcomes
PrImportedInfections =
dmultinomial(
x = cbind(
0:(maxUS-sum(num.CF)),
num.CF[1],num.CF[2]),
prob = c(sum(propns.ASCF[ii,1:2]),propns.ASCF[ii,3:4]))
imports[ii] =
sample(
sum(num.CF):maxUS,
1,
prob=PrImportedInfections,
replace=T)
}
# draw samples of the day on which imported infections arrived:
# each day-of-year index repeated once per imported case reported that day.
# rep() with a 'times' vector replaces the original grow-in-a-loop
# construction, which copied the whole vector on every append (O(n^2)).
case.days = rep(seq_along(cases.US.imported), cases.US.imported)
# empirical density of import timing, evaluated on each day of the record
import.case.density = density(
  case.days,
  from = 1,
  to = length(cases.US.imported),
  n = length(cases.US.imported))$y
# estimate the day of the year on which each replicate's imports occur
# (lapply preserves the original list structure and RNG call order)
import.doy = lapply(seq_len(replicates), function(ii) {
  sample(
    seq_along(cases.US.imported),
    imports[ii],
    prob = import.case.density,
    replace = TRUE)
})
#=============================================================================#
# simulate local transmission
#=============================================================================#
# simulate local transmission for each draw of imported infections
# (%do% runs sequentially; simOutbreak comes from the sourced simOutbreak.R)
local = foreach(ii = 1:replicates) %do% {
simOutbreak(
timeImport = import.doy[[ii]], # timing of each imported infection
R = 1.97, # reproduction number
k = 1e3, # dispersion parameter
si_mean = 4.56, # mean of serial interval distribution
si_sd = 0.95, # standard deviation of serial interval distribution
inc_shape = 1.88, # shape parameter of incubation period distribution
inc_scale = 7.97, # scale parameter of incubation period distribution
symp_to_death_mean = 14, # mean of time between symptom onset and death
symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
stopSimulationDay = length(cases.US.imported), # day of year since Jan 1 when simulation stops
asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
lnormFlag = F # toggles whether serial interval distribution is lognormal
)
}
#simulate deaths out but turn transmission off on 12 March
# simOutbreakR0Change presumably also comes from simOutbreak.R -- verify
local.predict = foreach(ii = 1:replicates) %do% {
simOutbreakR0Change(
timeImport = import.doy[[ii]], # timing of each imported infection
R = 1.97, # reproduction number
k = 1e3, # dispersion parameter
si_mean = 4.56, # mean of serial interval distribution
si_sd = 0.95, # standard deviation of serial interval distribution
inc_shape = 1.88, # shape parameter of incubation period distribution
inc_scale = 7.97, # scale parameter of incubation period distribution
symp_to_death_mean = 14, # mean of time between symptom onset and death
symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
stopSimulationDay = 180, # day of year since Jan 1 when simulation stops
asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
lnormFlag = F, # toggles whether serial interval distribution is lognormal
RChangeDay = length(cases.US.imported), # determines when R changes
RChange = 0 # determines what R drops to at R0ChangeDay
)
}
# load the following to generate the objects used to generate the figures in the paper
# NOTE(review): this load() overwrites the freshly simulated objects above with
# the archived ones -- confirm that is the intent before relying on new runs
load("../results/objects_used_in_paper.RData",verbose=T)
#=============================================================================#
# produce plots and results for all main text figures
#=============================================================================#
# set figure margins
par(mar=c(4,5,1,1))
# Infections and imports results - processing
# replicate x day matrix of daily incidence across simulations
local.mat = t(matrix(
unlist(lapply(local, function(x) x$daily)),
length(local[[1]]$daily),
replicates))
# Infections and imports results - quantities (printed when run via Rscript)
quantile(PrCaseSymptom.trav_posterior,c(0.025, 0.5, 0.975))
quantile(unlist(lapply(local,function(ll)ll$cum)),c(0.025,0.5,0.975))
quantile(local.mat[,ncol(local.mat)],c(0.025,0.5,0.975))
# Figure 1
# plot distribution of cumulative infections
# plot all locally acquired infections over time
pdf('../plots/figure_1_cumulative_infections_and_infections_daily.pdf',
width=9,height=5, pointsize=14)
par(mfrow=c(1,2))
hist(
unlist(lapply(local,function(ll)ll$cum)),
col='gray',xlab='Cumulative infections',
ylab='Number of simulations',main='',las=1)
mtext("A",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
plot(
as.Date('2019-12-31') + 1:ncol(local.mat),
apply(local.mat,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,quantile(local.mat[,ncol(local.mat)],0.975)),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(local.mat)),
xlab='Date',ylab='Infections',main='')
polygon(
c(as.Date('2019-12-31') + 1:ncol(local.mat),
rev(as.Date('2019-12-31') + 1:ncol(local.mat))),
c(apply(local.mat,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(local.mat,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
mtext("B",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
dev.off()
# Cases results - processing
updateDaily = FALSE # turn on Bayesian daily updating
smoothSpline = TRUE # turn on smoothing spline
cases.mat = t(matrix(
unlist(lapply(local, function(x) x$cases)),
length(local[[1]]$cases),
replicates))
p.mat = matrix(NA,nrow(cases.mat),ncol(cases.mat))
cases.mat.obs = rbinom(length(cases.mat), as.vector(cases.mat), rowSums(propns.ASCF[,2:3]))
cases.mat.obs = matrix(cases.mat.obs, replicates, ncol(cases.mat))
for(ii in 1:nrow(cases.mat)){
alpha.old=1
beta.old=1
for(jj in 1:ncol(cases.mat)){
if(cases.mat[ii,jj]){
actual.cases = rbinom(1,cases.mat[ii,jj], sum(propns.ASCF[ii,2:3]))
alpha.new = alpha.old+cases.US.local[jj]
beta.new = beta.old+actual.cases-cases.US.local[jj]
p.mat[ii,jj] =
rbeta(1,alpha.new,max(1,beta.new))
if (updateDaily) {
alpha.old=alpha.new
beta.old=beta.new
}
}
}
if (smoothSpline) {
non.NA.indices = which(!is.na(p.mat[ii,]))
if(length(non.NA.indices) > ncol(p.mat) / 3){
temp.sp = smooth.spline((1:ncol(p.mat))[non.NA.indices],
logit(p.mat[ii,non.NA.indices]),
nknots=floor((ncol(p.mat) - non.NA.indices[1])/7 + 0.5))
p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = inv.logit(predict(temp.sp, non.NA.indices[1]:ncol(p.mat))$y)
} else {
p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = NA
}
}
}
testing = read.csv("../data/testing_ts.csv")
incomplete.days = testing$Day[!testing$Complete]
low.p = which.min(apply(p.mat,2,function(ii)median(ii,na.rm=T)))
# Cases results - quantities
low.p+as.Date('2019-12-31')
apply(p.mat,2,function(x)quantile(x,c(0.025,0.5,0.975),na.rm=T))[,low.p]
apply(p.mat,2,function(x)quantile(x,c(0.025,0.5,0.975),na.rm=T))[,length(cases.US.local)]
quantile(rowSums(ifelse(p.mat < 0.2,1,0),na.rm=T), c(0.025,0.5,0.975))
# obtain correlations of p.mat and testing
totalTestsPerDay = c(rep(0, min(testing$Day)-1),
testing$Total[1:which(testing$Day==ncol(p.mat))])
start.day.cor = low.p
end.day.cor = max(testing$Day[testing$Complete])
correlation.reduced = cor(t(p.mat[,start.day.cor:end.day.cor]),
totalTestsPerDay[start.day.cor:end.day.cor], use="pairwise")
quantile(correlation.reduced,c(0.025,0.5,0.975))
# Figure 2
# plot locally acquired symptomatic infections over time, alongside number of cases reported in the US
# plot proportion of locally acquired symptomatic infections reported over time
# alongside numbers of tests administered in the US
pdf('../plots/figure_2_symptomatic_daily_and_symptomatic_detected.pdf',
width=9,height=4.8, pointsize=14)
par(mfrow=c(1,2))
par(mar = c(5, 4, 4, 4) + 0.3)
plot(
as.Date('2019-12-31') + 1:ncol(cases.mat),
apply(cases.mat.obs,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,quantile(cases.mat.obs[,ncol(cases.mat)],0.975)),
col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)),
xlab='Date',ylab='Symptomatic infections',main='')
polygon(
c(as.Date('2019-12-31') + 1:ncol(cases.mat),
rev(as.Date('2019-12-31') + 1:ncol(cases.mat))),
c(apply(cases.mat.obs,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(cases.mat.obs,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
mtext("A",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
par(new = TRUE)
plot(as.Date('2019-12-31') + 1:ncol(cases.mat),
cases.US.local, type="l", col="red",
axes=F, bty = "n", xlab = "", ylab = "",
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)), lwd=2,
xaxs='i',yaxs='i')
axis(side=4, at = pretty(range(cases.US.local)), col="red", col.axis="red",las=1)
mtext("Reported cases", side=4, line=3, lwd=2, col="red")
legend("topleft", col=c("red", "black"), lty="solid",
legend=rev(c("Model", "Data")),
bty="n", lwd=2)
plot(
as.Date('2019-12-31') + 1:ncol(p.mat),
apply(p.mat,2,function(ii)median(ii,na.rm=T)),
ylim=c(0,1),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
xlim=as.Date('2019-12-31') + c(31,ncol(p.mat)),
xlab='Date',ylab='Symptomatics reporting',
main='')
polygon(
c(as.Date('2019-12-31') + 1:ncol(p.mat),
rev(as.Date('2019-12-31') + 1:ncol(p.mat))),
c(apply(p.mat,2,function(ii)quantile(ii,0.025,na.rm=T)),
rev(apply(p.mat,2,function(ii)quantile(ii,0.975,na.rm=T)))),
border=NA,col=rgb(0,0,0,0.25))
par(new = TRUE)
plot(as.Date('2019-12-31') + testing$Day[testing$Complete],
testing$Total[testing$Complete], type="l", col="red",
axes=F, bty = "n", xlab = "", ylab = "",
xlim=as.Date('2019-12-31') + c(31,ncol(cases.mat)), lwd=2,
xaxs='i',yaxs='i')
axis(side=4, at = pretty(range(testing$Total)), col="red", col.axis="red",las=1)
mtext("Tests administered", side=4, line=3, lwd=2, col="red")
legend("top", col=c("red", "black"), lty="solid",
legend=c("Data", "Model"),
bty="n", lwd=2)
mtext("B",side=3,line=0,
at=par("usr")[1]+0.05*diff(par("usr")[1:2]),
cex=1.2)
dev.off()
# Deaths results - processing
local.predict.death = t(matrix(unlist(lapply(local.predict, function(x) x$death)), length(local.predict[[1]]$death), replicates))
local.predict.death = rbinom(length(local.predict.death), as.vector(local.predict.death), propns.ASCF[,4])
local.predict.death = matrix(local.predict.death, replicates, length(local.predict[[1]]$death))
death.mat = t(matrix(
unlist(lapply(local, function(x) x$death)),
length(local[[1]]$death),
replicates))
death.mat.obs = rbinom(length(death.mat), as.vector(death.mat), propns.ASCF[,4])
death.mat = matrix(death.mat.obs, replicates, ncol(death.mat))
all.death.mat = t(matrix(
unlist(lapply(local, function(x) x$daily)),
length(local[[1]]$cases),
replicates))
all.death.mat.obs = rbinom(length(all.death.mat), as.vector(all.death.mat), propns.ASCF[,4])
all.death.mat = matrix(all.death.mat.obs, replicates, ncol(all.death.mat))
future.death.mat = all.death.mat-death.mat
death.day.min = which(deaths.US.local!=0)[1]
death.day.max = length(deaths.US.local)
quantile(rowSums(death.mat),c(0.025,0.5,0.975))
sum(deaths.US.local)
sum(death.mat[,death.day.min:death.day.max])/sum(death.mat)
quantile(rowSums(future.death.mat),c(0.025,0.5,0.975))
quantile(rowSums(future.death.mat)/rowSums(death.mat),c(0.025,0.5,0.975))
# Figure 3
# plot future deaths, assuming transmission stops on March 12
pdf('../plots/figure_3_deaths_forecast.pdf',width=9,height=6,pointsize=14)
time.max = 150#dim(local.predict.death)[2]
death.predict.median = apply(local.predict.death[,], 2, median)
death.predict.025 = apply(local.predict.death[,], 2, function(x)quantile(x,0.025))
death.predict.975 = apply(local.predict.death[,], 2, function(x)quantile(x,0.975))
times = seq(from=as.Date("2020-01-01"), by="1 day", length.out=time.max)
plot(times, c(deaths.US.local, rep(NA, time.max-length(deaths.US.local))),
xlim = c(as.Date("2020-02-01"),as.Date("2020-05-15")), ylim = c(0,max(death.predict.975)),
col="red", type="l", xlab="Month", ylab="Deaths", lwd=2, main="",xaxs='i',yaxs='i',las=1, xaxt = 'n')
month_starts = c(1, 30, 61, 91)
axis(side = 1, at = times[which(times %in% as.Date("2020-02-01"): as.Date("2020-05-15"))][month_starts], labels = F)
axis(side = 1, at = times[which(times %in% as.Date("2020-02-01"): as.Date("2020-05-15"))][month_starts + 15], tick = F,
labels = c('Feb', 'Mar', 'Apr', 'May'))
lines(times, death.predict.median[1:time.max], col="black", lwd=2)
polygon(
c(times, rev(times)),
c(death.predict.975[1:time.max],
rev(death.predict.025[1:time.max])),
border=NA,col=rgb(0,0,0,alpha=0.25))
abline(v=as.Date("2020-03-12"), lty="dashed")
legend("topleft",lty=rep("solid",2),lwd=2,
legend=c("Data", "Model"),col=c("red","black"),
bty='n')
dev.off()
|
# Multinomial logistic regression on the red-wine quality data.
# NOTE: no seed is set, so the train/test split is random across runs;
# accuracies are therefore computed from the predictions rather than
# hard-coded from one particular split.
library(caTools)
library(rms)
df <- read.csv("./data/redwine.csv")
str(df)
length(unique(df$quality))
unique(df$quality)
library(nnet)

# 80/20 split stratified on the outcome
s1 <- sample.split(df$quality, SplitRatio = 0.8)
train <- subset(df, s1 == TRUE)
test <- subset(df, s1 == FALSE)
str(train)
str(test)

# Full model: quality predicted from all remaining columns
m1 <- multinom(quality ~ ., data = train)
pred <- predict(m1, test)
pred[1:5]
table(pred, test$quality)
# Overall accuracy computed directly from predictions vs truth
# (previously hard-coded as 197/321, which only held for one split)
acc1 <- mean(pred == test$quality)
acc1 # ~0.61 on the original split
summary(m1)

# Forward stepwise selection between the null and full models
null <- multinom(quality ~ 1, data = train)
full <- multinom(quality ~ ., data = train)
step(null, scope = list(lower = null, upper = full), direction = 'forward')

# Reduced model using the variables chosen by the forward search
m2 <- multinom(formula = quality ~ alcohol + volatile.acidity + total.sulfur.dioxide +
  sulphates + free.sulfur.dioxide + chlorides + pH, data = train)
pred2 <- predict(m2, test)
pred2[1:5]
table(pred2, test$quality)
acc2 <- mean(pred2 == test$quality)
acc2 # ~0.60 on the original split
| /Modelling/redwine.R | no_license | gosh76/Rinaction | R | false | false | 794 | r | library(caTools)
library(rms)
df = read.csv("./data/redwine.csv")
str(df)
length(unique(df$quality))
unique(df$quality)
library(nnet)
s1 = sample.split(df$quality,SplitRatio = 0.8)
train = subset(df,s1==T)
test = subset(df,s1==F)
str(train)
str(test)
m1 = multinom(quality~.,data=train)
pred = predict(m1,test)
pred[1:5]
table(pred,test$quality)
105+79+13
acc1 = 197/321
acc1#0.6137
summary(m1)
null = multinom(quality~1,data=train)
full = multinom(quality~.,data=train)
step(null,scope=list(lower=null,upper=full),direction = 'forward')
m2 = multinom(formula = quality ~ alcohol + volatile.acidity + total.sulfur.dioxide +
sulphates + free.sulfur.dioxide + chlorides + pH, data = train)
pred2 = predict(m2,test)
pred2[1:5]
table(pred2,test$quality)
acc2 = 193/321
acc2#0.6012
|
## Put comments here that give an overall description of what your
## functions do
# Creat a matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cached copy of its inverse.
  # Returns a list of four accessor closures sharing this environment:
  #   set(y)      replace the matrix and drop any cached inverse
  #   get()       return the stored matrix
  #   setInv(inv) store a computed inverse in the cache
  #   getInv()    return the cached inverse, or NULL if not yet set
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setInv = function(inv) cached_inverse <<- inv,
    getInv = function() cached_inverse
  )
}
## Return a matrix that is the inverse of 'x'
## Return a matrix that is the inverse of 'x'
# 'x' is a cache-matrix object as produced by makeCacheMatrix().
# If an inverse is already cached it is returned directly (with a
# message); otherwise it is computed, stored in the cache, and returned.
# Extra arguments in '...' are passed on to solve().
cacheSolve <- function(x, ...) {
  invX <- x$getInv()
  if (!is.null(invX)) {
    message("getting cached data")
    return(invX)
  }
  data <- x$get()
  # bug fix: '...' was accepted by the signature but never forwarded
  invX <- solve(data, ...)
  x$setInv(invX)
  invX
}
| /cachematrix.R | no_license | koenma/ProgrammingAssignment2 | R | false | false | 656 | r | ## Put comments here that give an overall description of what your
## functions do
# Creat a matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
invX <- NULL
set <- function(y)
{
x <<- y
invX <<- NULL
}
get <- function() x
setInv <- function(inv) invX <<- inv
getInv <- function() invX
list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Return a matrix that is the inverse of 'x'
cacheSolve <- function(x, ...) {
invX = x$getInv()
if (!is.null(invX))
{
message("getting cached data")
return(invX)
}
data <- x$get()
invX <- solve(data)
x$setInv(invX)
invX
}
|
library(tidyverse)

# Toy district-by-year data with two observation columns; whitespace-
# separated, so read_table2() parses it straight from the string.
tbl <- read_table2(
"Yr Geo Obs1 Obs2
2001 Dist1 1 3
2002 Dist1 2 5
2003 Dist1 4 2
2004 Dist1 2 1
2001 Dist2 1 3
2002 Dist2 .9 5
2003 Dist2 6 8
2004 Dist2 2 .2"
)

# Widen to one column per Geo-observation pair (e.g. dist1obs1), with
# all column names lower-cased.  pivot_longer()/pivot_wider() replace
# the superseded gather()/spread(), and rename_with() replaces the
# `colnames<-`(...) hack.
tbl %>%
  pivot_longer(c(Obs1, Obs2), names_to = "obsnum", values_to = "obs") %>%
  unite(colname, Geo, obsnum, sep = "") %>%
  pivot_wider(names_from = colname, values_from = obs) %>%
  rename_with(str_to_lower)
| /stackoverflow/spread_multiple.R | no_license | Zedseayou/reprexes | R | false | false | 434 | r | library(tidyverse)
tbl <- read_table2(
"Yr Geo Obs1 Obs2
2001 Dist1 1 3
2002 Dist1 2 5
2003 Dist1 4 2
2004 Dist1 2 1
2001 Dist2 1 3
2002 Dist2 .9 5
2003 Dist2 6 8
2004 Dist2 2 .2"
)
tbl %>%
gather("obsnum", "obs", Obs1, Obs2) %>%
unite(colname, Geo, obsnum, sep = "") %>%
spread(colname, obs) %>%
`colnames<-`(str_to_lower(colnames(.)))
|
singlemap <- function(IMG =data$demoimage1, CORRECTIONMAT=data$DIFF50, VERBOSE=TRUE, reps=1, LEVEL=6) {
  #--------------------------------------------------------------
  #
  # TITLE:     singlemap()
  # AUTHOR:    TARMO REMMEL
  # DATE:      23 January 2020
  # CALLS:     findrow(), findcol(), CARsimu(), calculate_lsm(), wi()
  # CALLED BY: NA
  # NEEDS:     landscapemetrics, raster
  # NOTES:     USED TO PERFORM THE RHO BIAS CORRECTION AND TO
  #            PRODUCE DATAFRAMES OF CLASS METRIC RESULTS FOR
  #            reps NUMBER OF REALIZATIONS. RESULTS ARE STORED
  #            IN TWO OBJECTS, ONE WITH METRICS COMPUTED FOR THE
  #            LOWER CLASS VALUE IN THE IMAGE, A SECOND FOR THE
  #            HIGHER CLASS VALUE IN THE IMAGE (E.G., 0 AND 1)
  #
  # ARGS:      IMG           - binary input image (matrix)
  #            CORRECTIONMAT - Whittle bias-correction lookup matrix
  #            VERBOSE       - print diagnostic output?
  #            reps          - number of simulated realizations
  #            LEVEL         - SIZE argument passed to wi()
  # RETURNS:   data frame with one row per realization and 110 class
  #            metrics (55 per class) as columns
  #--------------------------------------------------------------

  # SAVE GRAPHIC PARAMETERS AND RESTATE THEM ON EXIT
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))

  # READ THE WHITTLE CORRECTION MATRIX FROM THE APPROPRIATE ENVIRONMENT
  DIFFERENCEMAT <- CORRECTIONMAT

  # COMPUTE THE WHITTLE ESTIMATION OF RHO
  rho <- wi(BE = IMG, CONTROL = TRUE, SIZE = LEVEL)

  # COMPUTE THE ESTIMATED PROPORTION OF THE LOWER CATEGORY VALUE
  proportion <- table(IMG)[1] / sum(table(IMG))

  # LOOK UP THE BIAS-CORRECTION CELL FOR THIS (rho, proportion) PAIR
  rindex <- findrow(autocorr = rho, DIFFMAT = DIFFERENCEMAT, VERBOSE = FALSE)
  cindex <- findcol(prop = proportion, DIFFMAT = DIFFERENCEMAT, VERBOSE = FALSE)

  # APPLY BIAS CORRECTION AND DEAL WITH SPECIAL CASES OF 99
  # (99 is a sentinel meaning "outside the lookup-table range")
  if (rindex == 99 || cindex == 99) {
    correctionfactor <- 0
  } else {
    correctionfactor <- DIFFERENCEMAT[rindex, cindex]
  }

  # APPLY BIAS CORRECTION FACTOR
  fixedrho <- rho + correctionfactor

  # IF RHO IS 0.25 OR GREATER, IT EXCEEDS THE LIMIT FOR CARsimu();
  # CLAMP JUST BELOW THE LIMIT
  if (fixedrho >= 0.25) {
    fixedrho <- 0.2499999
  }

  # PROVIDE USER FEEDBACK IF REQUESTED
  if (VERBOSE) {
    cat("rho: ", rho, "\n", sep="")
    cat("adj. rho: ", fixedrho, "\n", sep="")
    cat("True rho: ", fixedrho * 4, "\n", sep="")
  }

  # NOTE: USE fixedrho IN CARsimu() AS THE R1 AND C1 PARAMETERS
  cat("\n...about to simulate ", reps, " realizations of binary images \nhaving a proportion of ", proportion, " low-value class pixels and a \nspatial autocorrelation parameter of ", fixedrho * 4, ".\n", sep="")

  # PREPARE DATAFRAME TO HOLD METRIC RESULTS FOR EACH REALIZATION
  # COLUMNS ARE METRICS (55 PER CLASS x 2 CLASSES), ROWS ARE REPLICATES
  tab <- as.data.frame(matrix(data = NA, ncol = 110, nrow = reps))

  # seq_len() (not 1:reps) so reps = 0 yields an empty loop, not 1:0
  for (a in seq_len(reps)) {
    # PROVIDE USER FEEDBACK ON SCREEN
    cat("\nProcessing realization: ", a, "\n", sep="")

    # PRODUCE SIMULATED REALIZATION WITH GIVEN RHO AND PROPORTION
    # PARAMETERS: threshold the continuous CAR surface at the target
    # proportion to recover a binary 64x64 image
    realizationtemp <- CARsimu(rho = fixedrho, rajz = FALSE)
    realization <- quantile(realizationtemp, proportion)
    GARB <- realizationtemp > realization[1]
    GARB <- factor(GARB)
    GARB <- as.numeric(GARB)
    realization <- GARB
    dim(realization) <- c(64, 64)

    # COMPUTE AND STORE THE 55 CLASS METRICS USING PACKAGE: landscapemetrics
    results <- calculate_lsm(raster(realization), level = "class")
    tab[a, ] <- t(results[, "value"])

    # ADD COLUMN NAMES IF THIS IS THE FIRST ITERATION
    if (a == 1) {
      # ADD NAMES TO THE DATAFRAME TO DIFFERENTIATE THE COLUMNS
      names(tab) <- paste(rep(c("LOW.", "HIGH."), 55), t(results[, "metric"]))
    }
  } # END FOR: a

  # PROVIDE USER FEEDBACK IF REQUESTED
  if (VERBOSE) {
    cat("\n---------------------------------------\n")
    cat("Summary:\n")
    cat("rho: ", round(rho, 6), "\n", sep="")
    cat("adj. rho: ", round(fixedrho, 6), "\n", sep="")
    cat("True rho: ", round(fixedrho * 4, 6), "\n", sep="")
    cat("LOW (black): ", table(IMG)[1], " pixels\n", sep="")
    cat("HIGH (white): ", table(IMG)[2], " pixels\n", sep="")
    cat("---------------------------------------\n")
  }

  # RETURN OUTPUTS FROM FUNCTION
  return(tab)
} # END FUNCTION: singlemap
| /R/singlemap.R | no_license | cran/ShapePattern | R | false | false | 4,071 | r | singlemap <- function(IMG =data$demoimage1, CORRECTIONMAT=data$DIFF50, VERBOSE=TRUE, reps=1, LEVEL=6) {
#--------------------------------------------------------------
#
# TITLE: singlemap()
# AUTHOR: TARMO REMMEL
# DATE: 23 January 2020
# CALLS: findrow(), findcol(), CARsimu(), calculate_lsm(), wi()
# CALLED BY: NA
# NEEDS: landscapemetrics, raster
# NOTES: USED TO PERFORM THE RHO BIAS CORRECTION AND TO
# PRODUCE DATAFRAMES OF CLASS METRIC RESULTS FOR
# reps NUMBER OF REALIZATIONS. RESULTS ARE STORED
# IN TWO OBJECTS, ONE WITH METRICS COMPUTED FOR THE
# LOWER CLASS VALUE IN THE IMAGE, A SECOND FOR THE
# HIGHER CLASS VALUE IN THE IMAGE (E.G., 0 AND 1)
#
#--------------------------------------------------------------
# SAVE GRAPHIC PARAMETERS AND RESTATE THEM ON EXIT
opar <- par(no.readonly =TRUE)
on.exit(par(opar))
# READ THE WHITTLE CORRECTION MATRIX FROM THE APPROPRIATE ENVIRONMENT
DIFFERENCEMAT <- CORRECTIONMAT
# COMPUTE THE WHITTLE ESTIMATION OF RHO
rho <- wi(BE=IMG, CONTROL=TRUE, SIZE=LEVEL)
# COMPUTE THE ESTIMATED PROPORTION OF THE LOWER CATEGORY VALUE
proportion <- table(IMG)[1]/sum(table(IMG))
rindex <- findrow(autocorr=rho, DIFFMAT=DIFFERENCEMAT, VERBOSE=FALSE)
cindex <- findcol(prop=proportion, DIFFMAT=DIFFERENCEMAT, VERBOSE=FALSE)
# APPLY BIAS CORRECTION AND DEAL WITH SPECIAL CASES OF 99
if(rindex == 99 | cindex == 99) {
correctionfactor <- 0
} # END IF
else {
correctionfactor <- DIFFERENCEMAT[rindex, cindex]
} # END ELSE
# APPLY BIAS CORRECTION FACTOR
fixedrho <- rho + correctionfactor
# IF RHO IS 0.25 OR GREATER, IT EXCEEDS THE LIMIT FOR CARsimu().
if(fixedrho >= 0.25) {
fixedrho <- 0.2499999
} # END IF
# PROVIDE USER FEEDBACK IF REQUESTED
if(VERBOSE) {
cat("rho: ", rho, "\n", sep="")
cat("adj. rho: ", fixedrho, "\n", sep="")
cat("True rho: ", fixedrho * 4, "\n", sep="")
} # END IF
# NOTE: USE fixedrho IN CARsimu() AS THE R1 AND C1 PARAMETERS
cat("\n...about to simulate ", reps, " realizations of binary images \nhaving a proportion of ", proportion, " low-value class pixels and a \nspatial autocorrelation parameter of ", fixedrho * 4, ".\n", sep="")
# PREPARE DATAFRAME TO HOLD METRIC RESULTS FOR EACH REALIZATION
# COLUMNS ARE METRICS, ROWS ARE REPLICATES
tab <- as.data.frame(matrix(data=NA, ncol=110, nrow=reps))
for(a in 1:reps) {
# PROVIDE USER FEEDBACK ON SCREEN
cat("\nProcessing realization: ", a, "\n", sep="")
# PRODUCE SIMULATED REALIZATION WITH GIVEN RHO AND PROPORTION PARAMETERS
realizationtemp <- CARsimu(rho = fixedrho, rajz = FALSE)
realization <- quantile(realizationtemp, proportion)
GARB <- realizationtemp > realization[1]
GARB <- factor(GARB)
GARB <- as.numeric(GARB)
realization <- GARB
dim(realization) <- c(64,64)
# COMPUTE AND STORE THE 55 CLASS METRICS USING PACKAGE: landscapemetrics
results <- calculate_lsm(raster(realization), level="class")
tab[a,] <- t(results[,"value"])
# ADD COLUMN NAMES IF THIS IS THE FIRST ITERATION
if(a==1) {
# ADD NAMES TO THE DATAFRAME TO DIFFERENTIATE THE COLUMNS
names(tab) <- paste(rep(c("LOW.", "HIGH."),55), t(results[,"metric"]))
}
} # END FOR: a
# PROVIDE USER FEEDBACK IF REQUESTED
if(VERBOSE) {
cat("\n---------------------------------------\n")
cat("Summary:\n")
cat("rho: ", round(rho, 6), "\n", sep="")
cat("adj. rho: ", round(fixedrho, 6), "\n", sep="")
cat("True rho: ", round(fixedrho * 4, 6), "\n", sep="")
cat("LOW (black): ", table(IMG)[1], " pixels\n", sep="")
cat("HIGH (white): ", table(IMG)[2], " pixels\n", sep="")
cat("---------------------------------------\n")
} # END IF
# RETURN OUTPUTS FROM FUNCTION
return(tab)
} # END FUNCTION: singlemap
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
library(gbm)
test.GBM.smallcat <- function(conn) {
  # Regression test: a depth-1, single-tree GBM should separate a binary
  # outcome that is perfectly determined by a 26-level categorical
  # predictor.  'conn' is an open h2o connection supplied by the runner.
  # Training set has 26 categories from A to Z
  # Categories A, C, E, G, ... are perfect predictors of y = 1
  # Categories B, D, F, H, ... are perfect predictors of y = 0
  Log.info("Importing alphabet_cattest.csv data...\n")
  alphabet.hex <- h2o.uploadFile(conn, locate("smalldata/gbm_test/alphabet_cattest.csv"), key = "alphabet.hex")
  # outcome must be a factor so h2o treats this as classification
  alphabet.hex$y <- as.factor(alphabet.hex$y)
  Log.info("Summary of alphabet_cattest.csv from H2O:\n")
  print(summary(alphabet.hex))
  # Import CSV data for R to use in comparison
  alphabet.data <- read.csv(locate("smalldata/gbm_test/alphabet_cattest.csv"), header = TRUE)
  alphabet.data$y <- as.factor(alphabet.data$y)
  Log.info("Summary of alphabet_cattest.csv from R:\n")
  print(summary(alphabet.data))
  # Train H2O GBM Model:
  # No longer naive since group_split is always on
  # NOTE(review): the two fits below use identical arguments, so the
  # "Naive Split" vs "Group Split" distinction is label-only here --
  # confirm whether a group_split toggle was meant to differ.
  Log.info("H2O GBM (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  drfmodel.nogrp <- h2o.gbm(x = "X", y = "y", training_frame = alphabet.hex, ntrees = 1, max_depth = 1, nbins = 100, loss = "bernoulli")
  print(drfmodel.nogrp)
  drfmodel.nogrp.perf <- h2o.performance(drfmodel.nogrp, alphabet.hex)
  Log.info("H2O GBM (Group Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
  drfmodel.grpsplit <- h2o.gbm(x = "X", y = "y", training_frame = alphabet.hex, ntrees = 1, max_depth = 1, nbins = 100, loss = "bernoulli")
  print(drfmodel.grpsplit)
  drfmodel.grpsplit.perf <- h2o.performance(drfmodel.grpsplit, alphabet.hex)
  # Check AUC and overall prediction error at least as good with group split than without
  # (these assertions are currently disabled)
  #Log.info("Expect GBM with Group Split to give Perfect Prediction in Single Iteration")
  #expect_true(drfmodel.grpsplit@model$auc == 1)
  #expect_true(drfmodel.grpsplit@model$confusion[3,3] == 0)
  #expect_true(h2o.auc(drfmodel.grpsplit.perf) >= h2o.auc(drfmodel.nogrp.perf))
  #expect_true(h2o.accuracy(drfmodel.grpsplit.perf, 0.5) <= h2o.accuracy(drfmodel.nogrp.perf, 0.5))
  # Train R GBM Model (reference comparison; currently disabled):
  # Log.info("R GBM with same parameters:")
  # drfmodel.r <- gbm(y ~ ., data = alphabet.data, n.trees = 1, nodesize = 1)
  # drfmodel.r.pred <- predict(drfmodel.r, alphabet.data, type = "response")
  # Compute confusion matrices
  # Log.info("R Confusion Matrix:"); print(drfmodel.r$confusion)
  # Log.info("H2O (Group Split) Confusion Matrix:"); print(drfmodel.grpsplit@model$confusion)
  # Compute the AUC - need to convert factors back to numeric
  # actual <- ifelse(alphabet.data$y == "0", 0, 1)
  # pred <- ifelse(drfmodel.r.pred == "0", 0, 1)
  # R.auc = gbm.roc.area(actual, pred)
  # Log.info(paste("R AUC:", R.auc, "\tH2O (Group Split) AUC:", drfmodel.grpsplit@model$auc))
  testEnd()
}
doTest("GBM Test: Classification with 26 categorical level predictor", test.GBM.smallcat)
| /h2o-r/tests/testdir_algos/gbm/runit_GBM_groupsplit_smallcat.R | permissive | engr3os/h2o-dev | R | false | false | 2,950 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
library(gbm)
test.GBM.smallcat <- function(conn) {
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
Log.info("Importing alphabet_cattest.csv data...\n")
alphabet.hex <- h2o.uploadFile(conn, locate("smalldata/gbm_test/alphabet_cattest.csv"), key = "alphabet.hex")
alphabet.hex$y <- as.factor(alphabet.hex$y)
Log.info("Summary of alphabet_cattest.csv from H2O:\n")
print(summary(alphabet.hex))
# Import CSV data for R to use in comparison
alphabet.data <- read.csv(locate("smalldata/gbm_test/alphabet_cattest.csv"), header = TRUE)
alphabet.data$y <- as.factor(alphabet.data$y)
Log.info("Summary of alphabet_cattest.csv from R:\n")
print(summary(alphabet.data))
# Train H2O GBM Model:
# No longer naive since group_split is always on
Log.info("H2O GBM (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
drfmodel.nogrp <- h2o.gbm(x = "X", y = "y", training_frame = alphabet.hex, ntrees = 1, max_depth = 1, nbins = 100, loss = "bernoulli")
print(drfmodel.nogrp)
drfmodel.nogrp.perf <- h2o.performance(drfmodel.nogrp, alphabet.hex)
Log.info("H2O GBM (Group Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
drfmodel.grpsplit <- h2o.gbm(x = "X", y = "y", training_frame = alphabet.hex, ntrees = 1, max_depth = 1, nbins = 100, loss = "bernoulli")
print(drfmodel.grpsplit)
drfmodel.grpsplit.perf <- h2o.performance(drfmodel.grpsplit, alphabet.hex)
# Check AUC and overall prediction error at least as good with group split than without
#Log.info("Expect GBM with Group Split to give Perfect Prediction in Single Iteration")
#expect_true(drfmodel.grpsplit@model$auc == 1)
#expect_true(drfmodel.grpsplit@model$confusion[3,3] == 0)
#expect_true(h2o.auc(drfmodel.grpsplit.perf) >= h2o.auc(drfmodel.nogrp.perf))
#expect_true(h2o.accuracy(drfmodel.grpsplit.perf, 0.5) <= h2o.accuracy(drfmodel.nogrp.perf, 0.5))
# Train R GBM Model:
# Log.info("R GBM with same parameters:")
# drfmodel.r <- gbm(y ~ ., data = alphabet.data, n.trees = 1, nodesize = 1)
# drfmodel.r.pred <- predict(drfmodel.r, alphabet.data, type = "response")
# Compute confusion matrices
# Log.info("R Confusion Matrix:"); print(drfmodel.r$confusion)
# Log.info("H2O (Group Split) Confusion Matrix:"); print(drfmodel.grpsplit@model$confusion)
# Compute the AUC - need to convert factors back to numeric
# actual <- ifelse(alphabet.data$y == "0", 0, 1)
# pred <- ifelse(drfmodel.r.pred == "0", 0, 1)
# R.auc = gbm.roc.area(actual, pred)
# Log.info(paste("R AUC:", R.auc, "\tH2O (Group Split) AUC:", drfmodel.grpsplit@model$auc))
testEnd()
}
doTest("GBM Test: Classification with 26 categorical level predictor", test.GBM.smallcat)
|
t_generalize <- function(w1, w2, cnames1, cnames2) {
  # Run Welch two-sample t-tests column-by-column between two waves of
  # data and collect the per-column summaries produced by t_out().
  #
  # Arguments:
  #   w1      - wave-1 data frame
  #   w2      - wave-2 data frame (w1 and w2 merged, per original note)
  #   cnames1 - character vector of column names to test in w1
  #   cnames2 - character vector of column names to test in w2; must be
  #             identical to cnames1
  #
  # Returns: a matrix with one row per tested column and the five
  # statistics reported by t_out() as columns.

  # Error Handling: the two name vectors must match exactly
  if( !identical(cnames1, cnames2) ) {
    stop("Error: cnames1 and cnames2 are not identical. Make them identical real good")
  }

  # Print Dimensions for a quick sanity check
  cat("Wave 1 Dimensions:\n")
  print(dim(w1))
  cat("Wave 2 Dimensions:\n")
  print(dim(w2))
  cat("\n")

  # Select (and order) the requested columns directly by name; this
  # replaces the previous two-step %in% filter + reorder, which was
  # redundant.  drop = FALSE keeps a one-column request as a data
  # frame so ncol()/column indexing below still work.
  w1 <- w1[, cnames1, drop = FALSE]
  w2 <- w2[, cnames2, drop = FALSE]

  # One row per tested column; 5 columns = length of a t_out() result
  ttest_collect <- matrix(NA, nrow = ncol(w1), ncol = 5)

  # Loop over columns and fill down ttest_collect rows
  for (i in seq_len(ncol(w1))) {
    ttest_collect[i, ] <- t_out(t.test(w1[, i], w2[, i]))
  }

  # Label columns and rows
  colnames(ttest_collect) <- c("W1_Mean", "W2_Mean", "Abs_Diff", "T_Stat", "P_Value")
  rownames(ttest_collect) <- cnames1

  # Return Matrix
  return(ttest_collect)
}
| /R/t_generalize.R | no_license | lorenc5/Rmturkcheck | R | false | false | 1,193 | r | t_generalize <- function(w1, w2, cnames1, cnames2) {
# Arguments #
# w1 = wave1 dataset
# w2 = wave2 dataset, which is w1 and w2 merged, actually.
# cnames1; cnames2 = character vector of column names; must be same name
# Error Handling #
if( !identical(cnames1, cnames2) ) {
stop("Error: cnames1 and cnames2 are not identical. Make them identical real good")
}
# Print Dimensions #
cat("Wave 1 Dimensions:\n")
print(dim(w1))
cat("Wave 2 Dimensions:\n")
print(dim(w2))
cat("\n")
# Select Columns
col_select1 <- colnames(w1) %in% cnames1
col_select2 <- colnames(w2) %in% cnames2
# select appropriate columns
w1 <- w1[,col_select1]
w2 <- w2[,col_select2]
w1 <- w1[, cnames1]
w2 <- w2[, cnames2]
# Create matrix holder
ttest_collect <- matrix(NA, nrow=ncol(w1), ncol=5) # nrow=length of t_out() result
# Loop over columns and fill down ttest_collect rows
for (i in 1:ncol(w1)){
ttest_collect[i,] <- t_out(t.test(w1[,i], w2[,i]))
}
# Label columns and rows
colnames(ttest_collect) <- c("W1_Mean", "W2_Mean", "Abs_Diff", "T_Stat", "P_Value")
rownames(ttest_collect) <- cnames1
# Return Matrix
return(ttest_collect)
}
|
library(EBImage)
library(CRImage)
require(plyr)
require(foreach)
require(doSNOW)
ncluster <-4
setMKLthreads(n=2)
cl <- makeCluster(ncluster)
registerDoSNOW(cl)
FILES<-dir(path = 'Images/Sample_0_Crop_5_10/',full.names = T)[1:6]
# Segment each image in parallel: threshold the blue channel inside a
# fixed crop window, clean the mask with a morphological closing, label
# connected components, and keep the largest labelled objects.
areas<-foreach(filei=FILES,.packages = 'EBImage') %dopar% {
img<-readImage(filei)
# display(img)
num_agregates <- 5
# crop window: rows 900-2000, columns 400-1500 of the image
cutting_area<-list(c(900:2000),c(400:1500))
# foreground mask: TRUE where the blue channel is <= 0.9 (non-background)
a<-!img[cutting_area[[1]],cutting_area[[2]],3]>.9
# display(a,method='raster')
# Apply a morphological closing to fill the '0' holes inside each aggregate
y <- closing(a, makeBrush(5, shape='disc'))
#check#
# display(y,method='raster')
## Recognize and label each aggregate as a different object ##
z <- bwlabel(y)
# NOTE(review): 0:num_agregates+1 parses as (0:num_agregates)+1, i.e.
# indices 1..6 of the size-sorted labels -- this also keeps the
# background label (typically the largest region); confirm intended.
agregates<-as.numeric(names(sort(table(z),decreasing = T))[0:num_agregates+1])
Ag_count <-z*(matrix(z%in%c(agregates),ncol=dim(z)[2]))
# Relabel the first num_agregates retained label values as 0..4.
# NOTE(review): ids is a factor, so Ag_count==ids[i] compares against
# its character levels; works for integer labels but is fragile.
ids<-unique(as.factor(Ag_count))
for (i in 1:num_agregates){
Ag_count[Ag_count==ids[i]]<-i-1
}
## re-color aggregates for visual inspection ##
cols = c('black', rainbow(n=num_agregates))
result<-list()
result$Ag_count_colored = Image(cols[1+Ag_count], dim=dim(Ag_count),colormode = 'Color')
result$original <- img
result
#check#
# display(img,method = 'raster')
# display(Ag_count_colored,method = 'raster')
# computeFeatures.shape(Ag_count)[,1]
}
pdf(file = 'Plots/Check_slacking.pdf')
lapply(areas, function(x) {
display(x[[1]],method='raster')
display(x[[2]],method='raster')
})
dev.off()
setwd('Plots/')
shell.exec('Check_slacking.pdf')
setwd('..')
#####now do all of them ####
ncluster <-4
setMKLthreads(n=2)
cl <- makeCluster(ncluster)
registerDoSNOW(cl)
FILES<-dir(path = 'Images/Sample_0_Crop_5_10/',full.names = T)
areas<-foreach(filei=FILES,.packages = 'EBImage') %dopar% {
img<-readImage(filei)
# display(img)
num_agregates <- 5
cutting_area<-list(c(900:2000),c(400:1500))
a<-!img[cutting_area[[1]],cutting_area[[2]],3]>.9
# display(a,method='raster')
#Apply some filters for taking the '0' values inside de agregate#
y <- closing(a, makeBrush(5, shape='disc'))
#check#
# display(y,method='raster')
## Recognize and label each agregate as a differen object##
z <- bwlabel(y)
agregates<-as.numeric(names(sort(table(z),decreasing = T))[0:num_agregates+1])
Ag_count <-z*(matrix(z%in%c(agregates),ncol=dim(z)[2]))
ids<-unique(as.factor(Ag_count))
for (i in 1:num_agregates){
Ag_count[Ag_count==ids[i]]<-i-1
}
## re-color agregates in colors##
cols = c('black', terrain.colors(n=num_agregates))
result<-list()
result$Ag_count_colored = Image(cols[1+Ag_count], dim=dim(Ag_count))
result$original <- img
result
#check#
# display(img,method = 'raster')
# display(Ag_count_colored,method = 'raster')
computeFeatures.shape(Ag_count)[,1]
}
stopCluster(cl)
saveRDS(areas,file = 'RData/Sample_0_Crop_5_10.RData')
num_agregates <- 5
observations<-c(seq(0,120,1),seq(140,360,20),seq(420,7200,600))
for (i in 1:num_agregates){
Final <-do.call(rbind,areas)
Final <- c(0,sapply(2:nrow(Final),function(x) (Final[x,i]-Final[1,1])/Final[1,1]))
plot(observations,Final,type='b',main=paste0('agregate',i),lwd=1,xlab='seconds')
}
Area_aver <- data.frame(Time=observations,do.call(rbind,areas))
require(ggplot2)
require(reshape2)
Area_aver<-melt(Area_aver,id.vars = 'Time')
ggplot(Area_aver,aes(Time,value,colour=variable))+
geom_point(size=3)+
scale_x_log10()+
labs(x='Time (log_scale)') | /Sample_0_Crop_5_10.R | no_license | mariofajardo/ASWAT | R | false | false | 3,484 | r | library(EBImage)
library(CRImage)
require(plyr)
require(foreach)
require(doSNOW)
# --- Test run: segment the first 6 time-lapse images in parallel -----------
# 4-worker SNOW cluster; MKL capped at 2 threads so workers don't oversubscribe.
ncluster <-4
setMKLthreads(n=2)
cl <- makeCluster(ncluster)
registerDoSNOW(cl)
FILES<-dir(path = 'Images/Sample_0_Crop_5_10/',full.names = T)[1:6]
areas<-foreach(filei=FILES,.packages = 'EBImage') %dopar% {
  img<-readImage(filei)
  # display(img)
  # Number of soil aggregates expected per image.
  num_agregates <- 5
  # Region of interest, as pixel ranges (rows 900:2000, cols 400:1500).
  cutting_area<-list(c(900:2000),c(400:1500))
  # Threshold the blue channel: TRUE where the pixel is dark (aggregate).
  a<-!img[cutting_area[[1]],cutting_area[[2]],3]>.9
  # display(a,method='raster')
  # Morphological closing with a 5px disc brush fills small holes ('0'
  # values) inside each aggregate.
  y <- closing(a, makeBrush(5, shape='disc'))
  #check#
  # display(y,method='raster')
  ## Recognize and label each aggregate as a different connected object ##
  z <- bwlabel(y)
  # Keep the most frequent labels. NOTE(review): 0:num_agregates+1 parses as
  # (0:num_agregates)+1, i.e. the top num_agregates+1 labels -- this keeps
  # the background label (0) as well; confirm that is intended.
  agregates<-as.numeric(names(sort(table(z),decreasing = T))[0:num_agregates+1])
  Ag_count <-z*(matrix(z%in%c(agregates),ncol=dim(z)[2]))
  # Relabel surviving objects to consecutive ids 0..num_agregates-1.
  # NOTE(review): ids is a factor, so Ag_count==ids[i] compares through the
  # factor's character level; relies on levels sorting numerically -- verify.
  ids<-unique(as.factor(Ag_count))
  for (i in 1:num_agregates){
    Ag_count[Ag_count==ids[i]]<-i-1
  }
  ## Re-color aggregates: background black, one rainbow color per aggregate ##
  cols = c('black', rainbow(n=num_agregates))
  result<-list()
  result$Ag_count_colored = Image(cols[1+Ag_count], dim=dim(Ag_count),colormode = 'Color')
  result$original <- img
  # Last expression = value returned by this foreach iteration.
  result
  #check#
  # display(img,method = 'raster')
  # display(Ag_count_colored,method = 'raster')
  # computeFeatures.shape(Ag_count)[,1]
}
stopCluster(cl)
# Write the colored-label image and the original of every test file to a PDF
# so segmentation quality can be checked by eye.
pdf(file = 'Plots/Check_slacking.pdf')
lapply(areas, function(x) {
  display(x[[1]],method='raster')
  display(x[[2]],method='raster')
})
dev.off()
# Open the PDF for inspection. NOTE(review): shell.exec is Windows-only,
# and the setwd/setwd('..') dance assumes it succeeds; consider normalizePath.
setwd('Plots/')
shell.exec('Check_slacking.pdf')
setwd('..')
##### Now do all of them: same segmentation pipeline over every image ####
ncluster <-4
setMKLthreads(n=2)
cl <- makeCluster(ncluster)
registerDoSNOW(cl)
FILES<-dir(path = 'Images/Sample_0_Crop_5_10/',full.names = T)
areas<-foreach(filei=FILES,.packages = 'EBImage') %dopar% {
  img<-readImage(filei)
  # display(img)
  num_agregates <- 5
  # Same crop window as the test run above.
  cutting_area<-list(c(900:2000),c(400:1500))
  a<-!img[cutting_area[[1]],cutting_area[[2]],3]>.9
  # display(a,method='raster')
  # Morphological closing fills the '0' holes inside each aggregate.
  y <- closing(a, makeBrush(5, shape='disc'))
  #check#
  # display(y,method='raster')
  ## Recognize and label each aggregate as a different connected object ##
  z <- bwlabel(y)
  # NOTE(review): 0:num_agregates+1 == (0:num_agregates)+1 -- keeps the
  # background label too; see identical note in the test run.
  agregates<-as.numeric(names(sort(table(z),decreasing = T))[0:num_agregates+1])
  Ag_count <-z*(matrix(z%in%c(agregates),ncol=dim(z)[2]))
  ids<-unique(as.factor(Ag_count))
  for (i in 1:num_agregates){
    Ag_count[Ag_count==ids[i]]<-i-1
  }
  ## Re-color aggregates (terrain palette here, rainbow in the test run) ##
  cols = c('black', terrain.colors(n=num_agregates))
  result<-list()
  result$Ag_count_colored = Image(cols[1+Ag_count], dim=dim(Ag_count))
  result$original <- img
  # NOTE(review): `result` is dead code in this loop -- the value actually
  # returned per iteration is the last expression below (the per-object
  # area vector from computeFeatures.shape), not the result list.
  result
  #check#
  # display(img,method = 'raster')
  # display(Ag_count_colored,method = 'raster')
  computeFeatures.shape(Ag_count)[,1]
}
stopCluster(cl)
# Persist the per-image aggregate areas for later reuse.
saveRDS(areas,file = 'RData/Sample_0_Crop_5_10.RData')
num_agregates <- 5
# Imaging time points (seconds): every 1 s to 120 s, every 20 s to 360 s,
# then every 600 s out to 7200 s.
observations<-c(seq(0,120,1),seq(140,360,20),seq(420,7200,600))
# Plot the relative area change of each aggregate over time.
# NOTE(review): the change for aggregate i is computed against Final[1,1]
# (aggregate 1 at t=0), not Final[1,i] -- confirm this baseline is intended.
for (i in 1:num_agregates){
  Final <-do.call(rbind,areas)
  Final <- c(0,sapply(2:nrow(Final),function(x) (Final[x,i]-Final[1,1])/Final[1,1]))
  plot(observations,Final,type='b',main=paste0('agregate',i),lwd=1,xlab='seconds')
}
# Build a long-format table (Time, variable, value) for plotting with ggplot.
Area_aver <- data.frame(Time=observations,do.call(rbind,areas))
require(ggplot2)
require(reshape2)
Area_aver<-melt(Area_aver,id.vars = 'Time')
ggplot(Area_aver,aes(Time,value,colour=variable))+
geom_point(size=3)+
scale_x_log10()+
labs(x='Time (log_scale)') |
##### Set working directory
# NOTE(review): hard-coded absolute user path; breaks on any other machine.
# Prefer project-relative paths (e.g. the here package).
setwd("/Users/tanyangfan/Desktop/data")
##### R packages
library(magrittr)
library(ISLR)
library(caret)
library(dplyr)
##### Data work
# Read the clothing-expenditure data from the Excel file.
exp_clothing <- readxl::read_excel("exp_clothing.xls")
# Mean Squared Error: average squared difference between x and y.
mse <- function(x,y) {mean((x-y)^2)}
# Split into development/holdout samples on the "holdout" indicator column
# (dplyr::filter; 0 = development sample, 1 = holdout sample).
exp_development <- filter(exp_clothing, holdout == 0)
exp_holdout <- filter(exp_clothing, holdout == 1)
# Goodness of fit
results <-
data.frame(
model = c("AA", "AA", "AB", "AB"),
sample = c("development", "holdout", "development", "holdout"),
MSE = c(mse(exp_development$AA, exp_development$expend),
mse(exp_holdout$AA, exp_holdout$expend),
mse(exp_development$AB, exp_development$expend),
mse(exp_holdout$AB, exp_holdout$expend)),
Rsquared = c(R2(exp_development$AA, exp_development$expend),
R2(exp_holdout$AA, exp_holdout$expend),
R2(exp_development$AB, exp_development$expend),
R2(exp_holdout$AB, exp_holdout$expend))) | /Q9.R | no_license | yangfantan1997/localR | R | false | false | 1,091 | r | ##### Set working directory
setwd("/Users/tanyangfan/Desktop/data")
##### R packages
library(magrittr)
library(ISLR)
library(caret)
library(dplyr)
##### Data work
# Read data from xls file
exp_clothing <- readxl::read_excel("exp_clothing.xls")
# Define the Mean Square Error function
# Mean squared error: average of the squared differences between x and y.
mse <- function(x, y) {
  squared_err <- (x - y)^2
  mean(squared_err)
}
# Subset dataset based on "holdout"
exp_development <- filter(exp_clothing, holdout == 0)
exp_holdout <- filter(exp_clothing, holdout == 1)
# Goodness of fit
results <-
data.frame(
model = c("AA", "AA", "AB", "AB"),
sample = c("development", "holdout", "development", "holdout"),
MSE = c(mse(exp_development$AA, exp_development$expend),
mse(exp_holdout$AA, exp_holdout$expend),
mse(exp_development$AB, exp_development$expend),
mse(exp_holdout$AB, exp_holdout$expend)),
Rsquared = c(R2(exp_development$AA, exp_development$expend),
R2(exp_holdout$AA, exp_holdout$expend),
R2(exp_development$AB, exp_development$expend),
R2(exp_holdout$AB, exp_holdout$expend))) |
# Plot 3 of the Electric Power Consumption assignment: the three energy
# sub-metering channels over the window 2007-02-01/2007-02-02, saved as PNG.

# Import the dataset; "?" marks missing values in the raw file.
household_power_consumption <- read.table("household_power_consumption.txt", stringsAsFactors = FALSE, header = TRUE, sep = ";", na.strings = "?")

# Combine Date + Time into a full timestamp, then parse the Date column.
household_power_consumption$Time <- strptime(paste(household_power_consumption$Date, household_power_consumption$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
household_power_consumption[, 1] <- as.Date(household_power_consumption$Date, format = "%d/%m/%Y")

# Keep only the two target days. format() is computed once, and %in% (unlike
# `==`) yields FALSE rather than NA for unparseable dates, so no NA rows
# leak into the subset.
startDate <- "2007-02-01"
endDate <- "2007-02-02"
day_str <- format(household_power_consumption$Date, "%Y-%m-%d")
household_power_consumption <- household_power_consumption[day_str %in% c(startDate, endDate), ]

# Draw the three sub-metering series into plot3.png (device closed by the
# dev.off() that follows this block).
png(filename = "plot3.png")
plot(household_power_consumption$Time, household_power_consumption$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(household_power_consumption$Time, household_power_consumption$Sub_metering_2, type = "l", col = "red")
lines(household_power_consumption$Time, household_power_consumption$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1) # add legend
dev.off() | /plot3.R | no_license | ankitpurohit7/ExData_Plotting1 | R | false | false | 1,280 | r | # importing dataset
household_power_consumption <- read.table("household_power_consumption.txt", stringsAsFactors= F, header = T, sep=";", na.strings = "?") #import dataset
# convert dates and times
household_power_consumption$Time <- strptime(paste(household_power_consumption$Date, household_power_consumption$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
household_power_consumption[, 1] <- as.Date(household_power_consumption$Date, format = "%d/%m/%Y")
# getting the subset
startDate <- "2007-02-01"
endDate <- "2007-02-02"
household_power_consumption <- household_power_consumption[format(household_power_consumption$Date, "%Y-%m-%d") == startDate | format(household_power_consumption$Date, "%Y-%m-%d") == endDate ,]
# plotting plot3.png
png(filename = "plot3.png")
plot(household_power_consumption$Time, household_power_consumption$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(household_power_consumption$Time, household_power_consumption$Sub_metering_2, type = "l", col = "red")
lines(household_power_consumption$Time, household_power_consumption$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd =1) #add legend
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outline_workflow.R
\name{outline_workflow}
\alias{outline_workflow}
\title{Provide a useful outline of a typical tidymodels workflow}
\usage{
outline_workflow(prefix = NULL)
}
\arguments{
\item{prefix}{a character vector}
}
\value{
a character vector
}
\description{
Provide a useful outline of a typical tidymodels workflow
}
\examples{
outline_workflow()
}
| /man/outline_workflow.Rd | permissive | seanlee0622/sometools | R | false | true | 437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outline_workflow.R
\name{outline_workflow}
\alias{outline_workflow}
\title{Provide a useful outline of a typical tidymodels workflow}
\usage{
outline_workflow(prefix = NULL)
}
\arguments{
\item{prefix}{a character vector}
}
\value{
a character vector
}
\description{
Provide a useful outline of a typical tidymodels workflow
}
\examples{
outline_workflow()
}
|
library(dplyr)
library(ggplot2)
# Per-student exam score sums (Polish matura data). Column names are Polish;
# presumably j_polski_podstawowa = basic-level Polish, matematyka_podstawowa
# = basic-level mathematics -- TODO confirm against the CSV header.
wyniki <- read.csv("../dane/przetworzone/sumy_laureaty.csv")
# 2D histogram of Polish vs maths scores, one bin per score point;
# drop=FALSE keeps zero-count bins so the full grid is rendered.
ggplot(wyniki, aes(j_polski_podstawowa, matematyka_podstawowa)) +
  geom_bin2d(binwidth=c(1, 1), drop=FALSE)
| /eksploracje/histogramy2d.r | no_license | stared/delab-matury | R | false | false | 204 | r | library(dplyr)
library(ggplot2)
wyniki <- read.csv("../dane/przetworzone/sumy_laureaty.csv")
ggplot(wyniki, aes(j_polski_podstawowa, matematyka_podstawowa)) +
geom_bin2d(binwidth=c(1, 1), drop=FALSE)
|
myarray <- array(dim = c(2, 2, 2))
myarray[2, 1, 1] <- 0
myarray[1, 2, 1] <- 1
myarray[1, 1, 2] <- 2
myarray
x <- mat[1,] + 1
myarray[x[1], x[2], x[3]]
example(sum)
replicate(5,{
n <- 10
rnorm(n)
})
system.time({
n <- 10
rnorm()
})
order(c(1, 6, 8, 2, 2), c(0, 0, 0, 2, 1)) ?
rank(c(1, 6, 8, 2, 2))?
rank(c(1, 6, 8, 2, 2), ties.method = "first")?
gtools::mixedsort(c("a1", "a2", "a10"))
let <- rep(letters[1:3],4)
let
split(1:12,let)
cond <- TRUE
if (cond) {
x <- 0
lvl <- rep_len(1:3,40)
3.3.3 Exercise
3)
mat <- matrix(0, 10, 3); mat[c(1, 5, 8, 12, 16, 17, 19, 23, 24, 29)] <- 1
myarray <- array(dim = c(2, 2, 2))
myarray[2, 1, 1] <- 0
myarray[1, 2, 1] <- 1
myarray[1, 1, 2] <- 2
my_mat<- apply(mat,MARGIN=1, function(x){
x <- x+1
myarray[x[1],x[2],x[3]]
})
my_mat
x <- myarray[mat + 1]
x
# 3.4.5 Exercises
# 1)
split(1:40, sample(rep_len(1:3,40)))
# 2)
# x <- my_mtcars[1,]
# var <- x$my_col
# var
# x[[var]]
#
my_mtcars$my_val <- apply(my_mtcars, MARGIN=1, function(x){
var <- x[["my_col"]]
as.numeric(x[[var]])
})
my_mtcars
colnames(my_mtcars)
names(my_mtcars)
ind <-cbind(seq_len(nrow(my_mtcars)),
match(my_mtcars$my_col,colnames(my_mtcars)))
my_mtcars[ind]
# 3)
# code[df$id1]
# df$id1 <- code[df$id1]
df[1:3] <- code[unlist(df[1:3])]
dim(mpg)
nrow(mpg)
ncol(mpg)
mpg
str(mpg)
# 4)
ggplot(mpg,aes(x=hwy,y=cyl))+
geom_point()
ggplot(mpg)+
geom_point(aes(hwy,cyl))
ggplot(mpg,mapping=aes(x=hwy,y=cyl))+
geom_point()
#5)
ggplot(mpg,aes(x=class, y= drv))+
geom_point()
#??
# Apply a shared ggplot2 theme: black-and-white base with every text element
# scaled by `coeff` (1 = default sizes); titles centred, legend keys enlarged.
#
# p     -- a ggplot object to style
# coeff -- multiplier applied to all relative text sizes and key dimensions
# Returns the styled ggplot object.
MY_THEME <- function(p, coeff = 1) {
  p + theme_bw() +
    theme(plot.title = element_text(size = rel(2.0 * coeff), hjust = 0.5),
          plot.subtitle = element_text(size = rel(1.5 * coeff), hjust = 0.5),
          legend.title = element_text(size = rel(1.8 * coeff)),
          legend.text = element_text(size = rel(1.3 * coeff)),
          axis.title = element_text(size = rel(1.5 * coeff)),
          axis.text = element_text(size = rel(1.2 * coeff)),
          legend.key.height = unit(1.3 * coeff, "line"),
          legend.key.width = unit(1.3 * coeff, "line"))
}
# Drop-in replacement for ggplot() that applies MY_THEME. Extra arguments
# are forwarded to ggplot2::ggplot(); coeff controls the theme's text scale.
myggplot <- function(..., coeff = 1) {
  MY_THEME(ggplot2::ggplot(...), coeff = coeff)
}
runif(n = 10, min = 1, max = 100)
1 %>%
runif(10, max = 100)
runif(1,10,100)
1 %>%
runif(10, ., max = 100)
tibble(
x = 1:5,
y = 1,
z = x ^ 2 + y
)
bool f3(LogicalVector x) {
}
| /exercise.R | no_license | lucyliu666/test2 | R | false | false | 2,416 | r | myarray <- array(dim = c(2, 2, 2))
myarray[2, 1, 1] <- 0
myarray[1, 2, 1] <- 1
myarray[1, 1, 2] <- 2
myarray
x <- mat[1,] + 1
myarray[x[1], x[2], x[3]]
example(sum)
replicate(5,{
n <- 10
rnorm(n)
})
system.time({
n <- 10
rnorm()
})
order(c(1, 6, 8, 2, 2), c(0, 0, 0, 2, 1)) ?
rank(c(1, 6, 8, 2, 2))?
rank(c(1, 6, 8, 2, 2), ties.method = "first")?
gtools::mixedsort(c("a1", "a2", "a10"))
let <- rep(letters[1:3],4)
let
split(1:12,let)
cond <- TRUE
if (cond) {
x <- 0
lvl <- rep_len(1:3,40)
3.3.3 Exercise
3)
mat <- matrix(0, 10, 3); mat[c(1, 5, 8, 12, 16, 17, 19, 23, 24, 29)] <- 1
myarray <- array(dim = c(2, 2, 2))
myarray[2, 1, 1] <- 0
myarray[1, 2, 1] <- 1
myarray[1, 1, 2] <- 2
my_mat<- apply(mat,MARGIN=1, function(x){
x <- x+1
myarray[x[1],x[2],x[3]]
})
my_mat
x <- myarray[mat + 1]
x
# 3.4.5 Exercises
# 1)
split(1:40, sample(rep_len(1:3,40)))
# 2)
# x <- my_mtcars[1,]
# var <- x$my_col
# var
# x[[var]]
#
my_mtcars$my_val <- apply(my_mtcars, MARGIN=1, function(x){
var <- x[["my_col"]]
as.numeric(x[[var]])
})
my_mtcars
colnames(my_mtcars)
names(my_mtcars)
ind <-cbind(seq_len(nrow(my_mtcars)),
match(my_mtcars$my_col,colnames(my_mtcars)))
my_mtcars[ind]
# 3)
# code[df$id1]
# df$id1 <- code[df$id1]
df[1:3] <- code[unlist(df[1:3])]
dim(mpg)
nrow(mpg)
ncol(mpg)
mpg
str(mpg)
# 4)
ggplot(mpg,aes(x=hwy,y=cyl))+
geom_point()
ggplot(mpg)+
geom_point(aes(hwy,cyl))
ggplot(mpg,mapping=aes(x=hwy,y=cyl))+
geom_point()
#5)
ggplot(mpg,aes(x=class, y= drv))+
geom_point()
#??
MY_THEME <- function(p, coeff = 1) {
p + theme_bw() +
theme(plot.title = element_text(size = rel(2.0 * coeff), hjust = 0.5),
plot.subtitle = element_text(size = rel(1.5 * coeff), hjust = 0.5),
legend.title = element_text(size = rel(1.8 * coeff)),
legend.text = element_text(size = rel(1.3 * coeff)),
axis.title = element_text(size = rel(1.5 * coeff)),
axis.text = element_text(size = rel(1.2 * coeff)),
legend.key.height = unit(1.3 * coeff, "line"),
legend.key.width = unit(1.3 * coeff, "line"))
}
myggplot <- function(..., coeff = 1) {
MY_THEME(ggplot2::ggplot(...), coeff = coeff)
}
runif(n = 10, min = 1, max = 100)
1 %>%
runif(10, max = 100)
runif(1,10,100)
1 %>%
runif(10, ., max = 100)
tibble(
x = 1:5,
y = 1,
z = x ^ 2 + y
)
bool f3(LogicalVector x) {
}
|
\name{di.fn}
\alias{di.fn}
\title{
\bold{di} function
}
\description{
Calculates a drug's dosing interval.
}
\usage{
di.fn(msc, mec, ke)
}
\arguments{
\item{msc}{
Drug's maximum safe concentration or Cmax (peak) concentration.
}
\item{mec}{
Drug's minimum effective concentration or Cmin (trough) concentration.
}
\item{ke}{
Drug's total elimination rate constant.
}
}
\value{
Returns the dosing interval (h).
}
\references{
See \bold{cpk-package} help.
}
\author{Oscar A. Linares MD and David T. Daly JD/MBA.\cr
Maintainer: Oscar A. Linares MD <OALinaresMD@gmail.com>
}
\examples{
msc <- 50; mec <- 20; ke <- 0.2078;
di <- di.fn(msc, mec, ke)
}
| /man/di.fn.Rd | no_license | cran/cpk | R | false | false | 698 | rd | \name{di.fn}
\alias{di.fn}
\title{
\bold{di} function
}
\description{
Calculates a drug's dosing interval.
}
\usage{
di.fn(msc, mec, ke)
}
\arguments{
\item{msc}{
Drug's maximum safe concentration or Cmax (peak) concentration.
}
\item{mec}{
Drug's minimum effective concentration or Cmin (trough) concentration.
}
\item{ke}{
Drug's total elimination rate constant.
}
}
\value{
Returns the dosing interval (h).
}
\references{
See \bold{cpk-package} help.
}
\author{Oscar A. Linares MD and David T. Daly JD/MBA.\cr
Maintainer: Oscar A. Linares MD <OALinaresMD@gmail.com>
}
\examples{
msc <- 50; mec <- 20; ke <- 0.2078;
di <- di.fn(msc, mec, ke)
}
|
#' Compute the Hoover coefficient of specialization from regions - industries matrices
#'
#' This function computes the Hoover coefficient of specialization from regions - industries matrices. The higher the coefficient, the greater the regional specialization. This index is closely related to the Krugman specialisation index.
#' @param mat An incidence matrix with regions in rows and industries in columns
#' @keywords specialization
#' @export
#' @examples
#' ## generate a region - industry matrix
#' set.seed(31)
#' mat <- matrix(sample(0:100,20,replace=T), ncol = 4)
#' rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
#' colnames(mat) <- c ("I1", "I2", "I3", "I4")
#'
#' ## run the function
#' spec.coeff (mat)
#' @author Pierre-Alexandre Balland \email{p.balland@uu.nl}
#' @seealso \code{\link{Krugman.index}}
#' @references Hoover, E.M. and Giarratani, F. (1985) \emph{An Introduction to Regional Economics}. 3rd edition. New York: Alfred A. Knopf (see table 9-4 in particular)
spec.coeff <- function(mat) {
  # Row-normalise: the industry mix of each region (rows sum to one).
  regional_shares <- sweep(mat, 1, rowSums(mat), FUN = "/")
  # Industry mix of the whole economy, one share per industry column.
  national_shares <- colSums(mat) / sum(mat)
  # Replicate the national mix so it can be subtracted region by region.
  benchmark <- matrix(national_shares,
                      nrow = nrow(mat),
                      ncol = ncol(mat),
                      byrow = TRUE)
  # Hoover coefficient: half the total absolute share deviation per region.
  rowSums(abs(regional_shares - benchmark)) / 2
}
| /R/spec.coeff.r | no_license | PABalland/EconGeo | R | false | false | 1,317 | r | #' Compute the Hoover coefficient of specialization from regions - industries matrices
#'
#' This function computes the Hoover coefficient of specialization from regions - industries matrices. The higher the coefficient, the greater the regional specialization. This index is closely related to the Krugman specialisation index.
#' @param mat An incidence matrix with regions in rows and industries in columns
#' @keywords specialization
#' @export
#' @examples
#' ## generate a region - industry matrix
#' set.seed(31)
#' mat <- matrix(sample(0:100,20,replace=T), ncol = 4)
#' rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
#' colnames(mat) <- c ("I1", "I2", "I3", "I4")
#'
#' ## run the function
#' spec.coeff (mat)
#' @author Pierre-Alexandre Balland \email{p.balland@uu.nl}
#' @seealso \code{\link{Krugman.index}}
#' @references Hoover, E.M. and Giarratani, F. (1985) \emph{An Introduction to Regional Economics}. 3rd edition. New York: Alfred A. Knopf (see table 9-4 in particular)
spec.coeff <- function(mat) {
  # Row-normalise: the industry mix of each region (rows sum to one).
  regional_shares <- sweep(mat, 1, rowSums(mat), FUN = "/")
  # Industry mix of the whole economy, one share per industry column.
  national_shares <- colSums(mat) / sum(mat)
  # Replicate the national mix so it can be subtracted region by region.
  benchmark <- matrix(national_shares,
                      nrow = nrow(mat),
                      ncol = ncol(mat),
                      byrow = TRUE)
  # Hoover coefficient: half the total absolute share deviation per region.
  rowSums(abs(regional_shares - benchmark)) / 2
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudsearch_operations.R
\name{delete_index_field}
\alias{delete_index_field}
\title{Removes an IndexField from the search domain}
\usage{
delete_index_field(DomainName, IndexFieldName)
}
\arguments{
\item{DomainName}{[required]}
\item{IndexFieldName}{[required] The name of the index field you want to remove from the domain's indexing options.}
}
\description{
Removes an \code{IndexField} from the search domain. For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html}{Configuring Index Fields} in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Accepted Parameters}{
\preformatted{delete_index_field(
DomainName = "string",
IndexFieldName = "string"
)
}
}
| /service/paws.cloudsearch/man/delete_index_field.Rd | permissive | CR-Mercado/paws | R | false | true | 828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudsearch_operations.R
\name{delete_index_field}
\alias{delete_index_field}
\title{Removes an IndexField from the search domain}
\usage{
delete_index_field(DomainName, IndexFieldName)
}
\arguments{
\item{DomainName}{[required]}
\item{IndexFieldName}{[required] The name of the index field you want to remove from the domain's indexing options.}
}
\description{
Removes an \code{IndexField} from the search domain. For more information, see \href{http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html}{Configuring Index Fields} in the \emph{Amazon CloudSearch Developer Guide}.
}
\section{Accepted Parameters}{
\preformatted{delete_index_field(
DomainName = "string",
IndexFieldName = "string"
)
}
}
|
#' Calculate negative log posterior
#'
#' @param theta Correlation parameters
#' @param SGGP SGGP object
#' @param y Measured values of SGGP$design
#' @param ... Just a placeholder
#' @param Xs Supplementary input data
#' @param ys Supplementary output data
#' @param HandlingSuppData How should supplementary data be handled?
#' * Correct: full likelihood with grid and supplemental data
#' * Only: only use supplemental data
#' * Ignore: ignore supplemental data
#' * Mixture: sum of grid LLH and supplemental LLH, not statistically valid
#' * MarginalValidation: a validation shortcut
#' * FullValidation: a validation shortcut
#' @md
#'
#' @return Likelihood
#' @export
#' @useDynLib SGGP
#'
#' @examples
#' SG <- SGGPcreate(d=3, batchsize=20)
#' Y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- SGGPfit(SG, Y)
#' SGGP_internal_neglogpost(SG$thetaMAP, SG=SG, y=SG$y)
SGGP_internal_neglogpost <- function(theta,SGGP,y,...,ys=NULL,Xs=NULL,HandlingSuppData = "Correct") {
  # epsssV is a nugget weight mixed into the supplemental covariance
  # matrices below; it is currently 0, so no nugget is actually added.
  epsssV = 0
  # Correlation parameters are only valid strictly inside (-1, 1);
  # outside that range return Inf (zero posterior mass).
  if (max(theta) >= 1 || min(theta) <= -1) {
    return(Inf)
  } else{
    # Status message, emitted at random (~0.1% of calls) so that repeated
    # calls during optimization do not flood the console.
    if (runif(1)<.001) message(paste("HandlingSuppData is:", HandlingSuppData))
    if (!(HandlingSuppData %in% c("Correct", "Only", "Ignore", "Mixture", "MarginalValidation", "FullValidation"))) {
      stop(paste("HandlingSuppData in SGGP_internal_neglogpost must be one of",
                 "Correct, Only, Ignore, Mixture, MarginalValidation, FullValidation"))
    }
    # If only one of y (grid responses) / ys (supplemental responses) was
    # supplied, force the corresponding handling mode.
    if(!(is.null(ys) || length(ys)==0) && (is.null(y) || length(y)==0)){
      # Message user if actually changing it (again only occasionally).
      if (HandlingSuppData != "Only") {
        if (runif(1)<.01) message(paste("Changing HandlingSuppData to Only from", HandlingSuppData))
      }
      HandlingSuppData = "Only"
    }else if((is.null(ys) || length(ys)==0) && !(is.null(y) || length(y)==0)){
      # If making change, message user
      if (HandlingSuppData != "Ignore") {
        if (runif(1)<.01) message(paste("Changing HandlingSuppData to Ignore from", HandlingSuppData))
      }
      HandlingSuppData = "Ignore"
    }else if((is.null(ys) || length(ys)==0) && (is.null(y) || length(y)==0)){
      stop(paste("You have given no y or ys to SGGP_internal_neglogpost"))
    }

    # ---- Correlation matrix Sigma_t of the supplemental points Xs ---------
    # Product correlation across dimensions, accumulated on the log scale
    # (CorrMat is called with returnlogs=TRUE) and exponentiated once.
    if(HandlingSuppData == "Only" ||
       HandlingSuppData == "Mixture" ||
       HandlingSuppData == "FullValidation" ||
       HandlingSuppData == "Correct"){
      Sigma_t = matrix(0,dim(Xs)[1],dim(Xs)[1])
      for (dimlcv in 1:SGGP$d) { # Loop over dimensions
        V = SGGP$CorrMat(Xs[,dimlcv], Xs[,dimlcv], theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
        Sigma_t = Sigma_t+V
      }
      Sigma_t = exp(Sigma_t)
      # Convex combination with the identity; a no-op while epsssV == 0.
      Sigma_t = (1-epsssV)*Sigma_t+diag(dim(Sigma_t)[1])*epsssV
    }

    # ---- Supplemental data alone: variance estimate and log-determinant ---
    if(HandlingSuppData == "Only" || HandlingSuppData == "Mixture"){
      Sigma_chol = chol(Sigma_t)
      # Sigma_t^{-1} %*% ys via two triangular solves against the Cholesky.
      Sti_resid = backsolve(Sigma_chol,backsolve(Sigma_chol,ys,transpose = TRUE))
      sigma2_hat_supp = colSums(as.matrix(ys*Sti_resid))/dim(Xs)[1]
      lDet_supp = 2*sum(log(diag(Sigma_chol)))
    }

    # ---- Grid data alone: variance estimate and log-determinant -----------
    if(HandlingSuppData == "Ignore" || HandlingSuppData == "Mixture"){
      sigma2anddsigma2 <- SGGP_internal_calcsigma2(SGGP=SGGP, y=y, theta=theta, return_lS=TRUE)
      lS <- sigma2anddsigma2$lS
      sigma2_hat_grid = sigma2anddsigma2$sigma2
      # Log-determinant of the sparse-grid covariance, assembled from the
      # per-dimension, per-level log-determinants in lS.
      lDet_grid = 0
      for (blocklcv in 1:SGGP$uoCOUNT) {
        nv = SGGP$gridsize[blocklcv]/SGGP$gridsizes[blocklcv,]
        uonow = SGGP$uo[blocklcv,]
        for (dimlcv in which(uonow>1.5)) {
          lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] - lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
        }
      }
    }

    # ---- Grid fit plus predictions at the supplemental points -------------
    if(HandlingSuppData == "FullValidation" ||
       HandlingSuppData == "Correct" ||
       HandlingSuppData == "MarginalValidation"){
      lik_stuff <- SGGP_internal_faststuff1(SGGP=SGGP, y=y, theta=theta)
      cholS = lik_stuff$cholS
      lS <- lik_stuff$lS
      sigma2_hat_grid = lik_stuff$sigma2
      pw = lik_stuff$pw
      lDet_grid = 0 # Not needed for glik, only for lik
      for (blocklcv in 1:SGGP$uoCOUNT) {
        nv = SGGP$gridsize[blocklcv]/SGGP$gridsizes[blocklcv,]
        uonow = SGGP$uo[blocklcv,]
        for (dimlcv in which(uonow>1.5)) {
          lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] - lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
        }
      }
      # For these three modes we need Cs (cross-correlations Xs vs design),
      # pw, and the per-level Cholesky factors.
      # NOTE(review): GGGG is seeded as a 2-element list, but every slot
      # 1..SGGP$d is overwritten in the loop below; vector("list", SGGP$d)
      # was likely intended -- behavior is unaffected.
      Cs = (matrix(0,dim(Xs)[1],SGGP$ss))
      GGGG = list(matrix(1,dim(Xs)[1],length(SGGP$xb)),SGGP$d)
      for (dimlcv in 1:SGGP$d) { # Loop over dimensions
        V = SGGP$CorrMat(Xs[,dimlcv], SGGP$xb[1:SGGP$sizest[max(SGGP$uo[,dimlcv])]],theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
        GGGG[[dimlcv]] = exp(V)
        Cs = Cs+V[,SGGP$designindex[,dimlcv]]
      }
      Cs = exp(Cs)
      # Posterior-mean predictions at the supplemental points.
      yhats = Cs%*%pw
    }

    # ---- Full (dense) posterior covariance at Xs ---------------------------
    if(HandlingSuppData == "FullValidation" ||
       HandlingSuppData == "Correct" ){
      Sigma_t = matrix(0,dim(Xs)[1],dim(Xs)[1])
      for (dimlcv in 1:SGGP$d) { # Loop over dimensions
        V = SGGP$CorrMat(Xs[,dimlcv], Xs[,dimlcv], theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
        Sigma_t = Sigma_t+V
      }
      Sigma_t = exp(Sigma_t)
      # Per-dimension, per-level posterior variance contributions, one
      # flattened covariance matrix per column; consumed by the compiled
      # routine below.
      MSE_s = matrix(NaN,nrow=dim(Xs)[1]*dim(Xs)[1],ncol=(SGGP$d)*(SGGP$maxlevel))
      Q = max(SGGP$uo[1:SGGP$uoCOUNT,])
      for (dimlcv in 1:SGGP$d) {
        gg = (dimlcv-1)*Q
        TT1 = GGGG[[dimlcv]]
        for (levellcv in 1:max(SGGP$uo[1:SGGP$uoCOUNT,dimlcv])) {
          INDSN = 1:SGGP$sizest[levellcv]
          INDSN = INDSN[sort(SGGP$xb[1:SGGP$sizest[levellcv]],index.return = TRUE)$ix]
          REEALL =(SGGP_internal_postvarmatcalcfaster(TT1,
                                                      c(),
                                                      as.matrix(cholS[[gg+levellcv]]),
                                                      c(),
                                                      INDSN,
                                                      SGGP$numpara))
          MSE_s[,(dimlcv-1)*SGGP$maxlevel+levellcv] = as.vector(REEALL)
        }
      }
      # Combine per-level contributions into the posterior covariance.
      # NOTE(review): rcpp_fastmatclcr appears to update Sigma_t2 in place
      # (Rcpp pass-by-reference); its return value is deliberately unused --
      # confirm against the C++ source.
      Sigma_t2 = as.vector(Sigma_t)
      rcpp_fastmatclcr(SGGP$uo[1:SGGP$uoCOUNT,], SGGP$w[1:SGGP$uoCOUNT], MSE_s,Sigma_t2,SGGP$maxlevel)
      Sigma_t = matrix(Sigma_t2,nrow=dim(Xs)[1] , byrow = FALSE)
      Sigma_t = (1-epsssV)*Sigma_t+diag(dim(Sigma_t)[1])*epsssV
      # Residual-based variance estimate and log-determinant, with the grid
      # predictions yhats subtracted out of ys.
      Sigma_chol = chol(Sigma_t)
      Sti_resid = backsolve(Sigma_chol,backsolve(Sigma_chol,ys-yhats,transpose = TRUE))
      sigma2_hat_supp = colSums((ys-yhats)*Sti_resid)/dim(Xs)[1]
      lDet_supp = 2*sum(log(diag(Sigma_chol)))
    }

    # ---- Marginal (diagonal-only) posterior variance at Xs -----------------
    if(HandlingSuppData == "MarginalValidation"){
      # Here Sigma_t holds only the pointwise posterior variances (a column
      # vector), not a full covariance matrix.
      Sigma_t = matrix(1,dim(Xs)[1],1)
      # NOTE(review): MSE_s is seeded as a 2-element list; assigning to the
      # indices below auto-extends it, so the odd initialization is harmless.
      MSE_s = list(matrix(0,dim(Xs)[1],dim(Xs)[1]),(SGGP$d+1)*(SGGP$maxlevel+1))
      for (dimlcv in 1:SGGP$d) {
        for (levellcv in 1:max(SGGP$uo[1:SGGP$uoCOUNT,dimlcv])) {
          Q = max(SGGP$uo[1:SGGP$uoCOUNT,])
          gg = (dimlcv-1)*Q
          INDSN = 1:SGGP$sizest[levellcv]
          INDSN = INDSN[sort(SGGP$xb[1:SGGP$sizest[levellcv]],index.return = TRUE)$ix]
          MSE_s[[(dimlcv)*SGGP$maxlevel+levellcv]] = (SGGP_internal_postvarmatcalcfaster(GGGG[[dimlcv]],
                                                                                         c(),
                                                                                         as.matrix(cholS[[gg+levellcv]]),
                                                                                         c(),
                                                                                         INDSN,
                                                                                         SGGP$numpara,
                                                                                         returndiag=TRUE))
        }
      }
      # Accumulate per-block variance reductions, skipping zero-weight blocks
      # (weights are integers, so |w| > 0.5 means w != 0).
      for (blocklcv in 1:SGGP$uoCOUNT) {
        if(abs(SGGP$w[blocklcv]) > 0.5){
          ME_s = matrix(1,nrow=dim(Xs)[1],1)
          for (dimlcv in 1:SGGP$d) {
            levelnow = SGGP$uo[blocklcv,dimlcv]
            ME_s = ME_s*MSE_s[[(dimlcv)*SGGP$maxlevel+levelnow]]
          }
          Sigma_t = Sigma_t-SGGP$w[blocklcv]*(ME_s)
        }
      }
      Sigma_t = (1-epsssV)*Sigma_t+epsssV
      lDet_supp = sum(log(Sigma_t))
      # Replicate the variance vector across output columns when ys is
      # multi-output so the elementwise division below lines up.
      if(is.matrix(ys)){
        Sigma_t = t(matrix( rep( Sigma_t , dim(ys)[2] ) , ncol = ncol(t(Sigma_t)) , byrow = TRUE ))
      }
      sigma2_hat_supp = colSums((ys-yhats)^2/Sigma_t)/dim(Xs)[1]
    }

    # ---- Assemble the negative log posterior -------------------------------
    # Each branch is the profile Gaussian negative log-likelihood (sigma2
    # maximized out) plus the prior term -0.5*sum(log(1-theta)+log(1+theta));
    # the matrix branches handle multi-output y/ys column by column.
    neglogpost = 0
    if(HandlingSuppData == "Ignore" || HandlingSuppData == "Mixture"){
      if(!is.matrix(y)){
        neglogpost = neglogpost+1/2*((length(y))*log(sigma2_hat_grid[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet_grid)
      }else{
        neglogpost = neglogpost+1/2*((dim(y)[1])*sum(log(c(sigma2_hat_grid)))-0.500*sum(log(1-theta)+log(theta+1))+dim(y)[2]*lDet_grid)
      }
    }
    if(HandlingSuppData == "Only" || HandlingSuppData == "Mixture"){
      if(!is.matrix(y)){
        neglogpost = neglogpost+1/2*((length(ys))*log(sigma2_hat_supp[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet_supp)
      }else{
        neglogpost = neglogpost+1/2*((dim(ys)[1])*sum(log(c(sigma2_hat_supp)))-0.500*sum(log(1-theta)+log(theta+1))+dim(ys)[2]*lDet_supp)
      }
    }
    if(HandlingSuppData =="Correct"){
      # Pool the grid and supplemental variance estimates, weighted by their
      # respective sample sizes.
      sigma2_hat = sigma2_hat_grid*dim(SGGP$design)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])+sigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])
      lDet = lDet_grid+lDet_supp
      if(!is.matrix(y)){
        neglogpost = 1/2*((length(y)+length(ys))*log(sigma2_hat[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet)
      }else{
        neglogpost = 1/2*((dim(y)[1]+dim(ys)[1])*sum(log(c(sigma2_hat)))-0.500*sum(log(1-theta)+log(theta+1))+dim(y)[2]*lDet)
      }
    }
    if(HandlingSuppData =="FullValidation" || HandlingSuppData =="MarginalValidation"){
      sigma2_hat = sigma2_hat_grid*dim(SGGP$design)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])+sigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])
      lDet = lDet_supp
      # Validation shortcuts: likelihood on the supplemental residuals plus a
      # goodness-of-fit term in sigma2_hat_supp/sigma2_hat.
      if(!is.matrix(y)){
        neglogpost = 1/2*((length(ys))*log(sigma2_hat[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet)+1/2*length(ys)*sigma2_hat_supp[1]/sigma2_hat[1]
      }else{
        neglogpost = 1/2*((dim(ys)[1])*sum(log(c(sigma2_hat)))-0.500*sum(log(1-theta)+log(theta+1))+dim(ys)[2]*lDet)+1/2*dim(ys)[1]*sum(sigma2_hat_supp/sigma2_hat)
      }
    }
    return(neglogpost)
  }
}
| /scratch/OldScratch/BackupBeforeBigChanges20190310/R/SGGP_speedup_neglogpost.R | no_license | CollinErickson/CGGP | R | false | false | 10,750 | r | #' Calculate negative log posterior
#'
#' @param theta Correlation parameters
#' @param SGGP SGGP object
#' @param y Measured values of SGGP$design
#' @param ... Just a placeholder
#' @param Xs Supplementary input data
#' @param ys Supplementary output data
#' @param HandlingSuppData How should supplementary data be handled?
#' * Correct: full likelihood with grid and supplemental data
#' * Only: only use supplemental data
#' * Ignore: ignore supplemental data
#' * Mixture: sum of grid LLH and supplemental LLH, not statistically valid
#' * MarginalValidation: a validation shortcut
#' * FullValidation: a validation shortcut
#' @md
#'
#' @return Likelihood
#' @export
#' @useDynLib SGGP
#'
#' @examples
#' SG <- SGGPcreate(d=3, batchsize=20)
#' Y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- SGGPfit(SG, Y)
#' SGGP_internal_neglogpost(SG$thetaMAP, SG=SG, y=SG$y)
SGGP_internal_neglogpost <- function(theta,SGGP,y,...,ys=NULL,Xs=NULL,HandlingSuppData = "Correct") {
# Return Inf if theta is too large
epsssV = 0
if (max(theta) >= 1 || min(theta) <= -1) {
return(Inf)
} else{
if (runif(1)<.001) message(paste("HandlingSuppData is:", HandlingSuppData))
if (!(HandlingSuppData %in% c("Correct", "Only", "Ignore", "Mixture", "MarginalValidation", "FullValidation"))) {
stop(paste("HandlingSuppData in SGGP_internal_neglogpost must be one of",
"Correct, Only, Ignore, Mixture, MarginalValidation, FullValidation"))
}
if(!(is.null(ys) || length(ys)==0) && (is.null(y) || length(y)==0)){
# Message user if actually changing it
if (HandlingSuppData != "Only") {
if (runif(1)<.01) message(paste("Changing HandlingSuppData to Only from", HandlingSuppData))
}
HandlingSuppData = "Only"
}else if((is.null(ys) || length(ys)==0) && !(is.null(y) || length(y)==0)){
# If making change, message user
if (HandlingSuppData != "Ignore") {
if (runif(1)<.01) message(paste("Changing HandlingSuppData to Ignore from", HandlingSuppData))
}
HandlingSuppData = "Ignore"
}else if((is.null(ys) || length(ys)==0) && (is.null(y) || length(y)==0)){
stop(paste("You have given no y or ys to SGGP_internal_neglogpost"))
}
if(HandlingSuppData == "Only" ||
HandlingSuppData == "Mixture" ||
HandlingSuppData == "FullValidation" ||
HandlingSuppData == "Correct"){
Sigma_t = matrix(0,dim(Xs)[1],dim(Xs)[1])
for (dimlcv in 1:SGGP$d) { # Loop over dimensions
V = SGGP$CorrMat(Xs[,dimlcv], Xs[,dimlcv], theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
Sigma_t = Sigma_t+V
}
Sigma_t = exp(Sigma_t)
Sigma_t = (1-epsssV)*Sigma_t+diag(dim(Sigma_t)[1])*epsssV
}
if(HandlingSuppData == "Only" || HandlingSuppData == "Mixture"){
Sigma_chol = chol(Sigma_t)
Sti_resid = backsolve(Sigma_chol,backsolve(Sigma_chol,ys,transpose = TRUE))
sigma2_hat_supp = colSums(as.matrix(ys*Sti_resid))/dim(Xs)[1]
lDet_supp = 2*sum(log(diag(Sigma_chol)))
}
if(HandlingSuppData == "Ignore" || HandlingSuppData == "Mixture"){
sigma2anddsigma2 <- SGGP_internal_calcsigma2(SGGP=SGGP, y=y, theta=theta, return_lS=TRUE)
lS <- sigma2anddsigma2$lS
sigma2_hat_grid = sigma2anddsigma2$sigma2
lDet_grid = 0
for (blocklcv in 1:SGGP$uoCOUNT) {
nv = SGGP$gridsize[blocklcv]/SGGP$gridsizes[blocklcv,]
uonow = SGGP$uo[blocklcv,]
for (dimlcv in which(uonow>1.5)) {
lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] - lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
}
}
}
if(HandlingSuppData == "FullValidation" ||
HandlingSuppData == "Correct" ||
HandlingSuppData == "MarginalValidation"){
lik_stuff <- SGGP_internal_faststuff1(SGGP=SGGP, y=y, theta=theta)
cholS = lik_stuff$cholS
lS <- lik_stuff$lS
sigma2_hat_grid = lik_stuff$sigma2
pw = lik_stuff$pw
lDet_grid = 0 # Not needed for glik, only for lik
for (blocklcv in 1:SGGP$uoCOUNT) {
nv = SGGP$gridsize[blocklcv]/SGGP$gridsizes[blocklcv,]
uonow = SGGP$uo[blocklcv,]
for (dimlcv in which(uonow>1.5)) {
lDet_grid = lDet_grid + (lS[uonow[dimlcv], dimlcv] - lS[uonow[dimlcv] - 1, dimlcv])*nv[dimlcv]
}
}
#For these three, I need Cs,pw,allChols
Cs = (matrix(0,dim(Xs)[1],SGGP$ss))
GGGG = list(matrix(1,dim(Xs)[1],length(SGGP$xb)),SGGP$d)
for (dimlcv in 1:SGGP$d) { # Loop over dimensions
V = SGGP$CorrMat(Xs[,dimlcv], SGGP$xb[1:SGGP$sizest[max(SGGP$uo[,dimlcv])]],theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
GGGG[[dimlcv]] = exp(V)
Cs = Cs+V[,SGGP$designindex[,dimlcv]]
}
Cs = exp(Cs)
yhats = Cs%*%pw
}
if(HandlingSuppData == "FullValidation" ||
HandlingSuppData == "Correct" ){
Sigma_t = matrix(0,dim(Xs)[1],dim(Xs)[1])
for (dimlcv in 1:SGGP$d) { # Loop over dimensions
V = SGGP$CorrMat(Xs[,dimlcv], Xs[,dimlcv], theta[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],returnlogs=TRUE)
Sigma_t = Sigma_t+V
}
Sigma_t = exp(Sigma_t)
MSE_s = matrix(NaN,nrow=dim(Xs)[1]*dim(Xs)[1],ncol=(SGGP$d)*(SGGP$maxlevel))
Q = max(SGGP$uo[1:SGGP$uoCOUNT,])
for (dimlcv in 1:SGGP$d) {
gg = (dimlcv-1)*Q
TT1 = GGGG[[dimlcv]]
for (levellcv in 1:max(SGGP$uo[1:SGGP$uoCOUNT,dimlcv])) {
INDSN = 1:SGGP$sizest[levellcv]
INDSN = INDSN[sort(SGGP$xb[1:SGGP$sizest[levellcv]],index.return = TRUE)$ix]
REEALL =(SGGP_internal_postvarmatcalcfaster(TT1,
c(),
as.matrix(cholS[[gg+levellcv]]),
c(),
INDSN,
SGGP$numpara))
MSE_s[,(dimlcv-1)*SGGP$maxlevel+levellcv] = as.vector(REEALL)
}
}
Sigma_t2 = as.vector(Sigma_t)
rcpp_fastmatclcr(SGGP$uo[1:SGGP$uoCOUNT,], SGGP$w[1:SGGP$uoCOUNT], MSE_s,Sigma_t2,SGGP$maxlevel)
Sigma_t = matrix(Sigma_t2,nrow=dim(Xs)[1] , byrow = FALSE)
Sigma_t = (1-epsssV)*Sigma_t+diag(dim(Sigma_t)[1])*epsssV
Sigma_chol = chol(Sigma_t)
Sti_resid = backsolve(Sigma_chol,backsolve(Sigma_chol,ys-yhats,transpose = TRUE))
sigma2_hat_supp = colSums((ys-yhats)*Sti_resid)/dim(Xs)[1]
lDet_supp = 2*sum(log(diag(Sigma_chol)))
}
if(HandlingSuppData == "MarginalValidation"){
Sigma_t = matrix(1,dim(Xs)[1],1)
MSE_s = list(matrix(0,dim(Xs)[1],dim(Xs)[1]),(SGGP$d+1)*(SGGP$maxlevel+1))
for (dimlcv in 1:SGGP$d) {
for (levellcv in 1:max(SGGP$uo[1:SGGP$uoCOUNT,dimlcv])) {
Q = max(SGGP$uo[1:SGGP$uoCOUNT,])
gg = (dimlcv-1)*Q
INDSN = 1:SGGP$sizest[levellcv]
INDSN = INDSN[sort(SGGP$xb[1:SGGP$sizest[levellcv]],index.return = TRUE)$ix]
MSE_s[[(dimlcv)*SGGP$maxlevel+levellcv]] = (SGGP_internal_postvarmatcalcfaster(GGGG[[dimlcv]],
c(),
as.matrix(cholS[[gg+levellcv]]),
c(),
INDSN,
SGGP$numpara,
returndiag=TRUE))
}
}
for (blocklcv in 1:SGGP$uoCOUNT) {
if(abs(SGGP$w[blocklcv]) > 0.5){
ME_s = matrix(1,nrow=dim(Xs)[1],1)
for (dimlcv in 1:SGGP$d) {
levelnow = SGGP$uo[blocklcv,dimlcv]
ME_s = ME_s*MSE_s[[(dimlcv)*SGGP$maxlevel+levelnow]]
}
Sigma_t = Sigma_t-SGGP$w[blocklcv]*(ME_s)
}
}
Sigma_t = (1-epsssV)*Sigma_t+epsssV
lDet_supp = sum(log(Sigma_t))
if(is.matrix(ys)){
Sigma_t = t(matrix( rep( Sigma_t , dim(ys)[2] ) , ncol = ncol(t(Sigma_t)) , byrow = TRUE ))
}
sigma2_hat_supp = colSums((ys-yhats)^2/Sigma_t)/dim(Xs)[1]
}
neglogpost = 0
if(HandlingSuppData == "Ignore" || HandlingSuppData == "Mixture"){
if(!is.matrix(y)){
neglogpost = neglogpost+1/2*((length(y))*log(sigma2_hat_grid[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet_grid)
}else{
neglogpost = neglogpost+1/2*((dim(y)[1])*sum(log(c(sigma2_hat_grid)))-0.500*sum(log(1-theta)+log(theta+1))+dim(y)[2]*lDet_grid)
}
}
if(HandlingSuppData == "Only" || HandlingSuppData == "Mixture"){
if(!is.matrix(y)){
neglogpost = neglogpost+1/2*((length(ys))*log(sigma2_hat_supp[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet_supp)
}else{
neglogpost = neglogpost+1/2*((dim(ys)[1])*sum(log(c(sigma2_hat_supp)))-0.500*sum(log(1-theta)+log(theta+1))+dim(ys)[2]*lDet_supp)
}
}
if(HandlingSuppData =="Correct"){
sigma2_hat = sigma2_hat_grid*dim(SGGP$design)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])+sigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])
lDet = lDet_grid+lDet_supp
if(!is.matrix(y)){
neglogpost = 1/2*((length(y)+length(ys))*log(sigma2_hat[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet)
}else{
neglogpost = 1/2*((dim(y)[1]+dim(ys)[1])*sum(log(c(sigma2_hat)))-0.500*sum(log(1-theta)+log(theta+1))+dim(y)[2]*lDet)
}
}
if(HandlingSuppData =="FullValidation" || HandlingSuppData =="MarginalValidation"){
sigma2_hat = sigma2_hat_grid*dim(SGGP$design)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])+sigma2_hat_supp*dim(Xs)[1]/(dim(Xs)[1]+dim(SGGP$design)[1])
lDet = lDet_supp
if(!is.matrix(y)){
neglogpost = 1/2*((length(ys))*log(sigma2_hat[1])-0.500*sum(log(1-theta)+log(theta+1))+lDet)+1/2*length(ys)*sigma2_hat_supp[1]/sigma2_hat[1]
}else{
neglogpost = 1/2*((dim(ys)[1])*sum(log(c(sigma2_hat)))-0.500*sum(log(1-theta)+log(theta+1))+dim(ys)[2]*lDet)+1/2*dim(ys)[1]*sum(sigma2_hat_supp/sigma2_hat)
}
}
return(neglogpost)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outputs.R
\name{fuzz_results}
\alias{fuzz_results}
\alias{fuzz_value}
\alias{fuzz_call}
\title{Access individual fuzz test results}
\usage{
fuzz_value(fr, index = NULL, ...)
fuzz_call(fr, index = NULL, ...)
}
\arguments{
\item{fr}{\code{fuzz_results} object}
\item{index}{The test index (by position) to access. Same as the
\code{results_index} in the data frame returned by
\code{\link{as.data.frame.fuzz_results}}.}
\item{...}{Additional arguments must be named regex patterns that will be used to match against test names. The names of the patterns must match the function argument name(s) whose test names you wish to match.}
}
\description{
Access individual fuzz test results
}
\section{Functions}{
\itemize{
\item \code{fuzz_value}: Access the object returned by the fuzz test
\item \code{fuzz_call}: Access the call used for the fuzz test
}}
| /man/fuzz_results.Rd | no_license | mdlincoln/fuzzr | R | false | true | 933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outputs.R
\name{fuzz_results}
\alias{fuzz_results}
\alias{fuzz_value}
\alias{fuzz_call}
\title{Access individual fuzz test results}
\usage{
fuzz_value(fr, index = NULL, ...)
fuzz_call(fr, index = NULL, ...)
}
\arguments{
\item{fr}{\code{fuzz_results} object}
\item{index}{The test index (by position) to access. Same as the
\code{results_index} in the data frame returned by
\code{\link{as.data.frame.fuzz_results}}.}
\item{...}{Additional arguments must be named regex patterns that will be used to match against test names. The names of the patterns must match the function argument name(s) whose test names you wish to match.}
}
\description{
Access individual fuzz test results
}
\section{Functions}{
\itemize{
\item \code{fuzz_value}: Access the object returned by the fuzz test
\item \code{fuzz_call}: Access the call used for the fuzz test
}}
|
# Hypergeometric: P(X <= 2) for X ~ Hyper(m = 4, n = 5, k = 3)
x <- 0:3
p_x <- dhyper(x, 4, 5, 3)
print(p_x[1] + p_x[2] + p_x[3])
#8 with replacement (binomial)
x <- 0:5
p_x <- dbinom(x, 5, 20/130)
# NOTE(review): `digits = 3` just adds a constant column named "digits" to
# the data frame; `print(dist, digits = 3)` was probably intended -- kept
# as-is to preserve the original output.
dist <- data.frame(x, p_x, digits=3)
print(dist)
#8 without replacement (hypergeometric)
x <- 0:5
p_x <- dhyper(x, 20, 110, 5)
dist <- data.frame(x, p_x, digits=3)
print(dist)
#10b
dpois(10, 8)
#10c
dpois(30, 32)
#11b: P(X <= 2) for X ~ Poisson(5)
# (renamed from a/b/c so that base::c is not shadowed)
p0 <- dpois(0, 5)
p1 <- dpois(1, 5)
p2 <- dpois(2, 5)
print(p0 + p1 + p2)
#11c: P(X >= 20) for X ~ Poisson(21.43)
a <- 1 - sum(dpois(0:19, 21.43))
a
#12
dpois(3, 4.5)
#13 example: grid of Poisson pmf plots for several means
# Fix: the graphical parameter is `mfrow` (rows x cols), not `mfrom`;
# `par(mfrom = ...)` only raises a warning and sets no layout.
par(mfrow = c(3,2))
x <- 0:30
mu <- seq(2, 12, 2)
for(y in mu)
{
plot(dpois(x, y) ~ x)
title(paste("Mean is", y))
} | /notes_ch3.R | no_license | teemal/stats_scripts | R | false | false | 567 | r | x <- 0:3
# Hypergeometric: P(X <= 2) for X ~ Hyper(m = 4, n = 5, k = 3)
# (x <- 0:3 is assigned on the preceding line)
p_x <- dhyper(x, 4, 5, 3)
print(p_x[1] + p_x[2] + p_x[3])
#8 with replacement (binomial)
x <- 0:5
p_x <- dbinom(x, 5, 20/130)
# NOTE(review): `digits = 3` just adds a constant column named "digits" to
# the data frame; `print(dist, digits = 3)` was probably intended -- kept
# as-is to preserve the original output.
dist <- data.frame(x, p_x, digits=3)
print(dist)
#8 without replacement (hypergeometric)
x <- 0:5
p_x <- dhyper(x, 20, 110, 5)
dist <- data.frame(x, p_x, digits=3)
print(dist)
#10b
dpois(10, 8)
#10c
dpois(30, 32)
#11b: P(X <= 2) for X ~ Poisson(5)
# (renamed from a/b/c so that base::c is not shadowed)
p0 <- dpois(0, 5)
p1 <- dpois(1, 5)
p2 <- dpois(2, 5)
print(p0 + p1 + p2)
#11c: P(X >= 20) for X ~ Poisson(21.43)
a <- 1 - sum(dpois(0:19, 21.43))
a
#12
dpois(3, 4.5)
#13 example: grid of Poisson pmf plots for several means
# Fix: the graphical parameter is `mfrow` (rows x cols), not `mfrom`;
# `par(mfrom = ...)` only raises a warning and sets no layout.
par(mfrow = c(3,2))
x <- 0:30
mu <- seq(2, 12, 2)
for(y in mu)
{
plot(dpois(x, y) ~ x)
title(paste("Mean is", y))
}
# Example script extracted from the replicationInterval package docs.
library(replicationInterval)
### Name: ri.d.demo
### Title: Simulation to demonstrate the meaning of the d-value replication
###   interval
### Aliases: ri.d.demo
### ** Examples
# Simulate 10 replication trials: original study n1 = n2 = 50,
# replication study n = 100 per group, population effect size d = .50.
ri.d.demo(n1=50,n2=50,rep.n1=100,rep.n2=100,pop.d=.50,number.trials=10)
| /data/genthat_extracted_code/replicationInterval/examples/ri.d.demo.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 258 | r | library(replicationInterval)
### Name: ri.d.demo
### Title: Simulation to demonstrate the meaning of the d-value replication
### interval
### Aliases: ri.d.demo
### ** Examples
ri.d.demo(n1=50,n2=50,rep.n1=100,rep.n2=100,pop.d=.50,number.trials=10)
|
#' CUSUM change detection for a stream in R with known prechange parameters
#'
#' Original implementation in R of CUSUM change detector, but now
#' expecting the prechange mean and variance to be specified.
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param params A list of parameters for the CUSUM algorithm. Consists of
#' \describe{
#' \item{\code{d}}{A control parameter also known as
#' \eqn{k}.}
#'
#' \item{\code{B}}{A control parameter also known as
#' \eqn{h}.}
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @section Author:
#' Dean Bodenham
#'
#'
#' @section References:
#' D. A. Bodenham and N. M. Adams (2016)
#' \emph{Continuous monitoring for changepoints in data
#' streams using adaptive estimation}.
#' Statistics and Computing
#' doi:10.1007/s11222-016-9684-8
#'
#'
#' @keywords internal
CUSUM_stream_jumpdetect_prechange <- function(stream, BL, params, mu0, sigma0){
  # CUSUM control parameters (k and h in the usual notation).
  d <- params[[1]]
  B <- params[[2]]

  # Scan the stream until it is exhausted or a change is flagged.
  streampos <- 1
  N <- length(stream)
  detected_count <- 0
  # Only one changepoint is recorded, so a length-1 buffer suffices.
  detect_pos_vec <- rep(0, 1)
  jump_found <- FALSE

  while ((streampos < N) && (!jump_found)) {
    # Prechange parameters are known; no burn-in estimation is performed.
    cusum_mean <- mu0
    cusum_sd <- sqrt(sigma0^2)
    drift <- d * cusum_sd
    threshold <- B * cusum_sd

    # Upper and lower one-sided CUSUM statistics.
    upper_stat <- 0
    lower_stat <- 0
    obs_index <- 0

    while ((!jump_found) && (streampos < N)) {
      obs_index <- obs_index + 1
      x_new <- get_nextobs_fromstream(stream, streampos)
      streampos <- update_streampos(streampos)

      # Recursive CUSUM updates, truncated below at zero.
      upper_stat <- max(upper_stat + x_new - cusum_mean - drift, 0)
      lower_stat <- max(lower_stat + cusum_mean - x_new - drift, 0)

      # A change is flagged when either statistic exceeds the threshold.
      jump_found <- (upper_stat > threshold) || (lower_stat > threshold)
      if (jump_found) {
        detected_count <- detected_count + 1
        detect_pos_vec[detected_count] <- streampos
      }
    }
  }

  # NOTE: when no change is found this evaluates detect_pos_vec[1:0], which
  # returns the initial 0 entry -- behaviour preserved from the original.
  detect_pos_vec <- detect_pos_vec[1:detected_count]
  return(detect_pos_vec)
}
#' EWMA change detection for a stream in R with known prechange parameters
#'
#' Original implementation in R of EWMA change detector, but now
#' expecting the prechange mean and variance to be specified.
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is
#' kept for historical reasons.
#'
#' @param params A list of parameters for the EWMA algorithm. Consists of
#' \describe{
#' \item{\code{r}}{A control parameter which controls
#' the rate of downweighting.}
#'
#' \item{\code{L}}{A control parameter which determines
#' the width of the control limits.}
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @keywords internal
EWMA_stream_jumpdetect_prechange <- function(stream, BL, params, mu0, sigma0){
  # EWMA control parameters: r (aka alpha) is the downweighting rate,
  # L (aka beta) sets the width of the control limits.
  # Fix: use [[ ]] for both elements so the function works whether
  # `params` is a numeric vector c(r, L) or a list(r, L); the original
  # `params[2]` returned a length-1 *list* for list input, which breaks
  # the arithmetic on L below. For vector input the value is unchanged.
  r <- params[[1]]
  L <- params[[2]]

  # Run until the stream is exhausted or a change is detected.
  streampos <- 1
  N <- length(stream)
  detected_count <- 0
  # Only one changepoint is recorded in this variant.
  detect_pos_vec <- rep(0, 1)
  jump_found <- FALSE

  while ((streampos < N) && (jump_found == FALSE)) {
    # Prechange parameters are known, so no burn-in estimation is done.
    mu_1 <- mu0
    sigma_1 <- sqrt(sigma0^2)

    # Phase 2: monitor for a change. W is the EWMA statistic.
    # NOTE(review): W starts at 0 rather than mu_1, matching the original
    # code; this looks questionable when mu0 != 0 -- confirm.
    W <- 0
    delta <- r/(2 - r)
    rFactorSigmaZ <- 1

    while ((jump_found == FALSE) && (streampos < N)) {
      x_new <- get_nextobs_fromstream(stream, streampos)
      streampos <- update_streampos(streampos)

      # EWMA update and time-varying control limits.
      W <- r * x_new + (1 - r) * W
      rFactorSigmaZ <- rFactorSigmaZ * (1 - r)^2
      sigmaZ <- sqrt(delta * (1 - rFactorSigmaZ)) * sigma_1
      UL <- mu_1 + L * sigmaZ  # upper control limit
      LL <- mu_1 - L * sigmaZ  # lower control limit

      # Flag a change when the statistic leaves the control limits.
      if ((W > UL) | (W < LL)) {
        jump_found <- TRUE
        detected_count <- detected_count + 1
        detect_pos_vec[detected_count] <- streampos
      }
    }
  }

  # NOTE: when no change is found this evaluates detect_pos_vec[1:0], which
  # returns the initial 0 entry -- behaviour preserved from the original.
  detect_pos_vec <- detect_pos_vec[1:detected_count]
  return(detect_pos_vec)
}
#-------------------------------------------------------------------------#
#' Change detection using the Fixed Forgetting Factor method, prechange known
#'
#' Original implementation in R of FFF change detector, but now prechange is
#' known
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param ffparams An \emph{unnamed} list of parameters for the FFF algorithm.
#' Consists of:
#' \describe{
#' \item{\code{lambda}}{The value of the fixed forgetting
#' factor (FFF). Should be in the range
#' [0,1].}
#'
#' \item{\code{p}}{The value of the significance threshold,
#' which was later renamed \code{alpha}
#' (in the paper, not in this function).}
#'
#' \item{\code{resettozero}}{A flag; if it zero, then the
#' ffmean will be reset to zero
#' after each change. Usually set
#' to 1 (i.e. do not reset).}
#'
#' \item{\code{u_init}}{The initial value of \code{u}.
#' Should be set to 0.}
#'
#' \item{\code{v_init}}{The initial value of \code{v}.
#' Should be set to 0.}
#'
#' \item{\code{w_init}}{The initial value of \code{w}.
#' Should be set to 0.}
#'
#' \item{\code{ffmean_init}}{The initial value of the
#' forgetting factor mean,
#' \code{ffmean}.
#' Should be set to 0.}
#'
#' \item{\code{ffvar_init}}{The initial value of the
#' forgetting factor variance,
#' \code{ffvar}.
#' Should be set to 0.}
#'
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @keywords internal
FFF_stream_jumpdetect_prechange <- function(stream, BL, ffparams, mu0, sigma0){
# Unpack the FFF parameters (see the roxygen block above for meanings).
lambda <- ffparams[[1]]
p <- ffparams[[2]]
resettozero <- ffparams[[3]]
# Running weight/helper quantities for the forgetting-factor recursions.
u_new <- ffparams[[4]]
v_new <- ffparams[[5]]
v_old <- v_new
w_new <- ffparams[[6]]
ffmean_new <- ffparams[[7]]
ffvar_new <- ffparams[[8]]
# Will run until the stream is finished or a change is detected.
streampos <- 1
N <- length(stream)
detected_count <- 0
detect_pos_vec <- rep(0, 1)
jump_found <- FALSE
while ( (streampos < N) && (jump_found==FALSE)){
#--------------------------------------------------------------------#
# Phase 1: "burn-in". The prechange parameters are known in this
# variant, so nothing is actually estimated; these variables are kept
# only for symmetry with the unknown-prechange version.
lambda_beforeBL <- 1
w_BL_new <- 0
u_BL_new <- 0
v_BL_new <- 0
v_BL_old <- 0
# Initialize burn-in mean and variance (unused in this variant).
mean_BL_new <- 0
var_BL_new <- 0
# Use the known prechange mean and variance directly.
mean_burn <- mu0
var_burn <- sigma0^2
# Packed as (significance threshold, mean, variance) for the limit helper.
burnvec <- c(p, mean_burn, var_burn)
#--------------------------------------------------------------------#
# Reset the forgetting-factor estimators if requested.
# NOTE(review): the flag is named `resettozero` but the reset fires when
# it equals 0 -- confirm this is the intended convention.
if (resettozero==0){
w_new <- 0
u_new <- 0
v_new <- 0
v_old <- 0
# Re-initialize the FF mean and variance at the prechange values.
ffmean_new <- mu0
ffvar_new <- sigma0^2
#cat("resetting ff arl1...\n")
}
#--------------------------------------------------------------------#
# Phase 2: monitoring for a jump.
thelimits <- c(0,0)
while((jump_found==FALSE) && (streampos < N)){
# Get the next observation from the stream.
x_new <- get_nextobs_fromstream(stream, streampos)
streampos <- update_streampos(streampos)
# Update the weights recursively (cheaper than recomputing them).
w_new <- update_w(w_new, lambda)
u_new <- update_u(u_new, w_new)
# v is not needed for the mean update, but is kept current for the
# variance update below.
v_old <- v_new
v_new <- getv_from_u_and_w(u_new, w_new)
# Update the mean; the old mean is saved because the variance update
# below depends on it -- this ordering is required by the derivations.
ffmean_old <- ffmean_new
ffmean_new <- update_ffmean(ffmean_old, w_new, x_new)
# Update the variance (depends on the *old* mean).
ffvar_old <- ffvar_new
ffvar_new <- update_var(ffvar_old, ffmean_old, lambda, v_old, v_new, w_new, x_new)
# Compute the control limits in one call (Normal limits; the older
# Chebyshev version is kept commented out for reference).
sigma_sq_u <- u_new*var_burn
# thelimits <- getmeanchebylimits(mean_burn, sigma_sq_u, p)
thelimits <- getNormalLimitsFromList(burnvec, u_new)
# TRUE while the FF mean stays inside the control limits.
inLimitsBool <- isInLimitsNormal(ffmean_new, thelimits)
# Check for a jump.
if(inLimitsBool == FALSE)
{
jump_found <- TRUE
detected_count <- detected_count + 1
# detect_pos_vec[detected_count] <- streampos
# streampos has already been advanced, so record streampos - 1.
detect_pos_vec[detected_count] <- streampos-1
}
} #end of while (jump_found==FALSE) - will restart process, if (streampos < N)
#end of Phase 2 - now either restart or, if the stream is finished, return
} #end while (streampos < N)
# Trim detect_pos_vec to the detections actually recorded.
# NOTE(review): if no change was found, detect_pos_vec[1:0] returns the
# initial 0 entry rather than an empty vector -- confirm this is intended.
detect_pos_vec <- detect_pos_vec[1:detected_count]
return(detect_pos_vec)
}
#end FFF
#-------------------------------------------------------------------------#
#' Change detection using the AFF method, using prechange mean and vairance
#'
#' Original implementation in R of the AFF, with prechange parameters
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param affparams An \emph{unnamed} list of parameters for the FFF algorithm.
#' Consists of:
#' \describe{
#' \item{\code{lambda}}{The value of the fixed forgetting
#' factor (FFF). Should be in the range
#' [0,1].}
#'
#' \item{\code{p}}{The value of the significance threshold,
#' which was later renamed \code{alpha}
#' (in the paper, not in this function).}
#'
#' \item{\code{resettozero}}{A flag; if it zero, then the
#' ffmean will be reset to zero
#' after each change. Usually set
#' to 1 (i.e. do not reset).}
#'
#' \item{\code{u_init}}{The initial value of \code{u}.
#' Should be set to 0.}
#'
#' \item{\code{v_init}}{The initial value of \code{v}.
#' Should be set to 0.}
#'
#' \item{\code{w_init}}{The initial value of \code{w}.
#' Should be set to 0.}
#'
#' \item{\code{affmean_init}}{The initial value of the
#' forgetting factor mean,
#' \code{ffmean}.
#' Should be set to 0.}
#'
#' \item{\code{affvar_init}}{The initial value of the
#' forgetting factor variance,
#' \code{ffvar}.
#' Should be set to 0.}
#'
#' \item{\code{low_bound}}{The lower bound for \code{lambda}.
#' Usually set to \code{0.6}.}
#'
#' \item{\code{up_bound}}{The upper bound for \code{lambda}.
#' Usually set to \code{1}.}
#'
#' \item{\code{signchosen}}{The sign used in the gradient
#' descent. Usually set to
#' \code{-1}.}
#'
#' \item{\code{alpha}}{The value of the step size in
#' the gradient descent step. In
#' the paper it is referred to
#' as \eqn{\epsilon}.
#' Usually \code{0.01}, or otherwise
#' \code{0.1} or \code{0.001}.}
#'
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector with the values of the adaptive forgetting factor
#' \eqn{\overrightarrow{\lambda}}.
#'
#'
#' @keywords internal
AFF_scaled_stream_jumpdetect_prechange <- function(stream, BL, affparams,
mu0, sigma0){
# Unpack the AFF parameters (see the roxygen block above for meanings).
#alpha <- 0.01
lambda <- affparams[[1]]
p <- affparams[[2]]
resettozero <- affparams[[3]]
#cat("okay...")
u_new <- affparams[[4]]
v_new <- affparams[[5]]
v_old <- v_new
w_new <- affparams[[6]]
affmean_new <- affparams[[7]]
# Initialise m so that m_new = affmean_new * w_new holds.
m_new <- affmean_new*w_new
affvar_new <- affparams[[8]]
# Bounds within which the forgetting factor lambda is kept.
low_bound <- affparams[[9]]
up_bound <- affparams[[10]]
# Sign used in the gradient-descent step (usually -1).
signchosen <- affparams[[11]]
# Step size of the gradient-descent update (epsilon in the paper).
alpha <- affparams[[12]]
# Derivative bookkeeping for the adaptive lambda update.
xbar_new_deriv <- 0
Delta_new <- 0
Omega_new <- 0
# Will run until the stream is finished or a change is detected.
streampos <- 0
N <- length(stream)
detected_count <- 0
# Vector for saving detected changepoints; trimmed before returning.
detect_pos_vec <- rep(0, 1)
# Inverse of the burn-in variance, used to scale L_deriv_new below.
inv_AFFderiv_estim_BL <- 0
inv_var_burn <- 0
# The adaptive forgetting factor is recorded at every stream position.
lambda_vec <- rep(0, N)
jumpdetected <- FALSE
while ( (streampos < N) && (jumpdetected==FALSE) ){
#--------------------------------------------------------------------#
# Phase 1: "burn-in". The prechange parameters are known in this
# variant, so nothing is actually estimated; these variables are kept
# only for symmetry with the unknown-prechange version.
lambda_beforeBL <- 1
w_BL_new <- 0
u_BL_new <- 0
v_BL_new <- 0
v_BL_old <- 0
# Initialize burn-in mean and variance (unused in this variant).
mean_BL_new <- 0
var_BL_new <- 0
# Set the estimated AFF derivative to 0.
AFFderiv_estim_BL <- 0
# Reset the burn-in counter.
BLcount <- 0
# Use the known prechange mean and variance directly.
mean_burn <- mu0
var_burn <- sigma0^2
# Packed as (significance threshold, mean, variance) for the limit helper.
burnvec <- c(p, mean_burn, var_burn)
#end of Phase 1
#--------------------------------------------------------------------#
# Guard against a zero prechange variance when scaling the derivative.
if (var_burn > 0){
inv_var_burn <- 1 / var_burn
} else {
inv_var_burn <- 1
}
#--------------------------------------------------------------------#
# Phase 2: monitoring for a jump.
thelimits <- c(0,0)
jumpdetected <- FALSE
while((jumpdetected==FALSE) && (streampos < N)){
# Get the next observation from the stream.
streampos <- update_streampos(streampos)
x_new <- get_nextobs_fromstream(stream, streampos)
# Update the derivative terms BEFORE m and w are advanced -- these
# recursions use the previous values of m_new and w_new.
Delta_new <- update_Delta(Delta_new, lambda, m_new)
Omega_new <- update_Omega(Omega_new, lambda, w_new)
# Advance the forgetting-factor sums and weights.
m_new <- update_m(m_new, lambda, x_new)
w_new <- update_w(w_new, lambda)
u_new <- update_u(u_new, w_new)
# v is not needed for the mean update, but is kept current for the
# variance update below.
v_old <- v_new
v_new <- getv_from_u_and_w(u_new, w_new)
# Update the mean and its derivative with respect to lambda.
affmean_old <- affmean_new
affmean_new <- get_xbar(m_new, w_new)
xbar_old_deriv <- xbar_new_deriv
xbar_new_deriv <- get_xbar_deriv(Delta_new, affmean_new, Omega_new, w_new)
# Update the variance (depends on the *old* mean).
affvar_old <- affvar_new
affvar_new <- update_var(affvar_old, affmean_old, lambda, v_old, v_new, w_new, x_new)
# Compute the control limits in one call.
thelimits <- getNormalLimitsFromList(burnvec, u_new)
# TRUE while the AFF mean stays inside the control limits.
inLimitsBool <- isInLimitsNormal(affmean_new, thelimits)
# Check for a jump.
if(inLimitsBool == FALSE)
{
jumpdetected <- TRUE
detected_count <- detected_count + 1
detect_pos_vec[detected_count] <- streampos
}
# Gradient-descent update of lambda, scaled by the inverse prechange
# variance, then clipped back into [low_bound, up_bound].
L_deriv_new <- get_L_deriv(affmean_old, xbar_old_deriv, x_new)
L_deriv_scaled <- L_deriv_new*inv_var_burn
lambda <- update_lambda(lambda, signchosen, alpha, L_deriv_scaled)
lambda <- inbounds(lambda, low_bound, up_bound)
# Record the current forgetting factor for this stream position.
lambda_vec[streampos] <- lambda
} #end of while (jumpdetected==FALSE) - will restart process, if (streampos < N)
} #end while (streampos < N)
# Trim detect_pos_vec to the detections actually recorded.
# NOTE(review): if no change was found, detect_pos_vec[1:0] returns the
# initial 0 entry rather than an empty vector -- confirm this is intended.
detect_pos_vec <- detect_pos_vec[1:detected_count]
return( list(tau=detect_pos_vec, lambda=lambda_vec) )
}
#end AFF
| /ffstream/R/originalRcodePrechange.R | no_license | akhikolla/InformationHouse | R | false | false | 21,101 | r | #' CUSUM change detection for a stream in R with known prechange parameters
#'
#' Original implementation in R of CUSUM change detector, but now
#' expecting the prechange mean and variance to be specified.
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param params A list of parameters for the CUSUM algorithm. Consists of
#' \describe{
#' \item{\code{d}}{A control parameter also known as
#' \eqn{k}.}
#'
#' \item{\code{B}}{A control parameter also known as
#' \eqn{h}.}
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @section Author:
#' Dean Bodenham
#'
#'
#' @section References:
#' D. A. Bodenham and N. M. Adams (2016)
#' \emph{Continuous monitoring for changepoints in data
#' streams using adaptive estimation}.
#' Statistics and Computing
#' doi:10.1007/s11222-016-9684-8
#'
#'
#' @keywords internal
CUSUM_stream_jumpdetect_prechange <- function(stream, BL, params, mu0, sigma0){
  # CUSUM control parameters (k and h in the usual notation).
  d <- params[[1]]
  B <- params[[2]]

  # Scan the stream until it is exhausted or a change is flagged.
  streampos <- 1
  N <- length(stream)
  detected_count <- 0
  # Only one changepoint is recorded, so a length-1 buffer suffices.
  detect_pos_vec <- rep(0, 1)
  jump_found <- FALSE

  while ((streampos < N) && (!jump_found)) {
    # Prechange parameters are known; no burn-in estimation is performed.
    cusum_mean <- mu0
    cusum_sd <- sqrt(sigma0^2)
    drift <- d * cusum_sd
    threshold <- B * cusum_sd

    # Upper and lower one-sided CUSUM statistics.
    upper_stat <- 0
    lower_stat <- 0
    obs_index <- 0

    while ((!jump_found) && (streampos < N)) {
      obs_index <- obs_index + 1
      x_new <- get_nextobs_fromstream(stream, streampos)
      streampos <- update_streampos(streampos)

      # Recursive CUSUM updates, truncated below at zero.
      upper_stat <- max(upper_stat + x_new - cusum_mean - drift, 0)
      lower_stat <- max(lower_stat + cusum_mean - x_new - drift, 0)

      # A change is flagged when either statistic exceeds the threshold.
      jump_found <- (upper_stat > threshold) || (lower_stat > threshold)
      if (jump_found) {
        detected_count <- detected_count + 1
        detect_pos_vec[detected_count] <- streampos
      }
    }
  }

  # NOTE: when no change is found this evaluates detect_pos_vec[1:0], which
  # returns the initial 0 entry -- behaviour preserved from the original.
  detect_pos_vec <- detect_pos_vec[1:detected_count]
  return(detect_pos_vec)
}
#' EWMA change detection for a stream in R with known prechange parameters
#'
#' Original implementation in R of EWMA change detector, but now
#' expecting the prechange mean and variance to be specified.
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is
#' kept for historical reasons.
#'
#' @param params A list of parameters for the EWMA algorithm. Consists of
#' \describe{
#' \item{\code{r}}{A control parameter which controls
#' the rate of downweighting.}
#'
#' \item{\code{L}}{A control parameter which determines
#' the width of the control limits.}
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @keywords internal
EWMA_stream_jumpdetect_prechange <- function(stream, BL, params, mu0, sigma0){
  # EWMA control parameters: r (aka alpha) is the downweighting rate,
  # L (aka beta) sets the width of the control limits.
  # Fix: use [[ ]] for both elements so the function works whether
  # `params` is a numeric vector c(r, L) or a list(r, L); the original
  # `params[2]` returned a length-1 *list* for list input, which breaks
  # the arithmetic on L below. For vector input the value is unchanged.
  r <- params[[1]]
  L <- params[[2]]

  # Run until the stream is exhausted or a change is detected.
  streampos <- 1
  N <- length(stream)
  detected_count <- 0
  # Only one changepoint is recorded in this variant.
  detect_pos_vec <- rep(0, 1)
  jump_found <- FALSE

  while ((streampos < N) && (jump_found == FALSE)) {
    # Prechange parameters are known, so no burn-in estimation is done.
    mu_1 <- mu0
    sigma_1 <- sqrt(sigma0^2)

    # Phase 2: monitor for a change. W is the EWMA statistic.
    # NOTE(review): W starts at 0 rather than mu_1, matching the original
    # code; this looks questionable when mu0 != 0 -- confirm.
    W <- 0
    delta <- r/(2 - r)
    rFactorSigmaZ <- 1

    while ((jump_found == FALSE) && (streampos < N)) {
      x_new <- get_nextobs_fromstream(stream, streampos)
      streampos <- update_streampos(streampos)

      # EWMA update and time-varying control limits.
      W <- r * x_new + (1 - r) * W
      rFactorSigmaZ <- rFactorSigmaZ * (1 - r)^2
      sigmaZ <- sqrt(delta * (1 - rFactorSigmaZ)) * sigma_1
      UL <- mu_1 + L * sigmaZ  # upper control limit
      LL <- mu_1 - L * sigmaZ  # lower control limit

      # Flag a change when the statistic leaves the control limits.
      if ((W > UL) | (W < LL)) {
        jump_found <- TRUE
        detected_count <- detected_count + 1
        detect_pos_vec[detected_count] <- streampos
      }
    }
  }

  # NOTE: when no change is found this evaluates detect_pos_vec[1:0], which
  # returns the initial 0 entry -- behaviour preserved from the original.
  detect_pos_vec <- detect_pos_vec[1:detected_count]
  return(detect_pos_vec)
}
#-------------------------------------------------------------------------#
#' Change detection using the Fixed Forgetting Factor method, prechange known
#'
#' Original implementation in R of FFF change detector, but now prechange is
#' known
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param ffparams An \emph{unnamed} list of parameters for the FFF algorithm.
#' Consists of:
#' \describe{
#' \item{\code{lambda}}{The value of the fixed forgetting
#' factor (FFF). Should be in the range
#' [0,1].}
#'
#' \item{\code{p}}{The value of the significance threshold,
#' which was later renamed \code{alpha}
#' (in the paper, not in this function).}
#'
#' \item{\code{resettozero}}{A flag; if it is zero, then the
#' ffmean will be reset to zero
#' after each change. Usually set
#' to 1 (i.e. do not reset).}
#'
#' \item{\code{u_init}}{The initial value of \code{u}.
#' Should be set to 0.}
#'
#' \item{\code{v_init}}{The initial value of \code{v}.
#' Should be set to 0.}
#'
#' \item{\code{w_init}}{The initial value of \code{w}.
#' Should be set to 0.}
#'
#' \item{\code{ffmean_init}}{The initial value of the
#' forgetting factor mean,
#' \code{ffmean}.
#' Should be set to 0.}
#'
#' \item{\code{ffvar_init}}{The initial value of the
#' forgetting factor variance,
#' \code{ffvar}.
#' Should be set to 0.}
#'
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#' @return A vector of estimated changepoints.
#'
#'
#' @keywords internal
FFF_stream_jumpdetect_prechange <- function(stream, BL, ffparams, mu0, sigma0){
  # FFF change detector with known prechange parameters.
  # Unpack the (unnamed) parameter list -- see the roxygen block above.
  lambda <- ffparams[[1]]
  p <- ffparams[[2]]
  resettozero <- ffparams[[3]]
  u_new <- ffparams[[4]]
  v_new <- ffparams[[5]]
  v_old <- v_new
  w_new <- ffparams[[6]]
  ffmean_new <- ffparams[[7]]
  ffvar_new <- ffparams[[8]]
  # Will run until the stream is finished.
  streampos <- 1
  N <- length(stream)
  detected_count <- 0
  detect_pos_vec <- rep(0, 1)
  jump_found <- FALSE
  while ((streampos < N) && (jump_found == FALSE)){
    # Prechange parameters are known, so the burn-in estimation phase is
    # replaced by the supplied mu0/sigma0.
    mean_burn <- mu0
    var_burn <- sigma0^2
    burnvec <- c(p, mean_burn, var_burn)
    if (resettozero == 0){
      # Reset the forgetting-factor statistics.  Despite the historical
      # flag name, the mean/variance are reset to the prechange values
      # (mu0, sigma0^2), not to zero.
      w_new <- 0
      u_new <- 0
      v_new <- 0
      v_old <- 0
      ffmean_new <- mu0
      ffvar_new <- sigma0^2
    }
    # Monitoring phase: sequentially update the FFF mean/variance and test
    # the mean against normal limits derived from the prechange model.
    while ((jump_found == FALSE) && (streampos < N)){
      # Get the next observation from the stream.
      x_new <- get_nextobs_fromstream(stream, streampos)
      streampos <- update_streampos(streampos)
      # Updating the weights incrementally is cheaper than recomputing.
      w_new <- update_w(w_new, lambda)
      u_new <- update_u(u_new, w_new)
      # v is not strictly needed here, but is kept up to date anyway.
      v_old <- v_new
      v_new <- getv_from_u_and_w(u_new, w_new)
      # The old mean is needed by the variance update below, so the mean
      # must be updated first (the derivations require this order).
      ffmean_old <- ffmean_new
      ffmean_new <- update_ffmean(ffmean_old, w_new, x_new)
      ffvar_old <- ffvar_new
      ffvar_new <- update_var(ffvar_old, ffmean_old, lambda, v_old, v_new, w_new, x_new)
      # Control limits computed in one call from the prechange parameters.
      thelimits <- getNormalLimitsFromList(burnvec, u_new)
      # Boolean: is the FF mean inside the limits?
      inLimitsBool <- isInLimitsNormal(ffmean_new, thelimits)
      if (inLimitsBool == FALSE){
        jump_found <- TRUE
        detected_count <- detected_count + 1
        # streampos has already advanced past the triggering observation,
        # hence the -1.
        detect_pos_vec[detected_count] <- streampos - 1
      }
    } # end of inner monitoring loop
  } # end while (streampos < N)
  # BUG FIX: seq_len() returns integer(0) when nothing was detected; the
  # old `1:detected_count` was 1:0 and wrongly returned the placeholder 0.
  detect_pos_vec <- detect_pos_vec[seq_len(detected_count)]
  return(detect_pos_vec)
}
#end FFF
#-------------------------------------------------------------------------#
#' Change detection using the AFF method, using prechange mean and variance
#'
#' Original implementation in R of the AFF, with prechange parameters
#'
#'
#' @param stream The stream of observations.
#'
#' @param BL The burn-in length - this won't actually be used, but is kept
#' for historical reasons.
#'
#' @param affparams An \emph{unnamed} list of parameters for the AFF algorithm.
#' Consists of:
#' \describe{
#' \item{\code{lambda}}{The value of the fixed forgetting
#' factor (FFF). Should be in the range
#' [0,1].}
#'
#' \item{\code{p}}{The value of the significance threshold,
#' which was later renamed \code{alpha}
#' (in the paper, not in this function).}
#'
#' \item{\code{resettozero}}{A flag; if it zero, then the
#' ffmean will be reset to zero
#' after each change. Usually set
#' to 1 (i.e. do not reset).}
#'
#' \item{\code{u_init}}{The initial value of \code{u}.
#' Should be set to 0.}
#'
#' \item{\code{v_init}}{The initial value of \code{v}.
#' Should be set to 0.}
#'
#' \item{\code{w_init}}{The initial value of \code{w}.
#' Should be set to 0.}
#'
#' \item{\code{affmean_init}}{The initial value of the
#' forgetting factor mean,
#' \code{ffmean}.
#' Should be set to 0.}
#'
#' \item{\code{affvar_init}}{The initial value of the
#' forgetting factor variance,
#' \code{ffvar}.
#' Should be set to 0.}
#'
#' \item{\code{low_bound}}{The lower bound for \code{lambda}.
#' Usually set to \code{0.6}.}
#'
#' \item{\code{up_bound}}{The upper bound for \code{lambda}.
#' Usually set to \code{1}.}
#'
#' \item{\code{signchosen}}{The sign used in the gradient.
#' descent. Usually set to
#' \code{-1}.}
#'
#' \item{\code{alpha}}{The value of the step size in
#' the gradient descent step. In
#' the paper it is referred to
#' as \eqn{\epsilon}.
#' Usually \code{0.01}, or otherwise
#' \code{0.1} or \code{0.001}.}
#'
#' }
#'
#' @param mu0 The prechange mean, which is assumed known in this context
#'
#' @param sigma0 The prechange standard deviation, which is assumed known
#' in this context
#'
#'
#' @return A vector with the values of the adaptive forgetting factor
#' \eqn{\overrightarrow{\lambda}}.
#'
#'
#' @keywords internal
AFF_scaled_stream_jumpdetect_prechange <- function(stream, BL, affparams,
                                                   mu0, sigma0){
  # AFF change detector with known prechange parameters and a
  # variance-scaled gradient step for the adaptive forgetting factor.
  # Unpack the (unnamed) parameter list -- see the roxygen block above.
  lambda <- affparams[[1]]
  p <- affparams[[2]]
  resettozero <- affparams[[3]]
  u_new <- affparams[[4]]
  v_new <- affparams[[5]]
  v_old <- v_new
  w_new <- affparams[[6]]
  affmean_new <- affparams[[7]]
  # m is the unnormalised FF mean: xbar = m / w.
  m_new <- affmean_new * w_new
  affvar_new <- affparams[[8]]
  low_bound <- affparams[[9]]
  up_bound <- affparams[[10]]
  # Sign used in the gradient step; usually -1 (gradient descent).
  signchosen <- affparams[[11]]
  alpha <- affparams[[12]]
  # Derivative bookkeeping for the adaptive lambda update.
  xbar_new_deriv <- 0
  Delta_new <- 0
  Omega_new <- 0
  # streampos starts at 0 because it is advanced *before* reading.
  streampos <- 0
  N <- length(stream)
  detected_count <- 0
  # Vector of detection positions; trimmed before returning.
  detect_pos_vec <- rep(0, 1)
  inv_var_burn <- 0
  # Trace of the adaptive forgetting factor, one entry per observation.
  lambda_vec <- rep(0, N)
  jumpdetected <- FALSE
  while ((streampos < N) && (jumpdetected == FALSE)){
    # Prechange parameters are known, so no burn-in estimation is needed.
    mean_burn <- mu0
    var_burn <- sigma0^2
    burnvec <- c(p, mean_burn, var_burn)
    # Guard against a zero prechange variance when scaling the derivative.
    if (var_burn > 0){
      inv_var_burn <- 1 / var_burn
    } else {
      inv_var_burn <- 1
    }
    # Monitoring phase.
    jumpdetected <- FALSE
    while ((jumpdetected == FALSE) && (streampos < N)){
      streampos <- update_streampos(streampos)
      x_new <- get_nextobs_fromstream(stream, streampos)
      # The derivatives must be updated with the *old* m and w values.
      Delta_new <- update_Delta(Delta_new, lambda, m_new)
      Omega_new <- update_Omega(Omega_new, lambda, w_new)
      # Incremental updates of the forgetting-factor statistics.
      m_new <- update_m(m_new, lambda, x_new)
      w_new <- update_w(w_new, lambda)
      u_new <- update_u(u_new, w_new)
      # v is not strictly needed here, but is kept up to date anyway.
      v_old <- v_new
      v_new <- getv_from_u_and_w(u_new, w_new)
      # Update the mean and its derivative (old values kept for later use).
      affmean_old <- affmean_new
      affmean_new <- get_xbar(m_new, w_new)
      xbar_old_deriv <- xbar_new_deriv
      xbar_new_deriv <- get_xbar_deriv(Delta_new, affmean_new, Omega_new, w_new)
      # Update the variance (needs the old mean).
      affvar_old <- affvar_new
      affvar_new <- update_var(affvar_old, affmean_old, lambda, v_old, v_new, w_new, x_new)
      # Test the AFF mean against normal limits from the prechange model.
      thelimits <- getNormalLimitsFromList(burnvec, u_new)
      inLimitsBool <- isInLimitsNormal(affmean_new, thelimits)
      if (inLimitsBool == FALSE){
        jumpdetected <- TRUE
        detected_count <- detected_count + 1
        detect_pos_vec[detected_count] <- streampos
      }
      # Gradient step on lambda, scaled by the inverse prechange variance,
      # then clamped to [low_bound, up_bound].
      L_deriv_new <- get_L_deriv(affmean_old, xbar_old_deriv, x_new)
      L_deriv_scaled <- L_deriv_new * inv_var_burn
      lambda <- update_lambda(lambda, signchosen, alpha, L_deriv_scaled)
      lambda <- inbounds(lambda, low_bound, up_bound)
      lambda_vec[streampos] <- lambda
    } # end of inner monitoring loop
  } # end while (streampos < N)
  # BUG FIX: seq_len() correctly yields an empty vector when nothing was
  # detected; the old `1:detected_count` was 1:0 and returned a spurious 0.
  detect_pos_vec <- detect_pos_vec[seq_len(detected_count)]
  return(list(tau = detect_pos_vec, lambda = lambda_vec))
}
#end AFF
|
# globals expected
# processCounties(counties) -> populousCounties
#processWB(hhsdata,plans) -> stateSummary
#counties <- read.csv("./CO-EST2013-Alldata.csv")
processCountyPlans <- function(ctyplans) {
  # Summarise one county's marketplace plans: population-weighted metal-level
  # fractions, premium statistics (age-40 individual premiums) and the set of
  # "competitive" carriers -- those priced within one standard deviation of
  # the cheapest plan -- for each metal level.
  # Relies on the globals `allCounties` (per-state county tables) and
  # `stateSummary` (state-level enrolment by metal level).
  # Returns a list of summary fields, or NULL when the county has no
  # bronze plans.
  state <- ctyplans[1, "State"]
  popCounties <- allCounties[[ctyplans[1, "State"]]]
  popCounties[, "CTYNAME"] <- gsub(" COUNTY$", "", popCounties[, "CTYNAME"])
  countyPop <- popCounties[charmatch(ctyplans[, 2], popCounties$CTYNAME), "CENSUS2010POP"][1]
  statePop <- stateSummary[stateSummary$stateabb == state, "Pop"]
  bronzeFraction <- stateSummary[stateSummary$stateabb == state, "bronze"] * countyPop / statePop
  silverFraction <- stateSummary[stateSummary$stateabb == state, "silver"] * countyPop / statePop
  goldFraction <- stateSummary[stateSummary$stateabb == state, "gold"] * countyPop / statePop
  # Keep only plan rows whose county name matches the population table.
  matched <- !is.na(charmatch(ctyplans[, 2], popCounties$CTYNAME))
  ctybronze <- ctyplans[which(ctyplans$Metal.Level == "Bronze" & matched), ]
  ctysilver <- ctyplans[which(ctyplans$Metal.Level == "Silver" & matched), ]
  ctygold <- ctyplans[which(ctyplans$Metal.Level == "Gold" & matched), ]
  if (nrow(ctybronze) != 0) {
    ctybronze <- ctybronze[order(ctybronze$Premium.Adult.Individual.Age.40), ]
    ctybronzeprc <- ctybronze[, "Premium.Adult.Individual.Age.40"]
    ctybronzenumplans <- length(ctybronzeprc)
    numCarriers <- length(unique(ctybronze[, "Issuer.Name"]))
    # sd() is NA for a single plan; treat that as zero spread so the
    # cheapest plan still counts as competitive.
    competitiveCutoff <- min(ctybronzeprc) + ifelse(is.na(sd(ctybronzeprc)), 0, sd(ctybronzeprc))
    competitiveCarriers <- ctybronze[which(ctybronze$Premium.Adult.Individual.Age.40 <= competitiveCutoff), "Issuer.Name"]
    ctysilver <- ctysilver[order(ctysilver$Premium.Adult.Individual.Age.40), ]
    ctysilverprc <- ctysilver[, "Premium.Adult.Individual.Age.40"]
    ctysilvernumplans <- length(ctysilverprc)
    numCarriersSilver <- length(unique(ctysilver[, "Issuer.Name"]))
    # BUG FIX: silver and gold previously used sd() unguarded, so a county
    # with a single silver/gold plan produced an NA cutoff and therefore no
    # competitive carriers; now guarded the same way as bronze.
    competitiveCutoffsilver <- min(ctysilverprc) + ifelse(is.na(sd(ctysilverprc)), 0, sd(ctysilverprc))
    competitiveCarriersSilver <- ctysilver[which(ctysilver$Premium.Adult.Individual.Age.40 <= competitiveCutoffsilver), "Issuer.Name"]
    ctygold <- ctygold[order(ctygold$Premium.Adult.Individual.Age.40), ]
    ctygoldprc <- ctygold[, "Premium.Adult.Individual.Age.40"]
    ctygoldnumplans <- length(ctygoldprc)
    numCarriersgold <- length(unique(ctygold[, "Issuer.Name"]))
    competitiveCutoffgold <- min(ctygoldprc) + ifelse(is.na(sd(ctygoldprc)), 0, sd(ctygoldprc))
    competitiveCarriersgold <- ctygold[which(ctygold$Premium.Adult.Individual.Age.40 <= competitiveCutoffgold), "Issuer.Name"]
    # Record up to six competitive bronze carriers, in premium order.
    competitiveCarrier1 <- as.character(NA)
    competitiveCarrier2 <- as.character(NA)
    competitiveCarrier3 <- as.character(NA)
    competitiveCarrier4 <- as.character(NA)
    competitiveCarrier5 <- as.character(NA)
    competitiveCarrier6 <- as.character(NA)
    uniqCarriers <- unique(competitiveCarriers)
    if (length(uniqCarriers) > 6) {
      # BUG FIX: uniqCarriers is a vector, so the old `uniqCarriers[1:6, ]`
      # raised "incorrect number of dimensions" whenever a county had more
      # than six competitive carriers.
      uniqCarriers <- uniqCarriers[1:6]
    }
    i <- 1
    for (cc in uniqCarriers) {
      if (i == 1) { competitiveCarrier1 <- cc }
      if (i == 2) { competitiveCarrier2 <- cc }
      if (i == 3) { competitiveCarrier3 <- cc }
      if (i == 4) { competitiveCarrier4 <- cc }
      if (i == 5) { competitiveCarrier5 <- cc }
      if (i == 6) { competitiveCarrier6 <- cc }
      i <- i + 1
    }
    list(
      state = state,
      bronzeFraction = bronzeFraction,
      numCarriersBronze = numCarriers,
      numCarriersSilver = numCarriersSilver,
      numCarriersgold = numCarriersgold,
      countyPop = countyPop,
      medianctybronzeprc = median(ctybronzeprc),
      meanctybronzeprc = mean(ctybronzeprc),
      sdctybronzeprc = sd(ctybronzeprc),
      maxctybronzeprc = max(ctybronzeprc),
      minctybronzeprc = min(ctybronzeprc),
      ctybronzenumplans = ctybronzenumplans,
      competitiveBronze = length(unique(competitiveCarriers)),
      silverFraction = silverFraction,
      medianctysilverprc = median(ctysilverprc),
      meanctysilverprc = mean(ctysilverprc),
      sdctysilverprc = sd(ctysilverprc),
      maxctysilverprc = max(ctysilverprc),
      minctysilverprc = min(ctysilverprc),
      ctysilvernumplans = ctysilvernumplans,
      competitiveSilver = length(unique(competitiveCarriersSilver)),
      goldFraction = goldFraction,
      medianctygoldprc = median(ctygoldprc),
      meanctygoldprc = mean(ctygoldprc),
      sdctygoldprc = sd(ctygoldprc),
      maxctygoldprc = max(ctygoldprc),
      minctygoldprc = min(ctygoldprc),
      ctygoldnumplans = ctygoldnumplans,
      competitiveGold = length(unique(competitiveCarriersgold)),
      compCarrier1 = competitiveCarrier1,
      compCarrier2 = competitiveCarrier2,
      compCarrier3 = competitiveCarrier3,
      compCarrier4 = competitiveCarrier4,
      compCarrier5 = competitiveCarrier5,
      compCarrier6 = competitiveCarrier6
    )
  }
}
processStatePlans <- function(stateplans) {
  # Summarise every county in one state, drop counties where
  # processCountyPlans() returned NULL, and keep only counties with fewer
  # than three competitive bronze carriers (compCarrier3 is NA).
  county_summaries <- by(stateplans, stateplans$County, processCountyPlans)
  county_summaries <- county_summaries[!sapply(county_summaries, is.null)]
  ctydf <- ldply(county_summaries, data.frame)
  ctydf[is.na(ctydf$compCarrier3), ]
}
marketSummaryStatePlans <- function(all = FALSE) {
  # Build the per-state market summary grouped by the two leading
  # competitive carriers.  Reads the globals `counties` and `plans`, and
  # (re)assigns the global `allCounties` used by processCountyPlans().
  allCounties <<- processCounties(counties, all)
  state_results <- by(plans, plans$State, processStatePlans)
  state_df <- ldply(state_results, data.frame)
  # Market size is the enrolment-weighted sum of mean premiums across the
  # three metal levels; numPlans the summed enrolment fractions.
  mkt <- ddply(state_df, .(state, compCarrier1, compCarrier2), summarize,
               marketSize = sum(bronzeFraction * meanctybronzeprc, na.rm = TRUE)
               + sum(silverFraction * meanctysilverprc, na.rm = TRUE)
               + sum(goldFraction * meanctygoldprc, na.rm = TRUE),
               numPlans = sum(bronzeFraction) + sum(silverFraction) + sum(goldFraction))
  mkt[order(mkt$state, -mkt$marketSize), ]
}
processAllStatePlans <- function(stateplans) {
  # Like processStatePlans(), but keeps every county regardless of how
  # many competitive carriers it has.
  county_summaries <- by(stateplans, stateplans$County, processCountyPlans)
  county_summaries <- county_summaries[!sapply(county_summaries, is.null)]
  ldply(county_summaries, data.frame)
}
allStatePlansByCounty <- function() {
  # County-level summaries for every state (all counties, not just the
  # populous ones).  Reads the globals `counties` and `plans` and updates
  # the global `allCounties`.
  allCounties <<- processCounties(counties, TRUE)
  state_results <- by(plans, plans$State, processAllStatePlans)
  ldply(state_results, data.frame)
}
summaryAllStatePlans <- function() {
  # Per-state market size and plan counts over all counties, merged with
  # broker compensation data and ranked by relative value.
  allCounties <<- processCounties(counties, TRUE)
  state_results <- by(plans, plans$State, processAllStatePlans)
  state_df <- ldply(state_results, data.frame)
  bests <- ddply(state_df, .(state), summarize,
                 marketSize = sum(bronzeFraction * meanctybronzeprc, na.rm = TRUE)
                 + sum(silverFraction * meanctysilverprc, na.rm = TRUE)
                 + sum(goldFraction * meanctygoldprc, na.rm = TRUE),
                 numPlans = sum(bronzeFraction, na.rm = TRUE) + sum(silverFraction, na.rm = TRUE) + sum(goldFraction, na.rm = TRUE))
  # Broker compensation index per state; rows are assumed to line up with
  # the built-in state.abb ordering -- TODO confirm against the CSV.
  brokerComps <- read.csv("./brokercomp.csv")
  brokerComps <- cbind(brokerComps, state.abb)
  brokerComps <- subset(brokerComps, select = c(ind, state.abb))
  names(brokerComps)[2] <- "state"
  bests <- merge(bests, brokerComps)
  bests$relVal <- bests[, "numPlans"] * bests[, "ind"]
  bests[order(-bests$relVal), ]
}
processBestCarrierTargets <- function(df) {
  # For one state's market summary (one row per carrier pair), find the
  # pair of competitive carriers that jointly covers the largest market
  # size.  A row counts towards a candidate pair when either carrier
  # appears alone (partner NA) or when the pair appears in either order.
  #
  # Returns a list: carrier1, carrier2, max (market size covered) and
  # numPlans (enrolment-fraction total covered).
  carriers <- c(as.character(df[, "compCarrier1"]), as.character(df[, "compCarrier2"]))
  carriers <- carriers[complete.cases(carriers)]
  uniqCarriers <- unique(carriers)
  # Candidate pairs: all 2-combinations (gtools::combinations), or a single
  # row when there are fewer than three distinct carriers; "NULL CARRIER"
  # pads out a singleton.
  if (length(uniqCarriers) > 2) {
    uc <- combinations(length(uniqCarriers), 2, uniqCarriers)
  } else if (length(uniqCarriers) == 2) {
    uc <- matrix(uniqCarriers, nrow = 1)
  } else {
    uc <- matrix(c(uniqCarriers, "NULL CARRIER"), nrow = 1)
  }
  ucMax <- character()
  best_size <- 0  # renamed from `max`, which shadowed base::max
  numPlans <- 0
  for (i in seq_len(nrow(uc))) {
    localMax <- 0
    localNumPlans <- 0
    for (j in seq_len(nrow(df))) {
      if ((df[j, "compCarrier1"] == uc[i, 1] && is.na(df[j, "compCarrier2"])) ||
          (df[j, "compCarrier1"] == uc[i, 2] && is.na(df[j, "compCarrier2"])) ||
          (df[j, "compCarrier1"] == uc[i, 1] && df[j, "compCarrier2"] == uc[i, 2]) ||
          (df[j, "compCarrier1"] == uc[i, 2] && df[j, "compCarrier2"] == uc[i, 1])) {
        localMax <- localMax + df[j, "marketSize"]
        localNumPlans <- localNumPlans + df[j, "numPlans"]
      }
    }
    if (localMax > best_size) {
      best_size <- localMax
      numPlans <- localNumPlans
      ucMax <- uc[i, ]
    }
  }
  # Dead code removed: the original also built (and discarded) a data frame
  # of the winning rows, and evaluated several expressions whose results
  # were never used.
  list(carrier1 = ucMax[1], carrier2 = ucMax[2], max = best_size, numPlans = numPlans)
}
bestCarrierTargets <- function(mkt = NULL) {
  # Pick the best carrier pair per state and rank states by relative value
  # (plans covered times the broker compensation index).
  if (is.null(mkt)) {
    mkt <- marketSummaryStatePlans()
  }
  bests <- dlply(mkt, .(state), processBestCarrierTargets)
  bests <- ldply(bests, data.frame)
  brokerComps <- read.csv("./brokercomp.csv")
  brokerComps <- cbind(brokerComps, state.abb)
  brokerComps <- subset(brokerComps, select = c(ind, state.abb))
  names(brokerComps)[2] <- "state"
  bests <- merge(bests, brokerComps)
  bests$relVal <- bests[, "numPlans"] * bests[, "ind"]
  bests[order(-bests$relVal), ]
}
| /processCountyPlans.R | no_license | rlump/allplans | R | false | false | 9,819 | r | # globals expected
# processCounties(counties) -> populousCounties
#processWB(hhsdata,plans) -> stateSummary
#counties <- read.csv("./CO-EST2013-Alldata.csv")
processCountyPlans <- function(ctyplans) {
  # Summarise one county's marketplace plans: population-weighted metal-level
  # fractions, premium statistics (age-40 individual premiums) and the set of
  # "competitive" carriers -- those priced within one standard deviation of
  # the cheapest plan -- for each metal level.
  # Relies on the globals `allCounties` (per-state county tables) and
  # `stateSummary` (state-level enrolment by metal level).
  # Returns a list of summary fields, or NULL when the county has no
  # bronze plans.
  state <- ctyplans[1, "State"]
  popCounties <- allCounties[[ctyplans[1, "State"]]]
  popCounties[, "CTYNAME"] <- gsub(" COUNTY$", "", popCounties[, "CTYNAME"])
  countyPop <- popCounties[charmatch(ctyplans[, 2], popCounties$CTYNAME), "CENSUS2010POP"][1]
  statePop <- stateSummary[stateSummary$stateabb == state, "Pop"]
  bronzeFraction <- stateSummary[stateSummary$stateabb == state, "bronze"] * countyPop / statePop
  silverFraction <- stateSummary[stateSummary$stateabb == state, "silver"] * countyPop / statePop
  goldFraction <- stateSummary[stateSummary$stateabb == state, "gold"] * countyPop / statePop
  # Keep only plan rows whose county name matches the population table.
  matched <- !is.na(charmatch(ctyplans[, 2], popCounties$CTYNAME))
  ctybronze <- ctyplans[which(ctyplans$Metal.Level == "Bronze" & matched), ]
  ctysilver <- ctyplans[which(ctyplans$Metal.Level == "Silver" & matched), ]
  ctygold <- ctyplans[which(ctyplans$Metal.Level == "Gold" & matched), ]
  if (nrow(ctybronze) != 0) {
    ctybronze <- ctybronze[order(ctybronze$Premium.Adult.Individual.Age.40), ]
    ctybronzeprc <- ctybronze[, "Premium.Adult.Individual.Age.40"]
    ctybronzenumplans <- length(ctybronzeprc)
    numCarriers <- length(unique(ctybronze[, "Issuer.Name"]))
    # sd() is NA for a single plan; treat that as zero spread so the
    # cheapest plan still counts as competitive.
    competitiveCutoff <- min(ctybronzeprc) + ifelse(is.na(sd(ctybronzeprc)), 0, sd(ctybronzeprc))
    competitiveCarriers <- ctybronze[which(ctybronze$Premium.Adult.Individual.Age.40 <= competitiveCutoff), "Issuer.Name"]
    ctysilver <- ctysilver[order(ctysilver$Premium.Adult.Individual.Age.40), ]
    ctysilverprc <- ctysilver[, "Premium.Adult.Individual.Age.40"]
    ctysilvernumplans <- length(ctysilverprc)
    numCarriersSilver <- length(unique(ctysilver[, "Issuer.Name"]))
    # BUG FIX: silver and gold previously used sd() unguarded, so a county
    # with a single silver/gold plan produced an NA cutoff and therefore no
    # competitive carriers; now guarded the same way as bronze.
    competitiveCutoffsilver <- min(ctysilverprc) + ifelse(is.na(sd(ctysilverprc)), 0, sd(ctysilverprc))
    competitiveCarriersSilver <- ctysilver[which(ctysilver$Premium.Adult.Individual.Age.40 <= competitiveCutoffsilver), "Issuer.Name"]
    ctygold <- ctygold[order(ctygold$Premium.Adult.Individual.Age.40), ]
    ctygoldprc <- ctygold[, "Premium.Adult.Individual.Age.40"]
    ctygoldnumplans <- length(ctygoldprc)
    numCarriersgold <- length(unique(ctygold[, "Issuer.Name"]))
    competitiveCutoffgold <- min(ctygoldprc) + ifelse(is.na(sd(ctygoldprc)), 0, sd(ctygoldprc))
    competitiveCarriersgold <- ctygold[which(ctygold$Premium.Adult.Individual.Age.40 <= competitiveCutoffgold), "Issuer.Name"]
    # Record up to six competitive bronze carriers, in premium order.
    competitiveCarrier1 <- as.character(NA)
    competitiveCarrier2 <- as.character(NA)
    competitiveCarrier3 <- as.character(NA)
    competitiveCarrier4 <- as.character(NA)
    competitiveCarrier5 <- as.character(NA)
    competitiveCarrier6 <- as.character(NA)
    uniqCarriers <- unique(competitiveCarriers)
    if (length(uniqCarriers) > 6) {
      # BUG FIX: uniqCarriers is a vector, so the old `uniqCarriers[1:6, ]`
      # raised "incorrect number of dimensions" whenever a county had more
      # than six competitive carriers.
      uniqCarriers <- uniqCarriers[1:6]
    }
    i <- 1
    for (cc in uniqCarriers) {
      if (i == 1) { competitiveCarrier1 <- cc }
      if (i == 2) { competitiveCarrier2 <- cc }
      if (i == 3) { competitiveCarrier3 <- cc }
      if (i == 4) { competitiveCarrier4 <- cc }
      if (i == 5) { competitiveCarrier5 <- cc }
      if (i == 6) { competitiveCarrier6 <- cc }
      i <- i + 1
    }
    list(
      state = state,
      bronzeFraction = bronzeFraction,
      numCarriersBronze = numCarriers,
      numCarriersSilver = numCarriersSilver,
      numCarriersgold = numCarriersgold,
      countyPop = countyPop,
      medianctybronzeprc = median(ctybronzeprc),
      meanctybronzeprc = mean(ctybronzeprc),
      sdctybronzeprc = sd(ctybronzeprc),
      maxctybronzeprc = max(ctybronzeprc),
      minctybronzeprc = min(ctybronzeprc),
      ctybronzenumplans = ctybronzenumplans,
      competitiveBronze = length(unique(competitiveCarriers)),
      silverFraction = silverFraction,
      medianctysilverprc = median(ctysilverprc),
      meanctysilverprc = mean(ctysilverprc),
      sdctysilverprc = sd(ctysilverprc),
      maxctysilverprc = max(ctysilverprc),
      minctysilverprc = min(ctysilverprc),
      ctysilvernumplans = ctysilvernumplans,
      competitiveSilver = length(unique(competitiveCarriersSilver)),
      goldFraction = goldFraction,
      medianctygoldprc = median(ctygoldprc),
      meanctygoldprc = mean(ctygoldprc),
      sdctygoldprc = sd(ctygoldprc),
      maxctygoldprc = max(ctygoldprc),
      minctygoldprc = min(ctygoldprc),
      ctygoldnumplans = ctygoldnumplans,
      competitiveGold = length(unique(competitiveCarriersgold)),
      compCarrier1 = competitiveCarrier1,
      compCarrier2 = competitiveCarrier2,
      compCarrier3 = competitiveCarrier3,
      compCarrier4 = competitiveCarrier4,
      compCarrier5 = competitiveCarrier5,
      compCarrier6 = competitiveCarrier6
    )
  }
}
processStatePlans <- function(stateplans) {
  # Summarise every county in one state, drop counties where
  # processCountyPlans() returned NULL, and keep only counties with fewer
  # than three competitive bronze carriers (compCarrier3 is NA).
  county_summaries <- by(stateplans, stateplans$County, processCountyPlans)
  county_summaries <- county_summaries[!sapply(county_summaries, is.null)]
  ctydf <- ldply(county_summaries, data.frame)
  ctydf[is.na(ctydf$compCarrier3), ]
}
marketSummaryStatePlans <- function(all = FALSE) {
  # Build the per-state market summary grouped by the two leading
  # competitive carriers.  Reads the globals `counties` and `plans`, and
  # (re)assigns the global `allCounties` used by processCountyPlans().
  allCounties <<- processCounties(counties, all)
  state_results <- by(plans, plans$State, processStatePlans)
  state_df <- ldply(state_results, data.frame)
  # Market size is the enrolment-weighted sum of mean premiums across the
  # three metal levels; numPlans the summed enrolment fractions.
  mkt <- ddply(state_df, .(state, compCarrier1, compCarrier2), summarize,
               marketSize = sum(bronzeFraction * meanctybronzeprc, na.rm = TRUE)
               + sum(silverFraction * meanctysilverprc, na.rm = TRUE)
               + sum(goldFraction * meanctygoldprc, na.rm = TRUE),
               numPlans = sum(bronzeFraction) + sum(silverFraction) + sum(goldFraction))
  mkt[order(mkt$state, -mkt$marketSize), ]
}
processAllStatePlans <- function(stateplans) {
  # Like processStatePlans(), but keeps every county regardless of how
  # many competitive carriers it has.
  county_summaries <- by(stateplans, stateplans$County, processCountyPlans)
  county_summaries <- county_summaries[!sapply(county_summaries, is.null)]
  ldply(county_summaries, data.frame)
}
allStatePlansByCounty <- function() {
  # County-level summaries for every state (all counties, not just the
  # populous ones).  Reads the globals `counties` and `plans` and updates
  # the global `allCounties`.
  allCounties <<- processCounties(counties, TRUE)
  state_results <- by(plans, plans$State, processAllStatePlans)
  ldply(state_results, data.frame)
}
summaryAllStatePlans <- function() {
  # Per-state market size and plan counts over all counties, merged with
  # broker compensation data and ranked by relative value.
  allCounties <<- processCounties(counties, TRUE)
  state_results <- by(plans, plans$State, processAllStatePlans)
  state_df <- ldply(state_results, data.frame)
  bests <- ddply(state_df, .(state), summarize,
                 marketSize = sum(bronzeFraction * meanctybronzeprc, na.rm = TRUE)
                 + sum(silverFraction * meanctysilverprc, na.rm = TRUE)
                 + sum(goldFraction * meanctygoldprc, na.rm = TRUE),
                 numPlans = sum(bronzeFraction, na.rm = TRUE) + sum(silverFraction, na.rm = TRUE) + sum(goldFraction, na.rm = TRUE))
  # Broker compensation index per state; rows are assumed to line up with
  # the built-in state.abb ordering -- TODO confirm against the CSV.
  brokerComps <- read.csv("./brokercomp.csv")
  brokerComps <- cbind(brokerComps, state.abb)
  brokerComps <- subset(brokerComps, select = c(ind, state.abb))
  names(brokerComps)[2] <- "state"
  bests <- merge(bests, brokerComps)
  bests$relVal <- bests[, "numPlans"] * bests[, "ind"]
  bests[order(-bests$relVal), ]
}
processBestCarrierTargets <- function(df) {
  # For one state's market summary (one row per carrier pair), find the
  # pair of competitive carriers that jointly covers the largest market
  # size.  A row counts towards a candidate pair when either carrier
  # appears alone (partner NA) or when the pair appears in either order.
  #
  # Returns a list: carrier1, carrier2, max (market size covered) and
  # numPlans (enrolment-fraction total covered).
  carriers <- c(as.character(df[, "compCarrier1"]), as.character(df[, "compCarrier2"]))
  carriers <- carriers[complete.cases(carriers)]
  uniqCarriers <- unique(carriers)
  # Candidate pairs: all 2-combinations (gtools::combinations), or a single
  # row when there are fewer than three distinct carriers; "NULL CARRIER"
  # pads out a singleton.
  if (length(uniqCarriers) > 2) {
    uc <- combinations(length(uniqCarriers), 2, uniqCarriers)
  } else if (length(uniqCarriers) == 2) {
    uc <- matrix(uniqCarriers, nrow = 1)
  } else {
    uc <- matrix(c(uniqCarriers, "NULL CARRIER"), nrow = 1)
  }
  ucMax <- character()
  best_size <- 0  # renamed from `max`, which shadowed base::max
  numPlans <- 0
  for (i in seq_len(nrow(uc))) {
    localMax <- 0
    localNumPlans <- 0
    for (j in seq_len(nrow(df))) {
      if ((df[j, "compCarrier1"] == uc[i, 1] && is.na(df[j, "compCarrier2"])) ||
          (df[j, "compCarrier1"] == uc[i, 2] && is.na(df[j, "compCarrier2"])) ||
          (df[j, "compCarrier1"] == uc[i, 1] && df[j, "compCarrier2"] == uc[i, 2]) ||
          (df[j, "compCarrier1"] == uc[i, 2] && df[j, "compCarrier2"] == uc[i, 1])) {
        localMax <- localMax + df[j, "marketSize"]
        localNumPlans <- localNumPlans + df[j, "numPlans"]
      }
    }
    if (localMax > best_size) {
      best_size <- localMax
      numPlans <- localNumPlans
      ucMax <- uc[i, ]
    }
  }
  # Dead code removed: the original also built (and discarded) a data frame
  # of the winning rows, and evaluated several expressions whose results
  # were never used.
  list(carrier1 = ucMax[1], carrier2 = ucMax[2], max = best_size, numPlans = numPlans)
}
bestCarrierTargets <- function(mkt = NULL) {
  # Pick the best carrier pair per state and rank states by relative value
  # (plans covered times the broker compensation index).
  if (is.null(mkt)) {
    mkt <- marketSummaryStatePlans()
  }
  bests <- dlply(mkt, .(state), processBestCarrierTargets)
  bests <- ldply(bests, data.frame)
  brokerComps <- read.csv("./brokercomp.csv")
  brokerComps <- cbind(brokerComps, state.abb)
  brokerComps <- subset(brokerComps, select = c(ind, state.abb))
  names(brokerComps)[2] <- "state"
  bests <- merge(bests, brokerComps)
  bests$relVal <- bests[, "numPlans"] * bests[, "ind"]
  bests[order(-bests$relVal), ]
}
|
###For mu_F=0.1
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# ODE right-hand side for deSolve::ode(): a ring of S = L + T cells, where
# cells 1..L are fished ground and cells L+1..L+T form the MPA.
# NOTE(review): `T`, `F` and `L` here are *parameter names* supplied via
# `parameters` (T = MPA size, L = fished size, F = fishing mortality);
# they shadow R's TRUE/FALSE shorthands inside with(), which works but is
# fragile -- consider renaming.
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
# x: densities in the L fished cells; y: densities in the T MPA cells.
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
# A collects dN/dt for all L+T cells.
A<-array(NA,dim=c(1,(L+T)))
# Fished boundary cells: recruitment R, mortality mu+F, emigration at rate
# D1, plus half of each neighbour's emigrants.  The domain is a ring, so
# MPA cell T neighbours fished cell 1.
if(L==1)
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
# Interior fished cells exchange only with fished neighbours.
if(L-1>=2)
{
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA boundary cells: no fishing mortality; movement rate D2 inside the
# MPA, with immigration from the adjacent fished cells.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
# Interior MPA cells.
if(T-1>=2)
{
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve expects a list whose first element is the derivative vector.
list(c(A))
})
}
#################
# Simulation setup: integrate the JAP08 model to (approximate) equilibrium
# for every combination of differential-movement multiplier (beta) and MPA
# size (h), with and without movement (the "before" scenario).
Timesteps=500
times <- seq(0, Timesteps, by = 1)
# S: total number of cells in the ring.
S=10
inits <- rep(1,S)
beta<-seq(1,4.1,0.25)
h<-seq(1,9,1)
##F here is mu_F in the model
# NOTE(review): assigning to `F` shadows R's FALSE shorthand for the rest
# of the script; it works but is fragile.
F=0.1
##h is for MPA size, r is for differential movement
# eqn / eqn_before hold the final-time density of every cell for each
# (beta, h) combination.
eqn<-array(NA,dim=c(length(beta),length(h),S))
eqn_before<-array(NA,dim=c(length(beta),length(h),S))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# NOTE(review): the ode() calls below do not depend on z, so each ODE is
# re-solved S times; hoisting them out of the z loop would give identical
# results S times faster.
for(z in 1:S)
{
parameters <- c(T=h[j],L=S-h[j],R=2,mu=0.5,D1=0.5*beta[i],D2=0.5,F=0.1)
out= ode(y = inits, times = times, func = JAP08, parms = parameters)
eqn[i,j,z]<-out[Timesteps+1,z+1]
###before data for each cell
# "Before" scenario: movement switched off (D1 = D2 = 0).
parameters_before <- c(T=h[j],L=S-h[j],R=2,mu=0.5,D1=0*beta[i],D2=0,F=0.1)
out_before= ode(y = inits, times = times, func = JAP08, parms = parameters_before)
eqn_before[i,j,z]<-out_before[Timesteps+1,z+1]
}
}
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# eqnmean_ba[, , 1]: mean "before" density over the fished cells;
# eqnmean_ba[, , 2]: mean "after" density inside the MPA.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# Regional abundance before = per-cell density times the S = 10 cells.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield after: total density in the fished cells times mu_F.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
tiff("Fig.S5_fishing yield-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
###For mu_F=0.25
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# Right-hand side of the patch ODE model, in the form required by deSolve::ode.
#   t          : time (unused; the system is autonomous)
#   inits      : state vector of length L + T -- densities of the L fished
#                patches (x) followed by the T protected MPA patches (y)
#   parameters : named vector with T (number of MPA patches), L (number of
#                fished patches), R (recruitment), mu (natural mortality),
#                F (fishing mortality, fished patches only), and D1 / D2
#                (emigration rates out of fished / protected patches)
# NOTE(review): the parameter names T and F mask the logical constants
# TRUE/FALSE inside with(); this works because both are always supplied,
# but renaming them would break the named parameter vectors built below.
# Coupling convention shown by the code: fished patch x[1] exchanges with
# MPA patch y[1], and fished patch x[L] with y[T]; emigrants split evenly
# (the /2 factors) between a patch's two neighbours.
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
A<-array(NA,dim=c(1,(L+T)))
# Fished patches: recruitment minus natural + fishing mortality minus
# emigration (D1), plus half of each neighbour's emigrants.
if(L==1)
{
# Single fished patch: both immigration terms come from MPA patches.
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
if(L-1>=2)
{
# Interior fished patches exchange only with fished neighbours.
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA patches: same structure but without the fishing mortality F.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
if(T-1>=2)
{
# Interior MPA patches.
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve expects a list whose first element is the derivative vector;
# c(A) flattens the 1 x (L+T) array into a plain vector.
list(c(A))
})
}
#################
## ---------------------------------------------------------------------------
## Parameter sweep for mu_F = 0.25: same design as the mu_F = 0.1 section.
## ---------------------------------------------------------------------------
Timesteps <- 500
times <- seq(0, Timesteps, by = 1)
S <- 10                        # total number of patches
inits <- rep(1, S)             # every patch starts at density 1
beta <- seq(1, 4.1, 0.25)      # differential-movement multiplier applied to D1
h <- seq(1, 9, 1)              # number of MPA patches
## F here is mu_F (fishing mortality) in the model
F <- 0.25
## eqn / eqn_before hold the final-time density of each of the S patches
## for every (beta, h) combination.
eqn <- array(NA, dim = c(length(beta), length(h), S))
eqn_before <- array(NA, dim = c(length(beta), length(h), S))
for (i in seq_along(beta)) {
  for (j in seq_along(h)) {
    ## The ODE solution depends only on (i, j), so solve once per grid cell
    ## and copy all S patch densities at once.  (The original code re-ran
    ## the solver S times per cell with identical parameters.)
    parameters <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                    D1 = 0.5 * beta[i], D2 = 0.5, F = F)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    ## "Before" scenario: no movement at all (D1 = D2 = 0), same fishing.
    parameters_before <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                           D1 = 0, D2 = 0, F = F)
    out_before <- ode(y = inits, times = times, func = JAP08,
                      parms = parameters_before)
    eqn_before[i, j, ] <- out_before[Timesteps + 1, 1 + seq_len(S)]
  }
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# Before/after comparison: mean equilibrium density inside the MPA ("after")
# divided by the mean equilibrium density of the no-movement "before" run.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# Patches 1:(S-h[j]) are the fished ground; (S-h[j]+1):S are the MPA.
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# NOTE(review): the literal 10 below is the total number of patches S.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield: harvest rate F times total equilibrium density summed over
# the fished patches only.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
# Local effect: mean density inside the MPA relative to the fished area.
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
# Regional effect: total equilibrium abundance over all S patches.
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
# Reshape each beta x h matrix to long form (columns X1, X2, value) for ggplot.
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
# Normalise yield and regional abundance by their "before" baselines; the
# ratio metrics are already relative, hence the "/1".
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
# Heat maps: x axis is beta, y axis is the MPA fraction h/S (the /10 is the
# hard-coded patch count S); diverging fill centred on 1 (white).
tiff("Fig.S5_fishing yield-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
####For mu_F=0.45
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# Right-hand side of the patch ODE model (duplicate of the definition in the
# mu_F = 0.25 section; redefined here because the section starts with
# rm(list=ls())).  See the first copy for the full parameter description.
#   inits      : densities of L fished patches (x) then T MPA patches (y)
#   parameters : named vector T, L, R, mu, F, D1, D2
# NOTE(review): parameter names T and F mask TRUE/FALSE inside with().
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
A<-array(NA,dim=c(1,(L+T)))
# Fished patches: recruitment - (mu+F) mortality - emigration + immigration.
if(L==1)
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
if(L-1>=2)
{
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA patches: same structure without the fishing mortality F.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
if(T-1>=2)
{
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve requires a list whose first element is the derivative vector.
list(c(A))
})
}
#################
## ---------------------------------------------------------------------------
## Parameter sweep for mu_F = 0.45: same design as the mu_F = 0.1 section.
## ---------------------------------------------------------------------------
Timesteps <- 500
times <- seq(0, Timesteps, by = 1)
S <- 10                        # total number of patches
inits <- rep(1, S)             # every patch starts at density 1
beta <- seq(1, 4.1, 0.25)      # differential-movement multiplier applied to D1
h <- seq(1, 9, 1)              # number of MPA patches
## F here is mu_F (fishing mortality) in the model
F <- 0.45
## eqn / eqn_before hold the final-time density of each of the S patches
## for every (beta, h) combination.
eqn <- array(NA, dim = c(length(beta), length(h), S))
eqn_before <- array(NA, dim = c(length(beta), length(h), S))
for (i in seq_along(beta)) {
  for (j in seq_along(h)) {
    ## The ODE solution depends only on (i, j), so solve once per grid cell
    ## and copy all S patch densities at once.  (The original code re-ran
    ## the solver S times per cell with identical parameters.)
    parameters <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                    D1 = 0.5 * beta[i], D2 = 0.5, F = F)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    ## "Before" scenario: no movement at all (D1 = D2 = 0), same fishing.
    parameters_before <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                           D1 = 0, D2 = 0, F = F)
    out_before <- ode(y = inits, times = times, func = JAP08,
                      parms = parameters_before)
    eqn_before[i, j, ] <- out_before[Timesteps + 1, 1 + seq_len(S)]
  }
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# Before/after comparison: mean equilibrium density inside the MPA ("after")
# divided by the mean equilibrium density of the no-movement "before" run.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# Patches 1:(S-h[j]) are the fished ground; (S-h[j]+1):S are the MPA.
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# NOTE(review): the literal 10 below is the total number of patches S.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield: harvest rate F times total equilibrium density summed over
# the fished patches only.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
# Local effect: mean density inside the MPA relative to the fished area.
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
# Regional effect: total equilibrium abundance over all S patches.
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
# Reshape each beta x h matrix to long form (columns X1, X2, value) for ggplot.
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
# Normalise yield and regional abundance by their "before" baselines; the
# ratio metrics are already relative, hence the "/1".
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
# Heat maps: x axis is beta, y axis is the MPA fraction h/S (the /10 is the
# hard-coded patch count S); diverging fill centred on 1 (white).
tiff("Fig.S5_fishing yield-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
| /Fig. S5 code copy.R | permissive | jiaojiaojing84/Ecology_2477 | R | false | false | 19,074 | r | ###For mu_F=0.1
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# Right-hand side of the patch ODE model, as required by deSolve::ode
# (duplicate copy of this file in the dataset dump).
#   inits      : densities of L fished patches (x) then T MPA patches (y)
#   parameters : named vector T, L, R, mu, F, D1, D2
# NOTE(review): parameter names T and F mask TRUE/FALSE inside with().
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
A<-array(NA,dim=c(1,(L+T)))
# Fished patches: recruitment - (mu+F) mortality - emigration + immigration.
if(L==1)
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
if(L-1>=2)
{
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA patches: same structure without the fishing mortality F.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
if(T-1>=2)
{
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve requires a list whose first element is the derivative vector.
list(c(A))
})
}
#################
## ---------------------------------------------------------------------------
## Parameter sweep for mu_F = 0.1 (duplicate copy of this file): integrate
## the patch model for every (beta, h) combination, with and without movement.
## ---------------------------------------------------------------------------
Timesteps <- 500
times <- seq(0, Timesteps, by = 1)
S <- 10                        # total number of patches
inits <- rep(1, S)             # every patch starts at density 1
beta <- seq(1, 4.1, 0.25)      # differential-movement multiplier applied to D1
h <- seq(1, 9, 1)              # number of MPA patches
## F here is mu_F (fishing mortality) in the model
F <- 0.1
eqn <- array(NA, dim = c(length(beta), length(h), S))
eqn_before <- array(NA, dim = c(length(beta), length(h), S))
for (i in seq_along(beta)) {
  for (j in seq_along(h)) {
    ## The ODE solution depends only on (i, j), so solve once per grid cell
    ## instead of once per patch z as the original did.
    parameters <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                    D1 = 0.5 * beta[i], D2 = 0.5, F = F)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    ## "Before" scenario: no movement at all (D1 = D2 = 0), same fishing.
    parameters_before <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                           D1 = 0, D2 = 0, F = F)
    out_before <- ode(y = inits, times = times, func = JAP08,
                      parms = parameters_before)
    eqn_before[i, j, ] <- out_before[Timesteps + 1, 1 + seq_len(S)]
  }
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# Before/after comparison: mean MPA density ("after") over the mean density
# of the no-movement "before" run.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# Patches 1:(S-h[j]) are the fished ground; (S-h[j]+1):S are the MPA.
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# NOTE(review): the literal 10 below is the total number of patches S.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield: F times total equilibrium density on fished patches only.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
# Local effect: mean MPA density relative to mean fished-area density.
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
# Regional effect: total equilibrium abundance over all S patches.
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
# Long-form reshape (columns X1, X2, value) for ggplot.
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
# Normalise yield and regional abundance by the "before" baselines.
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
# Heat maps of each metric over (beta, h/S), diverging scale centred on 1.
tiff("Fig.S5_fishing yield-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.1.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
###For mu_F=0.25
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# Right-hand side of the patch ODE model, as required by deSolve::ode
# (duplicate copy).  inits = L fished-patch densities (x) then T MPA
# densities (y); parameters = named vector T, L, R, mu, F, D1, D2.
# NOTE(review): parameter names T and F mask TRUE/FALSE inside with().
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
A<-array(NA,dim=c(1,(L+T)))
# Fished patches: recruitment - (mu+F) mortality - emigration + immigration.
if(L==1)
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
if(L-1>=2)
{
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA patches: same structure without the fishing mortality F.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
if(T-1>=2)
{
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve requires a list whose first element is the derivative vector.
list(c(A))
})
}
#################
## ---------------------------------------------------------------------------
## Parameter sweep for mu_F = 0.25 (duplicate copy of this file).
## ---------------------------------------------------------------------------
Timesteps <- 500
times <- seq(0, Timesteps, by = 1)
S <- 10                        # total number of patches
inits <- rep(1, S)             # every patch starts at density 1
beta <- seq(1, 4.1, 0.25)      # differential-movement multiplier applied to D1
h <- seq(1, 9, 1)              # number of MPA patches
## F here is mu_F (fishing mortality) in the model
F <- 0.25
eqn <- array(NA, dim = c(length(beta), length(h), S))
eqn_before <- array(NA, dim = c(length(beta), length(h), S))
for (i in seq_along(beta)) {
  for (j in seq_along(h)) {
    ## The ODE solution depends only on (i, j), so solve once per grid cell
    ## instead of once per patch z as the original did.
    parameters <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                    D1 = 0.5 * beta[i], D2 = 0.5, F = F)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    ## "Before" scenario: no movement at all (D1 = D2 = 0), same fishing.
    parameters_before <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                           D1 = 0, D2 = 0, F = F)
    out_before <- ode(y = inits, times = times, func = JAP08,
                      parms = parameters_before)
    eqn_before[i, j, ] <- out_before[Timesteps + 1, 1 + seq_len(S)]
  }
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# Before/after comparison: mean MPA density ("after") over the mean density
# of the no-movement "before" run.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# Patches 1:(S-h[j]) are the fished ground; (S-h[j]+1):S are the MPA.
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# NOTE(review): the literal 10 below is the total number of patches S.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield: F times total equilibrium density on fished patches only.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
# Local effect: mean MPA density relative to mean fished-area density.
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
# Regional effect: total equilibrium abundance over all S patches.
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
# Long-form reshape (columns X1, X2, value) for ggplot.
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
# Normalise yield and regional abundance by the "before" baselines.
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
# Heat maps of each metric over (beta, h/S), diverging scale centred on 1.
tiff("Fig.S5_fishing yield-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.25.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
####For mu_F=0.45
rm(list=ls())
#setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
library(tidyverse)
################test
#####L is fishing ground while T is MPAs, total area is S
# Right-hand side of the patch ODE model, as required by deSolve::ode
# (duplicate copy).  inits = L fished-patch densities (x) then T MPA
# densities (y); parameters = named vector T, L, R, mu, F, D1, D2.
# NOTE(review): parameter names T and F mask TRUE/FALSE inside with().
JAP08<-function(t, inits,parameters) {
with(as.list(c(inits, parameters)),{
x<-inits[1:L]
y<-inits[(L+1):(L+T)]
A<-array(NA,dim=c(1,(L+T)))
# Fished patches: recruitment - (mu+F) mortality - emigration + immigration.
if(L==1)
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
}
else
{
A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
}
if(L-1>=2)
{
for(i in 2:(L-1))
{
A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
}
}
# MPA patches: same structure without the fishing mortality F.
if(T==1)
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
}
else
{
A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
}
if(T-1>=2)
{
for(i in (L+2):(T+L-1))
{
A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
}
}
# deSolve requires a list whose first element is the derivative vector.
list(c(A))
})
}
#################
## ---------------------------------------------------------------------------
## Parameter sweep for mu_F = 0.45 (duplicate copy of this file).
## ---------------------------------------------------------------------------
Timesteps <- 500
times <- seq(0, Timesteps, by = 1)
S <- 10                        # total number of patches
inits <- rep(1, S)             # every patch starts at density 1
beta <- seq(1, 4.1, 0.25)      # differential-movement multiplier applied to D1
h <- seq(1, 9, 1)              # number of MPA patches
## F here is mu_F (fishing mortality) in the model
F <- 0.45
eqn <- array(NA, dim = c(length(beta), length(h), S))
eqn_before <- array(NA, dim = c(length(beta), length(h), S))
for (i in seq_along(beta)) {
  for (j in seq_along(h)) {
    ## The ODE solution depends only on (i, j), so solve once per grid cell
    ## instead of once per patch z as the original did.
    parameters <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                    D1 = 0.5 * beta[i], D2 = 0.5, F = F)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    ## "Before" scenario: no movement at all (D1 = D2 = 0), same fishing.
    parameters_before <- c(T = h[j], L = S - h[j], R = 2, mu = 0.5,
                           D1 = 0, D2 = 0, F = F)
    out_before <- ode(y = inits, times = times, func = JAP08,
                      parms = parameters_before)
    eqn_before[i, j, ] <- out_before[Timesteps + 1, 1 + seq_len(S)]
  }
}
###before-after
###using after density at each cell in MPA / before density at each cell
####after mean/before mean indicates the before-after effect
# Before/after comparison: mean MPA density ("after") over the mean density
# of the no-movement "before" run.
eqnmean_ba<-array(NA,dim=c(length(beta),length(h),2))
before_after<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
# Patches 1:(S-h[j]) are the fished ground; (S-h[j]+1):S are the MPA.
eqnmean_ba[i,j,1]<-mean(eqn_before[i,j,1:(S-h[j])]) # mean density before MPA
eqnmean_ba[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S]) # mean density in MPA
before_after[i,j]<-eqnmean_ba[i,j,2]/eqnmean_ba[i,j,1]
}
}
###before status
###local effect: =1 since there is no MPA
loc_before=1
###regional abundance:eqnmean_ba[1,1,1]=2.666667
# NOTE(review): the literal 10 below is the total number of patches S.
reg_before<-eqnmean_ba[1,1,1]*10
###fishing yield
fis_before<-F*reg_before
#local effect
# Fishing yield: F times total equilibrium density on fished patches only.
yield<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
yield[i,j]<-sum(eqn[i,j,1:(S-h[j])])*F
}
}
##local effect
# Local effect: mean MPA density relative to mean fished-area density.
eqnmean<-array(NA,dim=c(length(beta),length(h),2))
loceff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
eqnmean[i,j,1]<-mean(eqn[i,j,1:(S-h[j])])
eqnmean[i,j,2]<-mean(eqn[i,j,(S-h[j]+1):S])
loceff[i,j]<-eqnmean[i,j,2]/eqnmean[i,j,1]
}
}
##regional effect
# Regional effect: total equilibrium abundance over all S patches.
regeff<-array(NA,dim=c(length(beta),length(h)))
for(i in 1:length(beta))
{
for(j in 1:length(h))
{
regeff[i,j]<-sum(eqn[i,j,1:S])
}
}
# Long-form reshape (columns X1, X2, value) for ggplot.
before_after1<-melt(before_after)
yield1<-melt(yield)
loceff1<-melt(loceff)
regeff1<-melt(regeff)
theme_set(theme_bw(20))
# Normalise yield and regional abundance by the "before" baselines.
before_after2<-before_after1
#before_after2$value<-before_after1$value/before_after1$value[61]
before_after2$value<-before_after1$value/1
yield2<-yield1
#yield2$value<-yield1$value/yield1$value[61]
yield2$value<-yield1$value/fis_before
loceff2<-loceff1
#loceff2$value<-loceff1$value/loceff1$value[61]
loceff2$value<-loceff1$value/1
regeff2<-regeff1
#regeff2$value<-regeff1$value/regeff1$value[61]
regeff2$value<-regeff1$value/reg_before
# Heat maps of each metric over (beta, h/S), diverging scale centred on 1.
tiff("Fig.S5_fishing yield-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(yield2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_local-effect-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(loceff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value))+guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue' ,
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
tiff("Fig.S5_regional-effect-0.45.tiff", width=5,height=5, units='in',res=600)
p1 <- ggplot(regeff2, aes(beta[X1], h[X2]/10)) + geom_tile(aes(fill = value)) +guides(fill = guide_colorbar(title=""))
p1+xlab("")+ylab('')+ scale_fill_gradient2(low = 'red', mid='white',high = 'steelblue',
midpoint=1, space = "rgb", na.value = "grey50", guide =
"colourbar")+scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand
= c(0, 0))
dev.off()
|
#===============================================================================
# gscp_7_set_up_dbms.R
#===============================================================================
# Start adding dbms code to replace data frames.
# Most of this part is cloned from dbms.initialise.melb.grassland.R
#===============================================================================
source (paste0 (sourceCodeLocationWithSlash, "dbms_functions.R"))
db_name = "test.db"
#------------------------------------------------------------
# Check whether database exists and remove it if it does.
#------------------------------------------------------------
safe.remove.file.if.exists (db_name)
#------------------------------------------------------------
# Create the DB and make the tables and
# column headings
#------------------------------------------------------------
connect_to_database (db_name)
# db_driver <- dbDriver("SQLite")
# db_con <- dbConnect (db_driver, db_name)
#------------------------------------------------------------
# Define the column names and types of the tables.
# Each definition is an n x 2 matrix: column name, column type.
#------------------------------------------------------------
node_table_defn =
    matrix (c ('ID', 'int',
               'GROUP_ID', 'int',
               'DEPENDENT_SET_MEMBER', 'int'
              ),
            byrow = TRUE,
            ncol = 2 )
link_table_defn =
    matrix (c ('ID', 'int',
               'NODE_1', 'int',
               'NODE_2', 'int',
               'LINK_DIRECTION', 'string'  # "UN", "BI", "FT", "TF"
              ),
            byrow = TRUE,
            ncol = 2 )
#------------------------------------------------------------
# Build the sql expression to create the table using SQLite
#------------------------------------------------------------
node_table_name = "nodes"
sql_create_table_query =
    build_sql_create_table_expression (node_table_name,
                                       node_table_defn)
sql_send_operation (sql_create_table_query)
link_table_name = "links"
sql_create_table_query =
    build_sql_create_table_expression (link_table_name,
                                       link_table_defn)  # BUG FIX: was node_table_defn
sql_send_operation (sql_create_table_query)
# add some dummy data for testing
testSqlCmd <- paste0 ('insert into ', node_table_name,
                      ' values (1, 0, 1)')
sql_send_operation (testSqlCmd)
#query2s <- 'insert into staticPUinfo values(1, 4567)';
#sql.send.operation( query2s );
# BUG FIX: the links test row now matches the 4-column link schema
# (ID, NODE_1, NODE_2, LINK_DIRECTION); the old 3-value insert was written
# against the nodes schema and would fail once the table is created
# with the correct definition above.
testSqlCmd <- paste0 ('insert into ', link_table_name,
                      ' values (1, 1, 2, "UN")')
sql_send_operation (testSqlCmd)
# some other example queries
#query4 <- 'update PUstatus set RESERVED = -1 where ID = 1';
#sql.send.operation( query4 );
#----------
close_database_connection()
#===============================================================================
| /projects/rdvPackages/biodivprobgen/R/gscp_7_set_up_dbms.R | no_license | langfob/rdv-framework-frozen-google-code-export-do-not-change | R | false | false | 2,933 | r | #===============================================================================
# gscp_7_set_up_dbms.R
#===============================================================================
# Start adding dbms code to replace data frames.
# Most of this part is cloned from dbms.initialise.melb.grassland.R
#===============================================================================
source (paste0 (sourceCodeLocationWithSlash, "dbms_functions.R"))
db_name = "test.db"
#------------------------------------------------------------
# Check whether database exists and remove it if it does.
#------------------------------------------------------------
safe.remove.file.if.exists (db_name)
#------------------------------------------------------------
# Create the DB and make the tables and
# column headings
#------------------------------------------------------------
connect_to_database (db_name)
# db_driver <- dbDriver("SQLite")
# db_con <- dbConnect (db_driver, db_name)
#------------------------------------------------------------
# Define the column names and types of the tables.
# Each definition is an n x 2 matrix: column name, column type.
#------------------------------------------------------------
node_table_defn =
    matrix (c ('ID', 'int',
               'GROUP_ID', 'int',
               'DEPENDENT_SET_MEMBER', 'int'
              ),
            byrow = TRUE,
            ncol = 2 )
link_table_defn =
    matrix (c ('ID', 'int',
               'NODE_1', 'int',
               'NODE_2', 'int',
               'LINK_DIRECTION', 'string'  # "UN", "BI", "FT", "TF"
              ),
            byrow = TRUE,
            ncol = 2 )
#------------------------------------------------------------
# Build the sql expression to create the table using SQLite
#------------------------------------------------------------
node_table_name = "nodes"
sql_create_table_query =
    build_sql_create_table_expression (node_table_name,
                                       node_table_defn)
sql_send_operation (sql_create_table_query)
link_table_name = "links"
sql_create_table_query =
    build_sql_create_table_expression (link_table_name,
                                       link_table_defn)  # BUG FIX: was node_table_defn
sql_send_operation (sql_create_table_query)
# add some dummy data for testing
testSqlCmd <- paste0 ('insert into ', node_table_name,
                      ' values (1, 0, 1)')
sql_send_operation (testSqlCmd)
#query2s <- 'insert into staticPUinfo values(1, 4567)';
#sql.send.operation( query2s );
# BUG FIX: links test row now matches the 4-column link schema
# (ID, NODE_1, NODE_2, LINK_DIRECTION); the old 3-value insert targeted
# the (incorrect) nodes schema.
testSqlCmd <- paste0 ('insert into ', link_table_name,
                      ' values (1, 1, 2, "UN")')
sql_send_operation (testSqlCmd)
# some other example queries
#query4 <- 'update PUstatus set RESERVED = -1 where ID = 1';
#sql.send.operation( query4 );
#----------
close_database_connection()
#===============================================================================
|
#' Git information
#'
#' Displays git information (current branch, last commit message and whether
#' there are uncommitted modifications) from the repository of the current
#' working space.
#'
#' @param position character with position of the parameter. Default "top right".
#'
#' @import git2r
#' @import shiny
#' @import glue
#' @export
git_info <- function(position = "top right") {
  # NULL when the working directory is not inside a git repository.
  repo <- tryCatch({
    repository(".")
  }, error = function(e) {
    NULL
  })
  if (is.null(repo)) {
    git_message <- "It looks like you're not in a valid git repo."
  } else {
    branch <- repository_head(repo)[[1]]
    if (is.null(branch)) {
      # A repository with no commits has no HEAD yet.
      git_message <- "Empty repo."
    } else {
      last_commit <- as.character(commits(repo)[[1]]$message)
      stat <- status(repo)
      changes <- ""
      # Flag modified files, whether staged or unstaged.
      if (length(stat$unstaged$modified) + length(stat$staged$modified) > 0)
        # BUG FIX: corrected the "commited" typo and removed the stray
        # closing </b> tag that had no matching opening tag.
        changes <- HTML("<font color='red'>(!) Not committed changes</font>")
      git_message <- span(HTML(glue("Branch: <b>{branch}</b> </br>")),
                          HTML(glue("Last commit: <b>{last_commit}</b></br>")),
                          changes
      )
    }
  }
  display(git_message, position)
}
| /R/git.R | no_license | tomdewar/shiny.info | R | false | false | 1,151 | r | #' Git information
#'
#' Displays git information from the repository of the current working space.
#'
#' @param position character with position of the parameter. Default "top right".
#'
#' @import git2r
#' @import shiny
#' @import glue
#' @export
# Displays git information (branch, last commit, uncommitted-changes flag)
# for the repository in the current working directory.
git_info <- function(position = "top right") {
  # NULL when the working directory is not inside a git repository.
  repo <- tryCatch({
    repository(".")
  }, error = function(e) {
    NULL
  })
  if (is.null(repo)) {
    git_message <- "It looks like you're not in a valid git repo."
  } else {
    branch <- repository_head(repo)[[1]]
    if (is.null(branch)) {
      # A repository with no commits has no HEAD yet.
      git_message <- "Empty repo."
    } else {
      last_commit <- as.character(commits(repo)[[1]]$message)
      stat <- status(repo)
      changes <- ""
      # Flags modified files, whether staged or unstaged.
      # NOTE(review): "commited" is a typo and the </b> tag below has no
      # matching opening tag -- consider fixing the message string.
      if (length(stat$unstaged$modified) + length(stat$staged$modified) > 0)
        changes <- HTML("<font color='red'>(!) Not commited changes</b></font>")
      git_message <- span(HTML(glue("Branch: <b>{branch}</b> </br>")),
                          HTML(glue("Last commit: <b>{last_commit}</b></br>")),
                          changes
      )
    }
  }
  display(git_message, position)
}
|
library(plyr)
library(ggplot2)
## pulling data without nas
# NOTE(review): `activity` is assumed to be loaded earlier in the analysis
# (it is not defined in this file) -- confirm against the calling script.
clean <- activity[!is.na(activity$steps),]
## create average number of steps per interval
intervalTable <- ddply(clean, .(interval), summarize, Avg = mean(steps))
## Create line plot of average number of steps per interval
# BUG FIX: xlab/ylab are not arguments of ggplot() and were silently
# ignored there; the axis labels are applied via xlab()/ylab() below.
p <- ggplot(intervalTable, aes(x = interval, y = Avg))
p + geom_line() + xlab("Interval") + ylab("Average Number of Steps") +
  ggtitle("Average Number of Steps per Interval")
| /averstpr - Copy.R | no_license | Jennik3379/reproducible1 | R | false | false | 497 | r | library(plyr)
library(ggplot2)
##pulling data without nas
clean <- activity[!is.na(activity$steps),]
##create average number of steps per interval
intervalTable <- ddply(clean, .(interval), summarize, Avg = mean(steps))
##Create line plot of average number of steps per interval
p <- ggplot(intervalTable, aes(x=interval, y=Avg), xlab = "Interval", ylab="Average Number of Steps")
p + geom_line()+xlab("Interval")+ylab("Average Number of Steps")+ggtitle("Average Number of Steps per Interval")
|
#' parPBADownloader
#'
#' @export parPBADownloader
#'
#' @param concellos Municipality name based on the pbaurls (availables in data)
#' @param outdir directory where file structure of downloads and products will be created
#' @param pbaurls data with the municipality names and their download URLs
#' @param ncores number of cores used for the process (capped at 5)
#'
#' @description función to do the same as PBADownloader in parallel
#' (careful with restrictions...max 5-6 simultaneous downloads)
#' OS independent function (written just for windows and linux)
#'
#' @return it returns nothing but the download and folder structure.
#'
#' @examples
#' \dontrun{
#' # PARAMETERS
#' data("pbaurls")
#' outdir <- '../02_OUTPUT/'
#'
#' pbaurls$Concello # show municipalities available
#' concellos <- list("Paderne", "Pol","A Peroxa")
#' parPBADownloader(concellos, outdir, pbaurls, ncores = 5)
#'
#' # DOWNLOAD ALL IN PARALLEL
#' concellos <- pbaurls$Concello
#' parPBADownloader(concellos, outdir, pbaurls, ncores = 6)
#' }
parPBADownloader <- function(concellos, outdir, pbaurls, ncores=5){
    # Cap the number of workers at 5: each worker performs one download and
    # the server restricts simultaneous downloads.
    # (Removed the no-op `concellos = concellos` self-assignment.)
    maxload <- ifelse(ncores >= 5, 5, ncores)
    if (Sys.info()[[1]] == "Windows") {
        # PSOCK workers start empty, so the package and the objects used in
        # the loop must be shipped to them explicitly.
        cl <- parallel::makeCluster(maxload, type="PSOCK")
        doParallel::registerDoParallel(cl)
        parallel::clusterEvalQ(cl, library("PBADownloader"))
        # BUG FIX: clusterExport() defaults to looking in .GlobalEnv, where
        # the function arguments do not exist; export from the function
        # environment instead, and include 'pbaurls', which is referenced
        # inside the %dopar% body but was not exported before.
        parallel::clusterExport(cl, c('concellos', 'outdir', 'pbaurls'),
                                envir = environment())
        # Execute function
        foreach(i = concellos) %dopar% {PBADownloader::PBADownloader(i, outdir, pbaurls)}
        # Stop parallel
        parallel::stopCluster(cl)
    } else if (Sys.info()[[1]] == "Linux"){
        # FORK workers inherit the parent environment; no export needed.
        cl <- parallel::makeCluster(maxload, type="FORK")
        doParallel::registerDoParallel(cl)
        # Execute function
        foreach(i = concellos) %dopar% {PBADownloader::PBADownloader(i, outdir, pbaurls)}
        # Stop parallel
        parallel::stopCluster(cl)
    } else {
        return ("Unknown OS system")
    }
}
| /R/parPBADownloader.R | no_license | cesarkero/PBADownloader | R | false | false | 2,132 | r | #' parPBADownloader
#'
#' @export parPBADownloader
#'
#' @param concellos Municipality name based on the pbaurls (availables in data)
#' @param outdir directory where file structure of downloads and products will be created
#' @param ncores number of cores used for the process
#'
#' @description función to do the same as PBADownloader in parallel
#' (careful with restrictions...max 5-6 simultaneous downloads)
#' OS independent function (written just for windows and linux)
#'
#' @return it returns nothing but the download and folder structure.
#'
#' @examples
#' \dontrun{
#' # PARAMETERS
#' data("pbaurls")
#' outdir <- '../02_OUTPUT/'
#'
#' pbaurls$Concello # show municipalities available
#' concellos <- list("Paderne", "Pol","A Peroxa")
#' parPBADownloader(concellos, outdir, ncores = 5)
#'
#' # DOWNLOAD ALL IN PARALLEL
#' concellos <- pbaurls$Concello
#' parPBADownloader(concellos, outdir, ncores = 6)
#' }
parPBADownloader <- function(concellos, outdir, pbaurls, ncores=5){
# set max cores, meaning max simulataneus downloads too
concellos = concellos
maxload <- ifelse(ncores>=5, 5, ncores)
if (Sys.info()[[1]] == "Windows") {
# Set parallel
cl <- parallel::makeCluster(maxload, type="PSOCK")
doParallel::registerDoParallel(cl)
clusterEvalQ(cl, library("PBADownloader")) # load libraries
clusterExport(cl, c('concellos', 'outdir', 'ncores'))
# Execute function
foreach(i=concellos) %dopar% {PBADownloader::PBADownloader(i, outdir, pbaurls)}
# Stop parallel
stopCluster(cl)
} else if (Sys.info()[[1]] == "Linux"){
# Set parallel
cl <- parallel::makeCluster(maxload, type="FORK")
doParallel::registerDoParallel(cl)
# Execute function
foreach(i = concellos) %dopar% {PBADownloader::PBADownloader(i, outdir, pbaurls)}
# Stop parallel
stopCluster(cl)
} else {
return ("Unknown OS system")
}
}
|
#' Search shards.
#'
#' @export
#' @param index One or more indeces
#' @param routing A character vector of routing values to take into account
#' when determining which shards a request would be executed against.
#' @param preference Controls a preference of which shard replicas to execute
#' the search request on. By default, the operation is randomized between the
#' shard replicas. See [preference] for a list of all acceptable
#' values.
#' @param local (logical) Whether to read the cluster state locally in order
#' to determine where shards are allocated instead of using the Master node's
#' cluster state.
#' @param raw If `TRUE` (default), data is parsed to list. If `FALSE`, then
#' raw JSON
#' @param ... Curl args passed on to [httr::GET()]
#' @references
#' <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html>
#' @examples \dontrun{
#' search_shards(index = "plos")
#' search_shards(index = c("plos","gbif"))
#' search_shards(index = "plos", preference='_primary')
#' search_shards(index = "plos", preference='_shards:2')
#'
#' library('httr')
#' search_shards(index = "plos", config=verbose())
#' }
search_shards <- function(index=NULL, raw=FALSE, routing=NULL, preference=NULL,
  local=NULL, ...) {
  # Compose the query parameters; presumably ec() drops NULL entries.
  # NOTE(review): `raw` is documented but never referenced in this body --
  # confirm whether es_GET_() is expected to receive it.
  params <- ec(list(routing = routing, preference = preference, local = local))
  base_url <- make_url(es_get_auth())
  target <- file.path(base_url, esc(cl(index)), "_search_shards")
  es_GET_(target, params, ...)
}
| /R/search_shards.R | permissive | dpmccabe/elastic | R | false | false | 1,475 | r | #' Search shards.
#'
#' @export
#' @param index One or more indeces
#' @param routing A character vector of routing values to take into account
#' when determining which shards a request would be executed against.
#' @param preference Controls a preference of which shard replicas to execute
#' the search request on. By default, the operation is randomized between the
#' shard replicas. See [preference] for a list of all acceptable
#' values.
#' @param local (logical) Whether to read the cluster state locally in order
#' to determine where shards are allocated instead of using the Master node's
#' cluster state.
#' @param raw If `TRUE` (default), data is parsed to list. If `FALSE`, then
#' raw JSON
#' @param ... Curl args passed on to [httr::GET()]
#' @references
#' <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html>
#' @examples \dontrun{
#' search_shards(index = "plos")
#' search_shards(index = c("plos","gbif"))
#' search_shards(index = "plos", preference='_primary')
#' search_shards(index = "plos", preference='_shards:2')
#'
#' library('httr')
#' search_shards(index = "plos", config=verbose())
#' }
# Queries the _search_shards endpoint for the given indices.
# NOTE(review): `raw` is documented but never referenced in this body --
# confirm whether es_GET_() is expected to receive it.
search_shards <- function(index=NULL, raw=FALSE, routing=NULL, preference=NULL,
  local=NULL, ...) {
  # Build the base URL from the stored connection/auth settings.
  url <- make_url(es_get_auth())
  # Presumably ec() drops NULL entries from the parameter list.
  es_GET_(file.path(url, esc(cl(index)), "_search_shards"),
    ec(list(routing = routing, preference = preference, local = local)),
    ...)
}
|
# Interactive plotly charts comparing battleship CSP solver performance
# (runtime, assignments/prunings) across propagators and value orderings.
# NOTE(review): `p` is reassigned three times without being printed, so in a
# non-interactive run none of the plots is displayed -- presumably intended
# for interactive use. Also, `plot` shadows base::plot here.
library(plotly)
setwd('~/Downloads/battleship/data_visualization')
# board size x runtime, for model 1, for 3 propagators (propagators scaling with board size)
plot = read.csv('../results/large_board_runtime_model.csv')
plot = na.omit(plot)
p = plot_ly(plot, x = ~board.size, y = ~bt.runtime1, name = 'model1 BT', type = 'scatter', mode = 'lines', line = list(color = '#3F51B5', width = 2))%>%
  add_trace(y = ~fc.runtime1, name = 'model1 FC', line = list(color = '#2196F3', width = 2)) %>%
  add_trace(y = ~gac.runtime1, name = 'model1 GAC', line = list(color = '#B3E5FC', width = 2)) %>%
  layout(title = "Propagators Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Runtime (sec)", type = "log"))
# board size x runtime, for model 1, for decreasing order, for 3 propagators (assignments/prunings with board size)
p = plot_ly(plot, x = ~board.size, y = ~bt.assignment1, name = 'model1 BT assignments', type = 'scatter', mode = 'lines', line = list(color = '#B71C1C', width = 2))%>%
  add_trace(y = ~fc.assignment1, name = 'model1 FC assignments', line = list(color = 'rgb(0,255,0)', width = 2)) %>%
  add_trace(y = ~fc.pruning1, name = 'model1 FC prunings', line = list(color = '#1B5E20', width = 2)) %>%
  add_trace(y = ~gac.assignment1, name = 'model1 GAC assignments', line = list(color = '#2196F3', width = 2)) %>%
  add_trace(y = ~gac.pruning1, name = 'model1 GAC prunings', line = list(color = '#1A237E', width = 2)) %>%
  layout(title = "Assignments/Prunings Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Assignments/Prunings", type = "log"))
# board size x runtime, for model 1, for GAC, for 3 value orderings
plot = read.csv('../results/large_value_ordering.csv')
p = plot_ly(plot, x = ~board.size, y = ~dec.runtime1, name = 'GAC val_decreasing', type = 'scatter', mode = 'lines', line = list(color = 'rgb(255,0,0)', width = 2))%>%
  add_trace(y = ~dec_lcv.runtime1, name = 'GAC val_decreasing_lcv', line = list(color = 'rgb(0,255,0)', width = 2)) %>%
  add_trace(y = ~inc.runtime1, name = 'GAC val_increasing', line = list(color = 'rgb(0,0,255)', width = 2)) %>%
  layout(title = "Models Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Runtime (sec)"))
| /data_visualization/adv.R | no_license | pyliaorachel/battleship-ai | R | false | false | 2,338 | r | library(plotly)
# Plotly charts for battleship CSP solver performance (duplicate copy;
# requires library(plotly) to be loaded beforehand).
# NOTE(review): `p` is reassigned without being printed -- the plots only
# show when run interactively.
setwd('~/Downloads/battleship/data_visualization')
# board size x runtime, for model 1, for 3 propagators (propagators scaling with board size)
plot = read.csv('../results/large_board_runtime_model.csv')
plot = na.omit(plot)
p = plot_ly(plot, x = ~board.size, y = ~bt.runtime1, name = 'model1 BT', type = 'scatter', mode = 'lines', line = list(color = '#3F51B5', width = 2))%>%
  add_trace(y = ~fc.runtime1, name = 'model1 FC', line = list(color = '#2196F3', width = 2)) %>%
  add_trace(y = ~gac.runtime1, name = 'model1 GAC', line = list(color = '#B3E5FC', width = 2)) %>%
  layout(title = "Propagators Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Runtime (sec)", type = "log"))
# board size x runtime, for model 1, for decreasing order, for 3 propagators (assignments/prunings with board size)
p = plot_ly(plot, x = ~board.size, y = ~bt.assignment1, name = 'model1 BT assignments', type = 'scatter', mode = 'lines', line = list(color = '#B71C1C', width = 2))%>%
  add_trace(y = ~fc.assignment1, name = 'model1 FC assignments', line = list(color = 'rgb(0,255,0)', width = 2)) %>%
  add_trace(y = ~fc.pruning1, name = 'model1 FC prunings', line = list(color = '#1B5E20', width = 2)) %>%
  add_trace(y = ~gac.assignment1, name = 'model1 GAC assignments', line = list(color = '#2196F3', width = 2)) %>%
  add_trace(y = ~gac.pruning1, name = 'model1 GAC prunings', line = list(color = '#1A237E', width = 2)) %>%
  layout(title = "Assignments/Prunings Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Assignments/Prunings", type = "log"))
# board size x runtime, for model 1, for GAC, for 3 value orderings
plot = read.csv('../results/large_value_ordering.csv')
p = plot_ly(plot, x = ~board.size, y = ~dec.runtime1, name = 'GAC val_decreasing', type = 'scatter', mode = 'lines', line = list(color = 'rgb(255,0,0)', width = 2))%>%
  add_trace(y = ~dec_lcv.runtime1, name = 'GAC val_decreasing_lcv', line = list(color = 'rgb(0,255,0)', width = 2)) %>%
  add_trace(y = ~inc.runtime1, name = 'GAC val_increasing', line = list(color = 'rgb(0,0,255)', width = 2)) %>%
  layout(title = "Models Scaling with Board Size",
         xaxis = list(title = "Board Size"),
         yaxis = list (title = "Runtime (sec)"))
|
# Analysis of Google COVID-19 community mobility data for Greece:
# one correlation heatmap and two time-series plots (raw points and
# natural-spline smoothed lines) per administrative region.
directory <- "Z:/Covid-19 researches/Movement data"
setwd(directory)
library(chron)
library(gplots)
library(signal)
library(splines)
# Earlier alternative input (per-year regional reports), kept for reference:
#movement_data_table <- rbind(read.csv("2020_GR_Region_Mobility_Report.csv",stringsAsFactors=FALSE),read.csv("2021_GR_Region_Mobility_Report.csv",stringsAsFactors=FALSE))
movement_data_table <- read.csv("Global_Mobility_Report.csv",stringsAsFactors=FALSE)
# Keep only rows for Greece (column 2 is the country name).
movement_data_table <- movement_data_table[movement_data_table[,2] == "Greece",]
# Keep the sub-region name (col 3) and the mobility-change columns (9:15).
movement_data_table <- movement_data_table[,c(3,9:15)]
# Strip the "_percent..." suffix from the mobility column names.
colnames(movement_data_table) <- c(colnames(movement_data_table)[1:2],sapply(strsplit(colnames(movement_data_table)[3:dim(movement_data_table)[2]],split="_percent"),'[',1))
# Rows with an empty sub-region are the country-wide totals.
movement_data_table[movement_data_table[,1] == "",1] <- "Decentralized Administration of Greece total"
# Keep only the text after " of " in the region names.
movement_data_table[,1] <- sapply(strsplit(movement_data_table[,1],split=" of "),'[',2)
# NOTE(review): the following matrix is immediately overwritten by the next
# assignment and has no effect -- confirm which form was intended.
movement_percentage_changes <- matrix(as.numeric(unlist(movement_data_table[3:dim(movement_data_table)[2]])),nrow=dim(movement_data_table)[1],byrow=FALSE)
movement_percentage_changes <- movement_data_table[,3:8]
# Create the output folders if they do not yet exist.
if(!file.exists(paste(directory,"Movement correlations",sep="/")))
  dir.create(paste(directory,"Movement correlations",sep="/"))
if(!file.exists(paste(directory,"Movement progression",sep="/")))
  dir.create(paste(directory,"Movement progression",sep="/"))
# One iteration per region: heatmap PNG, raw time-series PNG, smoothed PNG.
for (i in 1:length(levels(as.factor(movement_data_table[,1]))))
{
  # Correlations between the six mobility categories within this region.
  png(file=file.path(paste(paste(getwd(),"Movement correlations",sep="/"),paste("Correlations [",levels(as.factor(movement_data_table[,1]))[i],"].png",sep=""),sep="/")),width=900,height=900)
  heatmap.2(cor(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,],use="complete.obs"),Rowv=FALSE,Colv=FALSE,dendrogram="none",main=paste("Correlations for",levels(as.factor(movement_data_table[,1]))[i],sep=" "),margins=c(15,15))
  dev.off()
  # Raw daily values, one colour per mobility category.
  png(file=file.path(paste(paste(getwd(),"Movement progression",sep="/"),paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"].png",sep=""),sep="/")),width=1600,height=1200)
  par(mar= c(5, 4, 7, 2) + 0.1)
  plot(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1]),col="red2",pch=16,xaxt="n",las=2,xlab="",ylab="% change from baseline (average of 03/01/2020 to 06/02/2020)",main=paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"]",sep=""))
  # Date labels every 10 days, counted from 14/02/2020.
  axis(1,at=seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10,labels=as.character(chron(dates.="14/02/2020",format="d/m/y") + seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10),las=2)
  for (j in 2:6)
    points(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j],pch=16,col=colors()[j*24])
  legend("bottomleft",legend=colnames(movement_percentage_changes),pch=15,col=c("red2",colors()[(2:6)*24]))
  par(mar= c(5, 4, 4, 2) + 0.1)
  dev.off()
  # Smoothed version: natural splines with 11 degrees of freedom fitted by lm().
  png(file=file.path(paste(paste(getwd(),"Movement progression",sep="/"),paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"] - smoothed.png",sep=""),sep="/")),width=1600,height=1200)
  par(mar= c(5, 4, 7, 2) + 0.1)
  plot(predict(lm(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1])~ns(seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1])),11)),x=seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1]))),col="red2",type="l",lwd=3,xaxt="n",las=2,xlab="",ylab="% change from baseline (average of 03/01/2020 to 06/02/2020)",main=paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"]",sep=""))
  axis(1,at=seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10,labels=as.character(chron(dates.="14/02/2020",format="d/m/y") + seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10),las=2)
  for (j in 2:6)
    lines(predict(lm(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j])~ns(seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j])),11)),x=seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j]))),lwd=3,col=colors()[j*24])
  legend("bottomleft",legend=colnames(movement_percentage_changes),pch=15,col=c("red2",colors()[(2:6)*24]))
  par(mar= c(5, 4, 4, 2) + 0.1)
  dev.off()
}
# Remove the loop indices from the workspace.
rm(i,j)
| /Re-analysis [2021-04-11]/Movement data/Movement data analysis.R | no_license | Hector-Xavier/COVID-19_in_Greece | R | false | false | 4,409 | r | directory <- "Z:/Covid-19 researches/Movement data"
# Analysis of Google COVID-19 community mobility data for Greece
# (duplicate copy; assumes `directory` is defined beforehand).
setwd(directory)
library(chron)
library(gplots)
library(signal)
library(splines)
# Earlier alternative input (per-year regional reports), kept for reference:
#movement_data_table <- rbind(read.csv("2020_GR_Region_Mobility_Report.csv",stringsAsFactors=FALSE),read.csv("2021_GR_Region_Mobility_Report.csv",stringsAsFactors=FALSE))
movement_data_table <- read.csv("Global_Mobility_Report.csv",stringsAsFactors=FALSE)
# Keep only rows for Greece; then the region name plus mobility columns.
movement_data_table <- movement_data_table[movement_data_table[,2] == "Greece",]
movement_data_table <- movement_data_table[,c(3,9:15)]
colnames(movement_data_table) <- c(colnames(movement_data_table)[1:2],sapply(strsplit(colnames(movement_data_table)[3:dim(movement_data_table)[2]],split="_percent"),'[',1))
movement_data_table[movement_data_table[,1] == "",1] <- "Decentralized Administration of Greece total"
movement_data_table[,1] <- sapply(strsplit(movement_data_table[,1],split=" of "),'[',2)
# NOTE(review): the following matrix is immediately overwritten by the next
# assignment and has no effect.
movement_percentage_changes <- matrix(as.numeric(unlist(movement_data_table[3:dim(movement_data_table)[2]])),nrow=dim(movement_data_table)[1],byrow=FALSE)
movement_percentage_changes <- movement_data_table[,3:8]
# Create the output folders if they do not yet exist.
if(!file.exists(paste(directory,"Movement correlations",sep="/")))
  dir.create(paste(directory,"Movement correlations",sep="/"))
if(!file.exists(paste(directory,"Movement progression",sep="/")))
  dir.create(paste(directory,"Movement progression",sep="/"))
# One iteration per region: heatmap PNG, raw time-series PNG, smoothed PNG.
for (i in 1:length(levels(as.factor(movement_data_table[,1]))))
{
  png(file=file.path(paste(paste(getwd(),"Movement correlations",sep="/"),paste("Correlations [",levels(as.factor(movement_data_table[,1]))[i],"].png",sep=""),sep="/")),width=900,height=900)
  heatmap.2(cor(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,],use="complete.obs"),Rowv=FALSE,Colv=FALSE,dendrogram="none",main=paste("Correlations for",levels(as.factor(movement_data_table[,1]))[i],sep=" "),margins=c(15,15))
  dev.off()
  png(file=file.path(paste(paste(getwd(),"Movement progression",sep="/"),paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"].png",sep=""),sep="/")),width=1600,height=1200)
  par(mar= c(5, 4, 7, 2) + 0.1)
  plot(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1]),col="red2",pch=16,xaxt="n",las=2,xlab="",ylab="% change from baseline (average of 03/01/2020 to 06/02/2020)",main=paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"]",sep=""))
  # Date labels every 10 days, counted from 14/02/2020.
  axis(1,at=seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10,labels=as.character(chron(dates.="14/02/2020",format="d/m/y") + seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10),las=2)
  for (j in 2:6)
    points(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j],pch=16,col=colors()[j*24])
  legend("bottomleft",legend=colnames(movement_percentage_changes),pch=15,col=c("red2",colors()[(2:6)*24]))
  par(mar= c(5, 4, 4, 2) + 0.1)
  dev.off()
  # Smoothed version: natural splines with 11 degrees of freedom via lm().
  png(file=file.path(paste(paste(getwd(),"Movement progression",sep="/"),paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"] - smoothed.png",sep=""),sep="/")),width=1600,height=1200)
  par(mar= c(5, 4, 7, 2) + 0.1)
  plot(predict(lm(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1])~ns(seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1])),11)),x=seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,1]))),col="red2",type="l",lwd=3,xaxt="n",las=2,xlab="",ylab="% change from baseline (average of 03/01/2020 to 06/02/2020)",main=paste("Movement data [",levels(as.factor(movement_data_table[,1]))[i],"]",sep=""))
  axis(1,at=seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10,labels=as.character(chron(dates.="14/02/2020",format="d/m/y") + seq(sum(as.integer(as.factor(movement_data_table[,1]))==i)/10)*10),las=2)
  for (j in 2:6)
    lines(predict(lm(as.numeric(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j])~ns(seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j])),11)),x=seq(length(movement_percentage_changes[as.integer(as.factor(movement_data_table[,1]))==i,j]))),lwd=3,col=colors()[j*24])
  legend("bottomleft",legend=colnames(movement_percentage_changes),pch=15,col=c("red2",colors()[(2:6)*24]))
  par(mar= c(5, 4, 4, 2) + 0.1)
  dev.off()
}
# Remove the loop indices from the workspace.
rm(i,j)
|
library(rgdal)
library(plotly)
library(gwtools)
library(pracma)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# CREATE MESH INPUT FILE -----------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# load the mesh shapefile and write it to
cvhm_mesh_shp <- readOGR(dsn = "../gis_data/", layer = "CVHM_mesh_3310")
# Create a unique list of nodes (ND) and mesh elements (MSH) whose four
# entries index into the node list. Duplicate nodes shared by neighbouring
# quads are merged using a 0.1 coordinate-unit tolerance.
ND <- matrix(data = NA, nrow = 50000, ncol = 2)
MSH <- matrix(data = NA, nrow = length(cvhm_mesh_shp), ncol = 4)
cnt_nd <- 0
for (i in 1:length(cvhm_mesh_shp)) {
  # Each feature must be a single quadrilateral ring (5 coords, closed).
  if (length(cvhm_mesh_shp@polygons[[i]]@Polygons) != 1){
    print(paste("There are more polygons in", i))
    break
  }
  if (dim(cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords)[1] != 5){
    print(paste("The nodes are not 5 in", i))
    break
  }
  # Visit the four corner nodes (the fifth coordinate closes the ring).
  for (j in 1:4) {
    x <- cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords[j,1]
    y <- cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords[j,2]
    if (cnt_nd == 0){
      # First node overall: nothing to compare against.
      cnt_nd <- cnt_nd + 1
      ND[cnt_nd,] <- c(x,y)
      MSH[i,j] <- cnt_nd
    }
    else{
      # Reuse an existing node if one lies within the 0.1 tolerance.
      dst <- sqrt((ND[1:cnt_nd,1] - x)^2 + (ND[1:cnt_nd,2] - y)^2)
      id <- which(dst < 0.1)
      if (length(id) == 0){
        cnt_nd <- cnt_nd + 1
        ND[cnt_nd,] <- c(x,y)
        MSH[i,j] <- cnt_nd
      }
      else{
        if (length(id) > 1){
          print(paste("More than one nodes have the same coordinates in element", i))
          break
        }
        MSH[i,j] <- id
      }
    }
  }
}
# Trim the preallocated node matrix and append a zero elevation column.
ND <- ND[1:cnt_nd,]
ND <- cbind(ND, zeros(n = dim(ND)[1],m = 1))
# Check the area of the elements: a negative signed area means the nodes
# are ordered clockwise, so reverse the ordering to make all elements
# counter-clockwise.
a <- vector(mode = "numeric", length = dim(MSH)[1])
for (i in 1:dim(MSH)[1]) {
  a[i] <- polyarea(ND[MSH[i,],1],ND[MSH[i,],2])
  if (a[i] < 0){
    MSH[i,] <- MSH[i,c(1,4,3,2)]
  }
}
# Write the mesh file only once (node indices are converted to 0-based).
if (!file.exists("CVHM_msh_3310.npsat") == TRUE){
  gwtools::npsat.writeMesh(filename = "CVHM_msh_3310.npsat", nd = ND, msh = MSH-1)
}
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# CREATE BUFFER NODES -------------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Create a buffer layer of nodes to make sure that all interpolation will
# return a value. Nodes are deduplicated with a 0.1-unit tolerance.
cvhm_buffer <- readOGR(dsn = "../gis_data/", layer = "CVHM_mesh_buffer_3310")
ND_buffer <- matrix(data = NA, nrow = 5000, ncol = 2)
cnt_nd <- 0
for (i in 1:dim(cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords)[1]) {
  if (cnt_nd == 0){
    # First node: nothing to compare against.
    cnt_nd <- cnt_nd + 1
    ND_buffer[cnt_nd,] <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,]
  }
  else{
    # Only add the node if no existing node lies within the tolerance.
    x <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,1]
    y <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,2]
    dst <- sqrt((ND_buffer[1:cnt_nd,1] - x)^2 + (ND_buffer[1:cnt_nd,2] - y)^2)
    id <- which(dst < 0.1)
    if (length(id) == 0){
      cnt_nd <- cnt_nd + 1
      ND_buffer[cnt_nd,] <- c(x,y)
    }
  }
}
# Trim the preallocated matrix to the nodes actually found.
ND_buffer <- ND_buffer[1:cnt_nd,]
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# WRITE THE BOTTOM ELEVATION FILE--------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# load the information for the bottom of the CV
bas_active <- readOGR(dsn = "../gis_data/", layer = "BAS_active_3310")
cvhm_bottom <- readOGR(dsn = "../gis_data/", layer = "DIS_3310")
# Match the DIS cells to the active cells via linear (row, col) indices on
# the 441-row grid, and keep only the active DIS cells.
active_lin_ind <- gwtools::sub2ind(bas_active$ROW, bas_active$COLUMN_, 441)
dis_lin_ind <- gwtools::sub2ind(cvhm_bottom$ROW, cvhm_bottom$COLUMN_,441)
tmp <- match(dis_lin_ind, active_lin_ind)
dis_act_id <- which(!is.na(tmp))
# NOTE(review): View() is an interactive inspection call (RStudio viewer)
# and has no effect on the computation -- likely a debugging leftover.
View(tmp[dis_act_id])
cvhm_bottom <- subset(cvhm_bottom, !is.na(tmp))
# One point per active cell: cell centroid (mean of the 4 corners) plus
# the bottom elevation attribute.
bottom_points <- matrix(data = NA, nrow = length(cvhm_bottom), ncol = 3)
for (i in 1:length(cvhm_bottom)) {
  bottom_points[i,] <- c(
    mean(cvhm_bottom@polygons[[i]]@Polygons[[1]]@coords[1:4,1]),
    mean(cvhm_bottom@polygons[[i]]@Polygons[[1]]@coords[1:4,2]),
    cvhm_bottom@data$cvr2lay10b[i]
  )
}
# Assign elevations to the buffer nodes by nearest-neighbour lookup.
buff_Bot_elev <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 3)
for (i in 1:dim(ND_buffer)[1]) {
  dst <- sqrt((ND_buffer[i,1]-bottom_points[,1])^2 + (ND_buffer[i,2]-bottom_points[,2])^2)
  buff_Bot_elev[i,] <- c(
    ND_buffer[i,1], ND_buffer[i,2],
    bottom_points[which.min(dst),3]
  )
}
# Write the combined cell-centroid and buffer-node elevations.
gwtools::npsat.WriteScattered(filename = "CVHM_Bottom_3310.npsat",PDIM = 2,
                              TYPE = "HOR", MODE = "SIMPLE",
                              DATA = rbind(bottom_points,buff_Bot_elev))
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# WRITE HYDRAULIC CONDUCTIVITY -------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Read the per-layer horizontal (HK) and vertical (VK) conductivity grids.
HK <- vector(mode = "list", length = 10)
VK <- vector(mode = "list", length = 10)
for (i in 1:10) {
  HK[[i]] <- as.matrix(read.table(file = paste0("../hyd_cond/HK_lyr", i, ".txt"), header = F))
  VK[[i]] <- as.matrix(read.table(file = paste0("../hyd_cond/VK_lyr", i, ".txt"), header = F))
}
# Extract the conductivity values at the active cells: one row per
# bottom point, one column per layer.
HK_mat <- matrix(data = NA, nrow = dim(bottom_points)[1], ncol = 10)
VK_mat <- matrix(data = NA, nrow = dim(bottom_points)[1], ncol = 10)
lin_ind <- gwtools::sub2ind(cvhm_bottom$ROW, cvhm_bottom$COLUMN_, 441)
for (i in 1:10) {
  HK_mat[,i] <- HK[[i]][lin_ind]
  VK_mat[,i] <- VK[[i]][lin_ind]
}
# interpolate conductivity and elevation to the buffer nodes
# (nearest-neighbour lookup against the cell centroids).
interLayElev <- cvhm_bottom@data[,6:14]
interLayElev_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = dim(interLayElev)[2])
HK_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 10)
VK_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 10)
for (i in 1:dim(ND_buffer)[1]) {
  dst <- sqrt((ND_buffer[i,1]-bottom_points[,1])^2 + (ND_buffer[i,2]-bottom_points[,2])^2)
  interLayElev_buff[i,] <- as.numeric(interLayElev[which.min(dst),])
  HK_buff[i,] <- as.numeric(HK_mat[which.min(dst),])
  VK_buff[i,] <- as.numeric(VK_mat[which.min(dst),])
}
# Assemble the stratified data sets: (x, y) coordinates followed by
# alternating (conductivity, interlayer elevation) columns for each layer.
HKdata <- cbind(rbind(bottom_points[,1:2],buff_Bot_elev[,1:2]), c(HK_mat[,1], HK_buff[,1]))
VKdata <- cbind(rbind(bottom_points[,1:2],buff_Bot_elev[,1:2]), c(VK_mat[,1], VK_buff[,1]))
for (i in 2:10) {
  HKdata <- cbind(HKdata, c(interLayElev[,i-1], interLayElev_buff[,i-1]))
  HKdata <- cbind(HKdata, c(HK_mat[,i], HK_buff[,i]))
  VKdata <- cbind(VKdata, c(interLayElev[,i-1], interLayElev_buff[,i-1]))
  VKdata <- cbind(VKdata, c(VK_mat[,i], VK_buff[,i]))
}
# Isolate only the conductivity values (every other column starting at 3).
HKtemp <- HKdata[, seq(3,21,2) ]
VKtemp <- VKdata[, seq(3,21,2) ]
# There are here and there zero conductivity values. Replace each zero in
# layer i with the value of the layer above it (column i - 1). The first
# layer has no layer above, so zeros there are only reported.
#
# BUG FIX: the original loop tested `i == 0`, which a 1-based loop never
# reaches (so first-layer zeros would have indexed column 0 and errored),
# and its `next` statement skipped the VK replacement for every column
# whose HK values contained no zeros. HK and VK are now handled
# independently.
for (i in 1:dim(HKtemp)[2]) {
  zr_id <- which(HKtemp[, i] == 0)
  if (length(zr_id) > 0) {
    if (i == 1) {
      print(paste("The first layer has zero HK values"))
    } else {
      HKtemp[zr_id, i] <- HKtemp[zr_id, i - 1]
    }
  }
  zr_id <- which(VKtemp[, i] == 0)
  if (length(zr_id) > 0) {
    if (i == 1) {
      print(paste("The first layer has zero VK values"))
    } else {
      VKtemp[zr_id, i] <- VKtemp[zr_id, i - 1]
    }
  }
}
# Substitute the modified values back into the full data sets.
HKdata[, seq(3,21,2) ] <- HKtemp
VKdata[, seq(3,21,2) ] <- VKtemp
# Write the stratified conductivity files for the NPSAT model.
gwtools::npsat.WriteScattered(filename = "CVHM_HK_3310.npsat",
                              PDIM = 2,TYPE = "FULL",MODE = "STRATIFIED", DATA = HKdata)
gwtools::npsat.WriteScattered(filename = "CVHM_VK_3310.npsat",
                              PDIM = 2,TYPE = "FULL",MODE = "STRATIFIED", DATA = VKdata)
{# Test HK: read the file back as a sanity check.
 # NOTE(review): this overwrites the HK list of layer matrices read earlier.
  HK <- gwtools::npsat.ReadScattered("CVHM_HK_3310.npsat")
}
| /Rcvhm/PrepareNPSATproperties.R | no_license | giorgk/CVHM_NPSAT | R | false | false | 7,348 | r | library(rgdal)
library(plotly)
library(gwtools)
library(pracma)
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# CREATE MESH INPUT FILE -----------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# load the mesh shapefile and write it to
cvhm_mesh_shp <- readOGR(dsn = "../gis_data/", layer = "CVHM_mesh_3310")
# Create a unique node list (ND) and element table (MSH); duplicate nodes
# shared by neighbouring quads are merged with a 0.1-unit tolerance.
ND <- matrix(data = NA, nrow = 50000, ncol = 2)
MSH <- matrix(data = NA, nrow = length(cvhm_mesh_shp), ncol = 4)
cnt_nd <- 0
for (i in 1:length(cvhm_mesh_shp)) {
  # Each feature must be a single quadrilateral ring (5 coords, closed).
  if (length(cvhm_mesh_shp@polygons[[i]]@Polygons) != 1){
    print(paste("There are more polygons in", i))
    break
  }
  if (dim(cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords)[1] != 5){
    print(paste("The nodes are not 5 in", i))
    break
  }
  for (j in 1:4) {
    x <- cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords[j,1]
    y <- cvhm_mesh_shp@polygons[[i]]@Polygons[[1]]@coords[j,2]
    if (cnt_nd == 0){
      cnt_nd <- cnt_nd + 1
      ND[cnt_nd,] <- c(x,y)
      MSH[i,j] <- cnt_nd
    }
    else{
      # Reuse an existing node if one lies within the 0.1 tolerance.
      dst <- sqrt((ND[1:cnt_nd,1] - x)^2 + (ND[1:cnt_nd,2] - y)^2)
      id <- which(dst < 0.1)
      if (length(id) == 0){
        cnt_nd <- cnt_nd + 1
        ND[cnt_nd,] <- c(x,y)
        MSH[i,j] <- cnt_nd
      }
      else{
        if (length(id) > 1){
          print(paste("More than one nodes have the same coordinates in element", i))
          break
        }
        MSH[i,j] <- id
      }
    }
  }
}
# Trim the preallocated node matrix and append a zero elevation column.
ND <- ND[1:cnt_nd,]
ND <- cbind(ND, zeros(n = dim(ND)[1],m = 1))
# Check the area of the elements: a negative signed area means clockwise
# node ordering, so reverse it to counter-clockwise.
a <- vector(mode = "numeric", length = dim(MSH)[1])
for (i in 1:dim(MSH)[1]) {
  a[i] <- polyarea(ND[MSH[i,],1],ND[MSH[i,],2])
  if (a[i] < 0){
    MSH[i,] <- MSH[i,c(1,4,3,2)]
  }
}
# Write the mesh file only once (node indices are converted to 0-based).
if (!file.exists("CVHM_msh_3310.npsat") == TRUE){
  gwtools::npsat.writeMesh(filename = "CVHM_msh_3310.npsat", nd = ND, msh = MSH-1)
}
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# CREATE BUFFER NODES -------------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Create a buffer layer of nodes to make sure that all interpolation will return a value
cvhm_buffer <- readOGR(dsn = "../gis_data/", layer = "CVHM_mesh_buffer_3310")
ND_buffer <- matrix(data = NA, nrow = 5000, ncol = 2)
cnt_nd <- 0
for (i in 1:dim(cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords)[1]) {
if (cnt_nd == 0){
cnt_nd <- cnt_nd + 1
ND_buffer[cnt_nd,] <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,]
}
else{
x <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,1]
y <- cvhm_buffer@polygons[[1]]@Polygons[[1]]@coords[i,2]
dst <- sqrt((ND_buffer[1:cnt_nd,1] - x)^2 + (ND_buffer[1:cnt_nd,2] - y)^2)
id <- which(dst < 0.1)
if (length(id) == 0){
cnt_nd <- cnt_nd + 1
ND_buffer[cnt_nd,] <- c(x,y)
}
}
}
ND_buffer <- ND_buffer[1:cnt_nd,]
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# WRITE THE BOTTOM ELEVATION FILE--------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# load the information for the bottom of the CV
bas_active <- readOGR(dsn = "../gis_data/", layer = "BAS_active_3310")
cvhm_bottom <- readOGR(dsn = "../gis_data/", layer = "DIS_3310")
active_lin_ind <- gwtools::sub2ind(bas_active$ROW, bas_active$COLUMN_, 441)
dis_lin_ind <- gwtools::sub2ind(cvhm_bottom$ROW, cvhm_bottom$COLUMN_,441)
tmp <- match(dis_lin_ind, active_lin_ind)
dis_act_id <- which(!is.na(tmp))
View(tmp[dis_act_id])
cvhm_bottom <- subset(cvhm_bottom, !is.na(tmp))
bottom_points <- matrix(data = NA, nrow = length(cvhm_bottom), ncol = 3)
for (i in 1:length(cvhm_bottom)) {
bottom_points[i,] <- c(
mean(cvhm_bottom@polygons[[i]]@Polygons[[1]]@coords[1:4,1]),
mean(cvhm_bottom@polygons[[i]]@Polygons[[1]]@coords[1:4,2]),
cvhm_bottom@data$cvr2lay10b[i]
)
}
# assign elevations to the buffer nodes
buff_Bot_elev <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 3)
for (i in 1:dim(ND_buffer)[1]) {
dst <- sqrt((ND_buffer[i,1]-bottom_points[,1])^2 + (ND_buffer[i,2]-bottom_points[,2])^2)
buff_Bot_elev[i,] <- c(
ND_buffer[i,1], ND_buffer[i,2],
bottom_points[which.min(dst),3]
)
}
gwtools::npsat.WriteScattered(filename = "CVHM_Bottom_3310.npsat",PDIM = 2,
TYPE = "HOR", MODE = "SIMPLE",
DATA = rbind(bottom_points,buff_Bot_elev))
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# WRITE HYDRAULIC CONDUCTIVITY -------------
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
HK <- vector(mode = "list", length = 10)
VK <- vector(mode = "list", length = 10)
for (i in 1:10) {
HK[[i]] <- as.matrix(read.table(file = paste0("../hyd_cond/HK_lyr", i, ".txt"), header = F))
VK[[i]] <- as.matrix(read.table(file = paste0("../hyd_cond/VK_lyr", i, ".txt"), header = F))
}
HK_mat <- matrix(data = NA, nrow = dim(bottom_points)[1], ncol = 10)
VK_mat <- matrix(data = NA, nrow = dim(bottom_points)[1], ncol = 10)
lin_ind <- gwtools::sub2ind(cvhm_bottom$ROW, cvhm_bottom$COLUMN_, 441)
for (i in 1:10) {
HK_mat[,i] <- HK[[i]][lin_ind]
VK_mat[,i] <- VK[[i]][lin_ind]
}
# interpolate conductivity and elevation to the buffer nodes
interLayElev <- cvhm_bottom@data[,6:14]
interLayElev_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = dim(interLayElev)[2])
HK_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 10)
VK_buff <- matrix(data = NA, nrow = dim(ND_buffer)[1], ncol = 10)
for (i in 1:dim(ND_buffer)[1]) {
dst <- sqrt((ND_buffer[i,1]-bottom_points[,1])^2 + (ND_buffer[i,2]-bottom_points[,2])^2)
interLayElev_buff[i,] <- as.numeric(interLayElev[which.min(dst),])
HK_buff[i,] <- as.numeric(HK_mat[which.min(dst),])
VK_buff[i,] <- as.numeric(VK_mat[which.min(dst),])
}
HKdata <- cbind(rbind(bottom_points[,1:2],buff_Bot_elev[,1:2]), c(HK_mat[,1], HK_buff[,1]))
VKdata <- cbind(rbind(bottom_points[,1:2],buff_Bot_elev[,1:2]), c(VK_mat[,1], VK_buff[,1]))
for (i in 2:10) {
HKdata <- cbind(HKdata, c(interLayElev[,i-1], interLayElev_buff[,i-1]))
HKdata <- cbind(HKdata, c(HK_mat[,i], HK_buff[,i]))
VKdata <- cbind(VKdata, c(interLayElev[,i-1], interLayElev_buff[,i-1]))
VKdata <- cbind(VKdata, c(VK_mat[,i], VK_buff[,i]))
}
# Isolate only the conductivity values: in HKdata/VKdata the columns alternate
# (x, y, K1, elev1, K2, elev2, ...), so seq(3, 21, 2) picks the ten K columns.
HKtemp <- HKdata[, seq(3,21,2) ]
VKtemp <- VKdata[, seq(3,21,2) ]
# Here and there the grid contains zero conductivity values; replace each zero
# with the value of the layer above it. Layer 1 has no layer above, so zeros
# there are only reported.
# BUG FIX: the original loop tested `i == 0`, which never occurs for a loop
# starting at 1 (so first-layer zeros would have indexed column 0), and a
# `next` after the HK branch skipped the VK fix for any layer whose HK had no
# zeros. Both issues are corrected below.
for (i in seq_len(ncol(HKtemp))) {
  zr_id <- which(HKtemp[, i] == 0)
  if (length(zr_id) > 0) {
    if (i == 1) {
      print(paste("The first layer has zero HK values"))
    } else {
      HKtemp[zr_id, i] <- HKtemp[zr_id, i - 1]
    }
  }
  zr_id <- which(VKtemp[, i] == 0)
  if (length(zr_id) > 0) {
    if (i == 1) {
      print(paste("The first layer has zero VK values"))
    } else {
      VKtemp[zr_id, i] <- VKtemp[zr_id, i - 1]
    }
  }
}
# Substitute the modified (zero-repaired) conductivity values back into the
# stratified tables; odd columns 3,5,...,21 hold the K values.
HKdata[, seq(3,21,2) ] <- HKtemp
VKdata[, seq(3,21,2) ] <- VKtemp
# Export horizontal and vertical conductivity as NPSAT stratified scattered files.
gwtools::npsat.WriteScattered(filename = "CVHM_HK_3310.npsat",
PDIM = 2,TYPE = "FULL",MODE = "STRATIFIED", DATA = HKdata)
gwtools::npsat.WriteScattered(filename = "CVHM_VK_3310.npsat",
PDIM = 2,TYPE = "FULL",MODE = "STRATIFIED", DATA = VKdata)
{# Read the HK file back as a sanity check on the written output
HK <- gwtools::npsat.ReadScattered("CVHM_HK_3310.npsat")
}
|
setwd("~/study/GettingData/work_scripts/course project")
if(!file.exists("./data")){dir.create("./data")}
#setting paths to read files
#folders
rootPath <- "./data/"
testPath <- "./data/test/"
trainPath <- "./data/train/"
#test files
subjectTestPath <- paste(testPath, "subject_test.txt", sep="")
xTestPath <- paste(testPath, "X_test.txt", sep="")
yTestPath <- paste(testPath, "y_test.txt", sep="")
#training files
subjectTrainPath <- paste(trainPath, "subject_train.txt", sep="")
xTrainPath <- paste(trainPath, "X_train.txt", sep="")
yTrainPath <- paste(trainPath, "y_train.txt", sep="")
#additional files
featuresPath <- paste(rootPath, "features.txt", sep="")
activitiesPath <- paste(rootPath, "activity_labels.txt", sep="")
# reading text files into variables
subjectTest <- read.table(subjectTestPath)
xTest <- read.table(xTestPath)
yTest <- read.table(yTestPath)
subjectTrain <- read.table(subjectTrainPath)
xTrain <- read.table(xTrainPath)
yTrain <- read.table(yTrainPath)
features <-read.table(featuresPath)
activities <- read.table(activitiesPath)
#naming columns in datasets
colnames(xTest) <- features[,2]
colnames(xTrain) <- features[,2]
# Working with activities: attach the activity label to each activity id.
# merge() reorders rows, so a temporary `id` column records the original
# order and is used to restore it afterwards.
# test set
yTest$id <- seq_len(nrow(yTest))
yTest1 <- merge(yTest, activities, by.x = "V1", by.y = "V1", sort = FALSE)
# BUG FIX: the original sorted by `yt1$id`, an undefined object; it must be
# the merged table's own id column.
yTest1 <- yTest1[order(yTest1$id), ]
# training set
yTrain$id <- seq_len(nrow(yTrain))
yTrain1 <- merge(yTrain, activities, by.x = "V1", by.y = "V1", sort = FALSE)
yTrain1 <- yTrain1[order(yTrain1$id), ]
#Adding activity reference to xTrain and xTest
xTrain$Activity <- yTrain1$V2
xTest$Activity <-yTest1$V2
#Adding subject references
xTrain$Subject <- subjectTrain$V1
xTest$Subject <- subjectTest$V1
#merging to one dataframe
m <- rbind(xTrain, xTest)
#filtering out not required columns, we will keep mean, standard deviation, activity and subject
# as I assume that activity and subject are required for future operations
library(dplyr)
# fixing column names to avoid duplication error
valid_column_names <- make.names(names=names(m), unique=TRUE, allow_ = TRUE)
names(m) <- valid_column_names
mt <- select(m, Activity, Subject, contains("Mean"), contains("std"))
#making new dataset with average of each variable for each activity and each subject
# setting new tidy dataframe structure
d5names <- names(mt)
d5 <- data.frame()
n <-30 #number of subjects
s <-nrow(activities)
l<- length(d5names) - 2
# calculating average for each combination of Activity and Subject and saving data in d5 dataframe
for (i in 1:s)
{
for (j in 1:n)
{
tdata <- filter(mt, Activity==activities[i,2] & Subject==j)
tdata1 <- select(tdata, -(Activity:Subject))
trow <-data.frame()
trow[1,1] <- activities[i,2]
trow[1,2] <- j
tm <- colMeans(tdata1, na.rm = FALSE)
for (k in 1:l)
{
trow[1,2+k] <- tm[k]
}
d5<-rbind(d5, trow[1,])
}
}
#setting meaningful descriptive names to columns
names(d5) <- d5names
write.csv(d5, file=paste(rootPath, "d5.csv", sep=""))
| /run_analysis.R | no_license | alevashov/getdatacourseproject | R | false | false | 3,219 | r | setwd("~/study/GettingData/work_scripts/course project")
if(!file.exists("./data")){dir.create("./data")}
#setting paths to read files
#folders
rootPath <- "./data/"
testPath <- "./data/test/"
trainPath <- "./data/train/"
#test files
subjectTestPath <- paste(testPath, "subject_test.txt", sep="")
xTestPath <- paste(testPath, "X_test.txt", sep="")
yTestPath <- paste(testPath, "y_test.txt", sep="")
#training files
subjectTrainPath <- paste(trainPath, "subject_train.txt", sep="")
xTrainPath <- paste(trainPath, "X_train.txt", sep="")
yTrainPath <- paste(trainPath, "y_train.txt", sep="")
#additional files
featuresPath <- paste(rootPath, "features.txt", sep="")
activitiesPath <- paste(rootPath, "activity_labels.txt", sep="")
# reading text files into variables
subjectTest <- read.table(subjectTestPath)
xTest <- read.table(xTestPath)
yTest <- read.table(yTestPath)
subjectTrain <- read.table(subjectTrainPath)
xTrain <- read.table(xTrainPath)
yTrain <- read.table(yTrainPath)
features <-read.table(featuresPath)
activities <- read.table(activitiesPath)
#naming columns in datasets
colnames(xTest) <- features[,2]
colnames(xTrain) <- features[,2]
# Working with activities: attach the activity label to each activity id.
# merge() reorders rows, so a temporary `id` column records the original
# order and is used to restore it afterwards.
# test set
yTest$id <- seq_len(nrow(yTest))
yTest1 <- merge(yTest, activities, by.x = "V1", by.y = "V1", sort = FALSE)
# BUG FIX: the original sorted by `yt1$id`, an undefined object; it must be
# the merged table's own id column.
yTest1 <- yTest1[order(yTest1$id), ]
# training set
yTrain$id <- seq_len(nrow(yTrain))
yTrain1 <- merge(yTrain, activities, by.x = "V1", by.y = "V1", sort = FALSE)
yTrain1 <- yTrain1[order(yTrain1$id), ]
#Adding activity reference to xTrain and xTest
xTrain$Activity <- yTrain1$V2
xTest$Activity <-yTest1$V2
#Adding subject references
xTrain$Subject <- subjectTrain$V1
xTest$Subject <- subjectTest$V1
#merging to one dataframe
m <- rbind(xTrain, xTest)
#filtering out not required columns, we will keep mean, standard deviation, activity and subject
# as I assume that activity and subject are required for future operations
library(dplyr)
# fixing column names to avoid duplication error
valid_column_names <- make.names(names=names(m), unique=TRUE, allow_ = TRUE)
names(m) <- valid_column_names
mt <- select(m, Activity, Subject, contains("Mean"), contains("std"))
#making new dataset with average of each variable for each activity and each subject
# setting new tidy dataframe structure
d5names <- names(mt)
d5 <- data.frame()
n <-30 #number of subjects
s <-nrow(activities)
l<- length(d5names) - 2
# calculating average for each combination of Activity and Subject and saving data in d5 dataframe
for (i in 1:s)
{
for (j in 1:n)
{
tdata <- filter(mt, Activity==activities[i,2] & Subject==j)
tdata1 <- select(tdata, -(Activity:Subject))
trow <-data.frame()
trow[1,1] <- activities[i,2]
trow[1,2] <- j
tm <- colMeans(tdata1, na.rm = FALSE)
for (k in 1:l)
{
trow[1,2+k] <- tm[k]
}
d5<-rbind(d5, trow[1,])
}
}
#setting meaningful descriptive names to columns
names(d5) <- d5names
write.csv(d5, file=paste(rootPath, "d5.csv", sep=""))
|
### G-computation of potential outcomes under a fixed treatment level.
### final.mod      - fitted lm() prediction model for the outcome
### data           - observed data supplying the baseline covariates
### baselineVar    - names of the baseline covariate columns in `data`
### numCarlo       - number of Monte Carlo draws used in the G-computation
### treat          - treatment level whose potential outcome is simulated
### treat.varname  - variable name of the (first) treatment indicator
###                  (in the simulation studies, A0 denotes treatment at the
###                  first time point)
### outcome.varname - name under which the simulated outcome Y is stored
### Returns a data frame of simulated covariates, treatment, derived terms
### and the simulated outcome (one row per Monte Carlo draw).
gcomputeFunc=function(final.mod, data, baselineVar, numCarlo, treat.varname, outcome.varname, treat){
###############################################################
### generate baseline covariates from normal approximations to their
### empirical distributions (matching each observed mean/sd) ########
baselineSim=NULL
for(ind in 1:length(baselineVar)){
draw_g=NULL
draw_g=cbind(draw_g, rnorm(numCarlo, mean=mean(data[,baselineVar[ind]]), sd=sd(data[,baselineVar[ind]])) )
colnames(draw_g)=baselineVar[ind]
baselineSim=cbind(baselineSim, draw_g)
}
# set every draw to the requested treatment level, then rename the column
# to the caller-supplied treatment variable name
baselineSim=data.frame(baselineSim, A0_g=rep(treat, numCarlo))
names(baselineSim)[which(names(baselineSim) == "A0_g")]=treat.varname
#### squared and interaction terms expected by the prediction model #####
# NOTE(review): assumes baselineVar includes columns named L1, L2 and L3 --
# confirm against callers before reusing this function elsewhere.
baselineSim[ , "L1_sq"] = baselineSim[, "L1"]^2
baselineSim[ , "L2_sq"] = baselineSim[, "L2"]^2
baselineSim[ , "L3_sq"] = baselineSim[, "L3"]^2
baselineSim[, "L1L2"] = baselineSim[, "L1"] * baselineSim[, "L2"]
###### generate the final outcome of interest Y: linear predictor from the
###### fitted coefficients plus residual noise with the model's sigma
includeVar = NULL
includeVar = names(final.mod$coef)[-c(1)]
includeDesign=NULL
for(ind in 1:length(includeVar)){
includeDesign=cbind(includeDesign, baselineSim[, includeVar[ind]])
}
Y_mean=(cbind(rep(1,numCarlo), includeDesign)%*% final.mod$coef)
Y_g=rnorm(numCarlo, mean=Y_mean, sd=summary(final.mod)$sigma)
baselineSim = data.frame(baselineSim, Y=Y_g)
names(baselineSim)[which(names(baselineSim) == "Y")] = outcome.varname
return( baselineSim)
}
| /oneTimePointSimulation/Functions/gcomputeFunc.R | no_license | jasa-acs/Penalized-Spline-of-Propensity-Methods-for-Treatment-Comparison | R | false | false | 1,944 | r |
### G-computation of potential outcomes under a fixed treatment level.
### final.mod      - fitted lm() prediction model for the outcome
### data           - observed data supplying the baseline covariates
### baselineVar    - names of the baseline covariate columns in `data`
### numCarlo       - number of Monte Carlo draws used in the G-computation
### treat          - treatment level whose potential outcome is simulated
### treat.varname  - variable name of the (first) treatment indicator
###                  (in the simulation studies, A0 denotes treatment at the
###                  first time point)
### outcome.varname - name under which the simulated outcome Y is stored
### Returns a data frame of simulated covariates, treatment, derived terms
### and the simulated outcome (one row per Monte Carlo draw).
gcomputeFunc=function(final.mod, data, baselineVar, numCarlo, treat.varname, outcome.varname, treat){
###############################################################
### generate baseline covariates from normal approximations to their
### empirical distributions (matching each observed mean/sd) ########
baselineSim=NULL
for(ind in 1:length(baselineVar)){
draw_g=NULL
draw_g=cbind(draw_g, rnorm(numCarlo, mean=mean(data[,baselineVar[ind]]), sd=sd(data[,baselineVar[ind]])) )
colnames(draw_g)=baselineVar[ind]
baselineSim=cbind(baselineSim, draw_g)
}
# set every draw to the requested treatment level, then rename the column
# to the caller-supplied treatment variable name
baselineSim=data.frame(baselineSim, A0_g=rep(treat, numCarlo))
names(baselineSim)[which(names(baselineSim) == "A0_g")]=treat.varname
#### squared and interaction terms expected by the prediction model #####
# NOTE(review): assumes baselineVar includes columns named L1, L2 and L3 --
# confirm against callers before reusing this function elsewhere.
baselineSim[ , "L1_sq"] = baselineSim[, "L1"]^2
baselineSim[ , "L2_sq"] = baselineSim[, "L2"]^2
baselineSim[ , "L3_sq"] = baselineSim[, "L3"]^2
baselineSim[, "L1L2"] = baselineSim[, "L1"] * baselineSim[, "L2"]
###### generate the final outcome of interest Y: linear predictor from the
###### fitted coefficients plus residual noise with the model's sigma
includeVar = NULL
includeVar = names(final.mod$coef)[-c(1)]
includeDesign=NULL
for(ind in 1:length(includeVar)){
includeDesign=cbind(includeDesign, baselineSim[, includeVar[ind]])
}
Y_mean=(cbind(rep(1,numCarlo), includeDesign)%*% final.mod$coef)
Y_g=rnorm(numCarlo, mean=Y_mean, sd=summary(final.mod)$sigma)
baselineSim = data.frame(baselineSim, Y=Y_g)
names(baselineSim)[which(names(baselineSim) == "Y")] = outcome.varname
return( baselineSim)
}
|
prep_title_fun <- function(title_cfg){
  # Build a closure that draws a title block (main title, optional subtitle,
  # optional footnote lines) on the currently open base-graphics plot.
  #
  # title_cfg is a list with: main / main_cex / main_col, x_pos / y_pos
  # (fractions of the plot region), and optionally subtitle / sub_cex /
  # sub_col and footnote (character vector) / foot_cex / foot_col.
  plot_fun <- function(){
    # coordinate space of the current plot (xmin, xmax, ymin, ymax)
    coord_space <- par()$usr
    font_y_multiplier <- 1.2 # some extra spacing for Abel
    # Reference line height; fall back to the main title when no subtitle is
    # configured (strheight(NULL) yields an empty result and breaks layout).
    str_height <- strheight(
      if (is.null(title_cfg$subtitle)) title_cfg$main else title_cfg$subtitle
    )
    title_x <- coord_space[1] + title_cfg$x_pos * diff(coord_space[1:2])
    title_y <- coord_space[3] + title_cfg$y_pos * diff(coord_space[3:4])
    text(x = title_x, y = title_y, labels = title_cfg$main,
         cex = title_cfg$main_cex, pos = 4, col = title_cfg$main_col)
    # BUG FIX: compute the subtitle baseline unconditionally. Previously it
    # was assigned only inside the subtitle branch, so a config with
    # footnotes but no subtitle crashed on the undefined `title_y_sub`.
    title_y_sub <- title_y - str_height * title_cfg$main_cex * font_y_multiplier
    if (!is.null(title_cfg$subtitle)) {
      text(x = title_x, y = title_y_sub, labels = title_cfg$subtitle,
           cex = title_cfg$sub_cex, pos = 4, col = title_cfg$sub_col)
    }
    if (!is.null(title_cfg$footnote)) {
      # First footnote sits one subtitle-height below the subtitle baseline;
      # later footnotes stack with 1.6x line spacing.
      start_footnote <- title_y_sub - str_height * title_cfg$sub_cex * font_y_multiplier
      for (i in seq_along(title_cfg$footnote)) {
        title_y_foot <- start_footnote - (i - 1) * str_height * 1.6 * title_cfg$foot_cex
        text(x = title_x, y = title_y_foot,
             labels = paste(title_cfg$footnote[i], collapse = ""),
             cex = title_cfg$foot_cex, pos = 4, col = title_cfg$foot_col)
      }
    }
  }
  return(plot_fun)
}
| /6_visualize/src/prep_title_fun.R | no_license | mhines-usgs/gage-conditions-gif | R | false | false | 1,268 | r |
prep_title_fun <- function(title_cfg){
  # Build a closure that draws a title block (main title, optional subtitle,
  # optional footnote lines) on the currently open base-graphics plot.
  #
  # title_cfg is a list with: main / main_cex / main_col, x_pos / y_pos
  # (fractions of the plot region), and optionally subtitle / sub_cex /
  # sub_col and footnote (character vector) / foot_cex / foot_col.
  plot_fun <- function(){
    # coordinate space of the current plot (xmin, xmax, ymin, ymax)
    coord_space <- par()$usr
    font_y_multiplier <- 1.2 # some extra spacing for Abel
    # Reference line height; fall back to the main title when no subtitle is
    # configured (strheight(NULL) yields an empty result and breaks layout).
    str_height <- strheight(
      if (is.null(title_cfg$subtitle)) title_cfg$main else title_cfg$subtitle
    )
    title_x <- coord_space[1] + title_cfg$x_pos * diff(coord_space[1:2])
    title_y <- coord_space[3] + title_cfg$y_pos * diff(coord_space[3:4])
    text(x = title_x, y = title_y, labels = title_cfg$main,
         cex = title_cfg$main_cex, pos = 4, col = title_cfg$main_col)
    # BUG FIX: compute the subtitle baseline unconditionally. Previously it
    # was assigned only inside the subtitle branch, so a config with
    # footnotes but no subtitle crashed on the undefined `title_y_sub`.
    title_y_sub <- title_y - str_height * title_cfg$main_cex * font_y_multiplier
    if (!is.null(title_cfg$subtitle)) {
      text(x = title_x, y = title_y_sub, labels = title_cfg$subtitle,
           cex = title_cfg$sub_cex, pos = 4, col = title_cfg$sub_col)
    }
    if (!is.null(title_cfg$footnote)) {
      # First footnote sits one subtitle-height below the subtitle baseline;
      # later footnotes stack with 1.6x line spacing.
      start_footnote <- title_y_sub - str_height * title_cfg$sub_cex * font_y_multiplier
      for (i in seq_along(title_cfg$footnote)) {
        title_y_foot <- start_footnote - (i - 1) * str_height * 1.6 * title_cfg$foot_cex
        text(x = title_x, y = title_y_foot,
             labels = paste(title_cfg$footnote[i], collapse = ""),
             cex = title_cfg$foot_cex, pos = 4, col = title_cfg$foot_col)
      }
    }
  }
  return(plot_fun)
}
|
#' @name data_mrn_raw
#'
#' @title Example of mrn.txt output from RPDR.
#'
#' @description A mrn.txt output from RPDR loaded into a data table in R using \emph{data.table::fread()}.
#'
#' **NOTE**: Due to potential issues with PHI and PPI, the example datasets can be downloaded from the
#' Partners Gitlab repository under *parserpdr-sample-data*.
#'
#' @docType data
#'
#' @usage data_mrn_raw
#'
#' @format data.table
#'
#' @return data table, imported from mrn.txt
#' \describe{
#' \item{IncomingId}{numeric, Patient identifier, usually the EMPI.}
#' \item{IncomingSite}{string, Source of identifier, e.g. EMP for Enterprise Master Patient Index, MGH for Mass General Hospital.}
#' \item{Status}{string, Status of the record.}
#' \item{Enterprise_Master_Patient_Index}{numeric, Unique Partners-wide identifier assigned to the patient used to consolidate patient information.}
#' \item{EPIC_PMRN}{numeric, Epic medical record number. This value is unique across Epic instances within the Partners network.}
#' \item{MGH_MRN}{numeric, Unique Medical Record Number for Mass General Hospital.}
#' \item{BWH_MRN}{numeric, Unique Medical Record Number for Brigham and Women's Hospital.}
#' \item{FH_MRN}{numeric, Unique Medical Record Number for Faulkner Hospital.}
#' \item{SRH_MRN}{numeric, Unique Medical Record Number for Spaulding Rehabilitation Hospital.}
#' \item{NWH_MRN}{numeric, Unique Medical Record Number for Newton-Wellesley Hospital.}
#' \item{NSMC_MRN}{numeric, Unique Medical Record Number for North Shore Medical Center.}
#' \item{MCL_MRN}{numeric, Unique Medical Record Number for McLean Hospital.}
#' \item{MEE_MRN}{numeric, Unique Medical Record Number for Mass Eye and Ear.}
#' \item{DFC_MRN}{numeric, Unique Medical Record Number for Dana Farber Cancer center.}
#' \item{WDH_MRN}{numeric, Unique Medical Record Number for Wentworth-Douglass Hospital.}
#' }
#'
#' @encoding UTF-8
NULL
| /R/data_mrn_raw.R | no_license | yuluc/parseRPDR | R | false | false | 1,922 | r | #' @name data_mrn_raw
#'
#' @title Example of mrn.txt output from RPDR.
#'
#' @description A mrn.txt output from RPDR loaded into a data table in R using \emph{data.table::fread()}.
#'
#' **NOTE**: Due to potential issues with PHI and PPI, the example datasets can be downloaded from the
#' Partners Gitlab repository under *parserpdr-sample-data*.
#'
#' @docType data
#'
#' @usage data_mrn_raw
#'
#' @format data.table
#'
#' @return data table, imported from mrn.txt
#' \describe{
#' \item{IncomingId}{numeric, Patient identifier, usually the EMPI.}
#' \item{IncomingSite}{string, Source of identifier, e.g. EMP for Enterprise Master Patient Index, MGH for Mass General Hospital.}
#' \item{Status}{string, Status of the record.}
#' \item{Enterprise_Master_Patient_Index}{numeric, Unique Partners-wide identifier assigned to the patient used to consolidate patient information.}
#' \item{EPIC_PMRN}{numeric, Epic medical record number. This value is unique across Epic instances within the Partners network.}
#' \item{MGH_MRN}{numeric, Unique Medical Record Number for Mass General Hospital.}
#' \item{BWH_MRN}{numeric, Unique Medical Record Number for Brigham and Women's Hospital.}
#' \item{FH_MRN}{numeric, Unique Medical Record Number for Faulkner Hospital.}
#' \item{SRH_MRN}{numeric, Unique Medical Record Number for Spaulding Rehabilitation Hospital.}
#' \item{NWH_MRN}{numeric, Unique Medical Record Number for Newton-Wellesley Hospital.}
#' \item{NSMC_MRN}{numeric, Unique Medical Record Number for North Shore Medical Center.}
#' \item{MCL_MRN}{numeric, Unique Medical Record Number for McLean Hospital.}
#' \item{MEE_MRN}{numeric, Unique Medical Record Number for Mass Eye and Ear.}
#' \item{DFC_MRN}{numeric, Unique Medical Record Number for Dana Farber Cancer center.}
#' \item{WDH_MRN}{numeric, Unique Medical Record Number for Wentworth-Douglass Hospital.}
#' }
#'
#' @encoding UTF-8
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hdma2.R
\name{wrap_mediation}
\alias{wrap_mediation}
\title{Run mediation analysis for a set of markers}
\usage{
wrap_mediation(qval, X, Y, M, covar = NULL, U = NULL, FDR = 0.1, sims = 3, ...)
}
\arguments{
\item{qval}{set of qValues from max2() function}
\item{X}{an explanatory variable matrix with n rows and d columns.
Each column corresponds to a distinct explanatory variable (Exposure).
Explanatory variables must be encoded as numeric variables.}
\item{Y}{an explanatory variable matrix with n rows and d columns.
Each column corresponds to a distinct explanatory variable (Outcome).
Explanatory variables must be encoded as numeric variables.}
\item{M}{a response variable matrix with n rows and p columns.
Each column corresponds to a beta-normalized methylation profile.
Response variables must be encoded as numeric. No NAs allowed.}
\item{covar}{set of covariable, must be numeric.}
\item{U}{set of latent factors from mEWAS() function}
\item{FDR}{FDR threshold to pass markers in mediation analysis}
\item{sims}{number of Monte Carlo draws for nonparametric bootstrap or quasi-Bayesian approximation.
10000 is recommended.}
\item{...}{argument of the mediate function from the mediation package}
}
\value{
Tables of results of mediation analyzes for markers with a qValue below the FDR threshold.
Indirect effect (ACME - average causal mediation effect), ADE (average direct effect),
PM (proportion mediated) and TE (total effect). Composition of tables: estimated effect,
confidence interval and mediation pValue.
We also return the results of the linear regressions.
The xm table corresponds to the regressions of X on Mi and
the my table to the regressions of Y on Mi knowing X.
With Mi corresponding to the different CpGs tested.
}
\description{
Estimate various quantities for causal mediation analysis for each
significant markers, including average causal mediation effects
(indirect effect), average direct effects, proportions mediated,
and total effect.
}
\details{
We use the mediate() function of the mediation package on the set of markers having a qValue lower
than the FDR threshold. This function makes it possible to estimate their indirect effects and to
test their significance.
}
\examples{
library(hdma2)
data(example)
# Run mEWAS
res <- mEWAS(X = example$X, Y = example$Y, M = example$M, K = 5)
# Keep latent factors for mediation
U <- res$U2
# Run max2
res <- max2(pval1 = res$pValue[, 1], pval2 = res$pValue[, 2])
# Run mediation (only 3 simulations for estimate and test indirect effect)
res <- wrap_mediation(qval = res$qval,
X = example$X,
Y = example$Y,
M = example$M,
U = U, sims = 3)
# Plot summary
plot_summary_ACME(res$ACME)
plot_summary_med(res)
}
\author{
Basile Jumentier
}
| /man/wrap_mediation.Rd | no_license | jumentib/hdma2 | R | false | true | 2,951 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hdma2.R
\name{wrap_mediation}
\alias{wrap_mediation}
\title{Run mediation analysis for a set of markers}
\usage{
wrap_mediation(qval, X, Y, M, covar = NULL, U = NULL, FDR = 0.1, sims = 3, ...)
}
\arguments{
\item{qval}{set of qValues from max2() function}
\item{X}{an explanatory variable matrix with n rows and d columns.
Each column corresponds to a distinct explanatory variable (Exposure).
Explanatory variables must be encoded as numeric variables.}
\item{Y}{an explanatory variable matrix with n rows and d columns.
Each column corresponds to a distinct explanatory variable (Outcome).
Explanatory variables must be encoded as numeric variables.}
\item{M}{a response variable matrix with n rows and p columns.
Each column corresponds to a beta-normalized methylation profile.
Response variables must be encoded as numeric. No NAs allowed.}
\item{covar}{set of covariable, must be numeric.}
\item{U}{set of latent factors from mEWAS() function}
\item{FDR}{FDR threshold to pass markers in mediation analysis}
\item{sims}{number of Monte Carlo draws for nonparametric bootstrap or quasi-Bayesian approximation.
10000 is recommended.}
\item{...}{argument of the mediate function from the mediation package}
}
\value{
Tables of results of mediation analyzes for markers with a qValue below the FDR threshold.
Indirect effect (ACME - average causal mediation effect), ADE (average direct effect),
PM (proportion mediated) and TE (total effect). Composition of tables: estimated effect,
confidence interval and mediation pValue.
We also return the results of the linear regressions.
The xm table corresponds to the regressions of X on Mi and
the my table to the regressions of Y on Mi knowing X.
With Mi corresponding to the different CpGs tested.
}
\description{
Estimate various quantities for causal mediation analysis for each
significant markers, including average causal mediation effects
(indirect effect), average direct effects, proportions mediated,
and total effect.
}
\details{
We use the mediate() function of the mediation package on the set of markers having a qValue lower
than the FDR threshold. This function makes it possible to estimate their indirect effects and to
test their significance.
}
\examples{
library(hdma2)
data(example)
# Run mEWAS
res <- mEWAS(X = example$X, Y = example$Y, M = example$M, K = 5)
# Keep latent factors for mediation
U <- res$U2
# Run max2
res <- max2(pval1 = res$pValue[, 1], pval2 = res$pValue[, 2])
# Run mediation (only 3 simulations for estimate and test indirect effect)
res <- wrap_mediation(qval = res$qval,
X = example$X,
Y = example$Y,
M = example$M,
U = U, sims = 3)
# Plot summary
plot_summary_ACME(res$ACME)
plot_summary_med(res)
}
\author{
Basile Jumentier
}
|
# entire data frame
data_firstread <- read.csv("household_power_consumption.txt", header=TRUE, sep=';',
na.strings="?",
stringsAsFactors=FALSE, dec="." )
# Date class update
data_firstread$Date <- as.Date(data_firstread$Date, format="%d/%m/%Y")
#summary(data_firstread)
#str(data_firstread)
# subset of data - only required data rows
data_subset <- subset(data_firstread,
subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
#str(data_subset)
#summary(data_subset)
## Converting dates
date_time <- as.POSIXct(paste(as.Date(data_subset$Date), data_subset$Time))
#str(date_time)
#str(data_subset)
#format conversion as.numeric
GAPower <- as.numeric(data_subset$Global_active_power)
#str(GAPower)
# creaing Graphic Device .png file
png("plot2.png")
#str(date_time)
plot(date_time, GAPower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | mahuq/ExData_Plotting1 | R | false | false | 941 | r | # entire data frame
data_firstread <- read.csv("household_power_consumption.txt", header=TRUE, sep=';',
na.strings="?",
stringsAsFactors=FALSE, dec="." )
# Date class update
data_firstread$Date <- as.Date(data_firstread$Date, format="%d/%m/%Y")
#summary(data_firstread)
#str(data_firstread)
# subset of data - only required data rows
data_subset <- subset(data_firstread,
subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
#str(data_subset)
#summary(data_subset)
## Converting dates
date_time <- as.POSIXct(paste(as.Date(data_subset$Date), data_subset$Time))
#str(date_time)
#str(data_subset)
#format conversion as.numeric
GAPower <- as.numeric(data_subset$Global_active_power)
#str(GAPower)
# creaing Graphic Device .png file
png("plot2.png")
#str(date_time)
plot(date_time, GAPower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
###############################################
# Code for Decompression Census data cleaning #
###############################################
#####################
#Import libraries
library(data.table)
library(stringr)
library(dplyr)
transpose_data <- function(dat){
  # Names for the first 11 survey fields, in the order they appear as rows
  # of the raw data-entry sheet.
  field_names <- c("shift", "lane", "clicker", "skipVeto", "birth",
                   "resideNumeric", "residence_details", "gender",
                   "attend_decom", "attend_burn", "nbburns")
  # Drop the two leading label columns and flip the sheet so that each
  # respondent (originally a column) becomes a row.
  respondents <- data.frame(t(dat[, -(1:2)]))
  # Keep only the fields we have names for, then attach those names.
  respondents <- respondents[, seq_along(field_names)]
  names(respondents) <- field_names
  respondents
}
#Apply over file paths to get a list of all data sets
filePaths <- list.files("~/Documents/census/Data/main_results_2017/2017_SF_Decom_Sampling_Data", full.names = TRUE)
filePaths <- filePaths[grepl(".csv", filePaths)]
dataList <- lapply(filePaths, read.csv, na.strings = c(".", " ", ""), nrows = 24, header = FALSE)
dataList <- lapply(dataList, transpose_data)
#add var to identify keypunchers and add column names
fileNames <- list.files("~/Documents/census/Data/main_results_2017/2017_SF_Decom_Sampling_Data", full.names = FALSE)
fileNames <- fileNames[grepl(".csv", fileNames)]
fileNameSplit <- str_split(fileNames, "_")
keyPunchers <- sapply(fileNameSplit, function(x){str_to_lower(x[1])})
dateEntered <- sapply(fileNameSplit, function(x){str_extract(x[2], ".+(?=.csv)")})
# keyPunchers <- str_extract(filePaths, "(?<=_)[A-Za-z]+(?=.csv)")
dataList <- lapply(1:length(dataList), function(i, d, kp, de){
dTemp = d[[i]]
dTemp[,"keypuncher"] = kp[i]
dTemp[,"dateEntered"] = de[i]
dTemp
}, d = dataList, kp = keyPunchers, de = dateEntered)
# Row-bind the per-keypuncher data sets into one frame and trim stray
# whitespace from every field. Note apply() coerces the frame to a character
# matrix, so every column is character afterwards.
mergedData = Reduce(rbind, dataList)
mergedData = as.data.frame(apply(mergedData, 2, trimws), stringsAsFactors = FALSE)
rownames(mergedData) <- NULL
#decode values
#Residence: map the numeric codes (including known multi-code entries such
#as "12", "23" and "3,4") to labels; anything else becomes NA.
mergedData$residence = ifelse(mergedData$resideNumeric == "1", "CA - San Francisco",
ifelse(mergedData$resideNumeric %in% c("2", "12"), "CA - Other City",
ifelse(mergedData$resideNumeric %in% c("3", "23"), "USA_other",
ifelse(mergedData$resideNumeric %in% c("4", "3,4"), "Other", NA))))
#Move SF information to residence_details and create combined SF category.
#This makes it more comparable to main census results.
mergedData$residence_details[mergedData$residence == "CA - San Francisco"] = "San Francisco"
mergedData$residence[mergedData$residence %in% c("CA - San Francisco", "CA - Other City")] = "California"
mergedData$resideNumeric = NULL
#Gender: 1 = female, 2 = male, 3 = fluid, otherwise NA
mergedData$gender = ifelse(mergedData$gender == 1, "female",
ifelse(mergedData$gender == 2, "male",
ifelse(mergedData$gender == 3, "fluid", NA)))
#Attended decompression previously: 1 = yes, 2 = no
mergedData$attend_decom = ifelse(mergedData$attend_decom == 1, "Yes",
ifelse(mergedData$attend_decom == 2, "No", NA))
#Attended burn previously: 1 = this year, 2 = a prior year, 3 = never
mergedData$attend_burn = ifelse(mergedData$attend_burn == 1, "Yes, in 2017",
ifelse(mergedData$attend_burn == 2, "Yes, not in 2017",
ifelse(mergedData$attend_burn == 3, "No", NA)))
# Persist the combined, decoded sampling data.
write.csv(mergedData, file = "2017_SF_Decom_Combined_Sampling_data.csv")
| /combine_data_entry_forms.R | no_license | gridge/bm-census-decomp-2017 | R | false | false | 3,335 | r | ###############################################
# Code for Decompression Census data cleaning #
###############################################
#####################
#Import libraries
library(data.table)
library(stringr)
library(dplyr)
# Reshape one raw entry sheet: drop the first two (label) columns, flip rows
# and columns, keep the first 11 transposed columns, and attach the survey
# field names. Returns a data.frame with one row per original data column.
transpose_data <- function(dat) {
  field_names <- c("shift", "lane", "clicker", "skipVeto", "birth",
                   "resideNumeric", "residence_details", "gender",
                   "attend_decom", "attend_burn", "nbburns")
  flipped <- data.frame(t(dat[, -c(1, 2)]))
  flipped <- flipped[, seq_along(field_names)]
  names(flipped) <- field_names
  flipped
}
# Read every CSV entry sheet in the sampling-data folder into a list of data
# frames (first 24 rows, no header; ".", " " and "" treated as NA), then
# reshape each sheet with transpose_data().
#Apply over file paths to get a list of all data sets
filePaths <- list.files("~/Documents/census/Data/main_results_2017/2017_SF_Decom_Sampling_Data", full.names = TRUE)
# NOTE(review): grepl(".csv", ...) has an unescaped dot; "\\.csv$" is stricter.
filePaths <- filePaths[grepl(".csv", filePaths)]
dataList <- lapply(filePaths, read.csv, na.strings = c(".", " ", ""), nrows = 24, header = FALSE)
dataList <- lapply(dataList, transpose_data)
#add var to identify keypunchers and add column names
fileNames <- list.files("~/Documents/census/Data/main_results_2017/2017_SF_Decom_Sampling_Data", full.names = FALSE)
fileNames <- fileNames[grepl(".csv", fileNames)]
# File names are assumed to look like "<keypuncher>_<date>.csv" -- TODO confirm.
fileNameSplit <- str_split(fileNames, "_")
keyPunchers <- sapply(fileNameSplit, function(x){str_to_lower(x[1])})
# NOTE(review): ".+(?=.csv)" leaves the dot unescaped; ".+(?=\\.csv)" intended?
dateEntered <- sapply(fileNameSplit, function(x){str_extract(x[2], ".+(?=.csv)")})
# keyPunchers <- str_extract(filePaths, "(?<=_)[A-Za-z]+(?=.csv)")
# Tag every per-file data frame with who entered it and when.
# NOTE(review): 1:length(dataList) misbehaves on an empty list; prefer seq_along.
dataList <- lapply(1:length(dataList), function(i, d, kp, de){
dTemp = d[[i]]
dTemp[,"keypuncher"] = kp[i]
dTemp[,"dateEntered"] = de[i]
dTemp
}, d = dataList, kp = keyPunchers, de = dateEntered)
#row bind the data sets
mergedData = Reduce(rbind, dataList)
# apply() runs on a matrix, so trimws() here coerces every column to character.
mergedData = as.data.frame(apply(mergedData, 2, trimws), stringsAsFactors = FALSE)
rownames(mergedData) <- NULL
#decode values
#Residence
# Map raw residence codes (including double-keyed entries such as "12", "23"
# and "3,4") onto labels; unrecognised codes become NA.
mergedData$residence = ifelse(mergedData$resideNumeric == "1", "CA - San Francisco",
ifelse(mergedData$resideNumeric %in% c("2", "12"), "CA - Other City",
ifelse(mergedData$resideNumeric %in% c("3", "23"), "USA_other",
ifelse(mergedData$resideNumeric %in% c("4", "3,4"), "Other", NA))))
#Move SF information to residence_details and create combined SF category. This makes it more comparable to main census results
mergedData$residence_details[mergedData$residence == "CA - San Francisco"] = "San Francisco"
mergedData$residence[mergedData$residence %in% c("CA - San Francisco", "CA - Other City")] = "California"
mergedData$resideNumeric = NULL
#Gender
# Recode 1/2/3 to labels; any other code becomes NA.
mergedData$gender = ifelse(mergedData$gender == 1, "female",
ifelse(mergedData$gender == 2, "male",
ifelse(mergedData$gender == 3, "fluid", NA)))
#Attended decompression previously
mergedData$attend_decom = ifelse(mergedData$attend_decom == 1, "Yes",
ifelse(mergedData$attend_decom == 2, "No", NA))
#Attended burn previously
mergedData$attend_burn = ifelse(mergedData$attend_burn == 1, "Yes, in 2017",
ifelse(mergedData$attend_burn == 2, "Yes, not in 2017",
ifelse(mergedData$attend_burn == 3, "No", NA)))
# Write the cleaned, combined sampling data into the working directory.
write.csv(mergedData, file = "2017_SF_Decom_Combined_Sampling_data.csv")
|
# 16-10-2018
# Week 3
# Peer-graded assignment: R Markdown Presentation & Plotly
#
# Reads weekly car/truck/SUV counts from a tab-delimited file and renders a
# line chart (axes, title, legend) into figure.png.
# NOTE(review): paths are hard-coded to C:/R/ and the data are assumed to be
# all-numeric with columns named cars, trucks and suvs -- confirm before reuse.
# Read car and truck values from tab-delimited autos.dat
autos_data <- read.table("C:/R/autos.dat", header=TRUE, sep="\t")
# Compute the largest y value used in the data (or we could
# just use range again)
max_y <- max(autos_data)
# Define colors to be used for cars, trucks, suvs
plot_colors <- c("blue","red","forestgreen")
# Start PNG device driver to save output to figure.png
png(filename="C:/R/figure.png", height=295, width=300,
    bg="white")
# Graph autos using y axis that ranges from 0 to max_y.
# Turn off axes and annotations (axis labels) so we can
# specify them ourselves
plot(autos_data$cars, type="o", col=plot_colors[1],
     ylim=c(0,max_y), axes=FALSE, ann=FALSE)
# Make x axis using Mon-Fri labels (spell out `labels=` instead of relying on
# partial argument matching via `lab=`)
axis(1, at=1:5, labels=c("Mon", "Tue", "Wed", "Thu", "Fri"))
# Make y axis with horizontal labels that display ticks at every 4 marks;
# `:` binds tighter than `*`, so 4*0:max_y is c(0, 4, 8, ...).
axis(2, las=1, at=4*0:max_y)
# Create box around plot
box()
# Graph trucks with red dashed line and square points
lines(autos_data$trucks, type="o", pch=22, lty=2,
      col=plot_colors[2])
# Graph suvs with green dotted line and diamond points
lines(autos_data$suvs, type="o", pch=23, lty=3,
      col=plot_colors[3])
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Label the x and y axes with dark green text
title(xlab= "Days", col.lab=rgb(0,0.5,0))
title(ylab= "Total", col.lab=rgb(0,0.5,0))
# Create a legend at (1, max_y) that is slightly smaller
# (cex) and uses the same line colors and points used by
# the actual plots
legend(1, max_y, names(autos_data), cex=0.8, col=plot_colors,
       pch=21:23, lty=1:3)
# Turn off device driver (to flush output to png)
dev.off()
# Graph plotting data from autos.dat
# (the line above was bare prose in the original, which made the whole script
# a syntax error when sourced; it is now a comment)
| /peer-graded assigment r markdown presentation plotly.r | no_license | Sudhanshu0809/R-Markdown-Presentation-Plotly | R | false | false | 1,923 | r | # 16-10-2018
# Week 3
# Peer-graded assignment: R Markdown Presentation & Plotly
#
# Reads weekly car/truck/SUV counts from a tab-delimited file and renders a
# line chart (axes, title, legend) into figure.png.
# NOTE(review): paths are hard-coded to C:/R/ and the data are assumed to be
# all-numeric with columns named cars, trucks and suvs -- confirm before reuse.
# Read car and truck values from tab-delimited autos.dat
autos_data <- read.table("C:/R/autos.dat", header=TRUE, sep="\t")
# Compute the largest y value used in the data (or we could
# just use range again)
max_y <- max(autos_data)
# Define colors to be used for cars, trucks, suvs
plot_colors <- c("blue","red","forestgreen")
# Start PNG device driver to save output to figure.png
png(filename="C:/R/figure.png", height=295, width=300,
    bg="white")
# Graph autos using y axis that ranges from 0 to max_y.
# Turn off axes and annotations (axis labels) so we can
# specify them ourselves
plot(autos_data$cars, type="o", col=plot_colors[1],
     ylim=c(0,max_y), axes=FALSE, ann=FALSE)
# Make x axis using Mon-Fri labels (spell out `labels=` instead of relying on
# partial argument matching via `lab=`)
axis(1, at=1:5, labels=c("Mon", "Tue", "Wed", "Thu", "Fri"))
# Make y axis with horizontal labels that display ticks at every 4 marks;
# `:` binds tighter than `*`, so 4*0:max_y is c(0, 4, 8, ...).
axis(2, las=1, at=4*0:max_y)
# Create box around plot
box()
# Graph trucks with red dashed line and square points
lines(autos_data$trucks, type="o", pch=22, lty=2,
      col=plot_colors[2])
# Graph suvs with green dotted line and diamond points
lines(autos_data$suvs, type="o", pch=23, lty=3,
      col=plot_colors[3])
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Label the x and y axes with dark green text
title(xlab= "Days", col.lab=rgb(0,0.5,0))
title(ylab= "Total", col.lab=rgb(0,0.5,0))
# Create a legend at (1, max_y) that is slightly smaller
# (cex) and uses the same line colors and points used by
# the actual plots
legend(1, max_y, names(autos_data), cex=0.8, col=plot_colors,
       pch=21:23, lty=1:3)
# Turn off device driver (to flush output to png)
dev.off()
# Graph plotting data from autos.dat
# (the line above was bare prose in the original, which made the whole script
# a syntax error when sourced; it is now a comment)
|
# Regression tests for data.cube's rollup()/format() behaviour, exercised on a
# small in-memory array (no hierarchy) and on a star-schema cube (hierarchy).
# Each stopifnot() pins expected dimensions, column names and NA ordering.
library(data.table)
library(data.cube)
### no hierarchy ----------------------------------------------------------
set.seed(1L)
ar.dimnames = list(color = sort(c("green","yellow","red")),
year = as.character(2011:2015),
status = sort(c("active","inactive","archived","removed")))
ar.dim = sapply(ar.dimnames, length)
# 3x5x4 array with some NA measures mixed in
ar = array(sample(c(rep(NA, 4), 4:7/2), prod(ar.dim), TRUE),
unname(ar.dim),
ar.dimnames)
cb = as.cube(ar)
# sorting of NA to last
r = rollup(cb, MARGIN = c("color","year"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(color) & is.na(year), .N==1L])
# custom format per measure, see *currency* example from nice SO: http://stackoverflow.com/a/23833928/2490497
printCurrency = function(value, currency.sym="$", digits=2, sep=",", decimal=".", ...) paste(currency.sym, formatC(value, format = "f", big.mark = sep, digits=digits, decimal.mark=decimal), sep="")
stopifnot(printCurrency(123123.334)=="$123,123.33")
rcurrency = format(r, measure.format = list(value = printCurrency))
# stripping the literal "$" (fixed = TRUE) must round-trip back to the numbers
stopifnot(
is.character(rcurrency$value),
as.numeric(gsub("$", "", rcurrency$value, fixed = TRUE))==format(r)$value
)
# dcast 2D
r = cb["green"]
rr = format(r, dcast = TRUE, formula = year ~ status)
stopifnot(all.equal(dim(rr), c(4L,4L)), identical(names(rr), c("year","active","inactive","removed")))
# dcast 3D
r = cb[c("green","red"),,c("active","inactive")]
rr = format(r, dcast = TRUE, formula = year ~ status + color)
stopifnot(all.equal(dim(rr), c(5L,5L)), identical(names(rr), c("year","active_green","active_red","inactive_green","inactive_red")))
### hierarchy ---------------------------------------------------------------
# populate_star() ships with data.cube and builds a synthetic star schema
cb = as.cube(populate_star(1e5))
# sorting of NA to last
r = rollup(cb, MARGIN = c("prod_name","geog_abb"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(prod_name) & is.na(geog_abb), .N==1L])
# format rollup same dimension on 2 attributes in bad order - normalize to not use surrogate key
r = rollup(cb, MARGIN = c("geog_abb", "geog_division_name"), j = .(value = sum(value)), normalize = FALSE)
stopifnot(
is.data.table(r),
identical(names(r), c("geog_abb","geog_division_name","level","value")),
nrow(r)==101L,
r[nrow(r)][is.na(geog_abb) & is.na(geog_division_name), .N==1L]
)
# same as above in right order
r = rollup(cb, MARGIN = c("geog_division_name","geog_abb"), j = .(value = sum(value)), normalize = FALSE)
stopifnot(
is.data.table(r),
identical(names(r), c("geog_division_name","geog_abb","level","value")),
nrow(r)==60L,
r[nrow(r)][is.na(geog_abb) & is.na(geog_division_name), .N==1L]
)
# using hierarchy
r = rollup(cb, MARGIN = c("geog_division_name","time_year"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(geog_division_name) & is.na(time_year), .N==1L], nrow(rr)==55L)
# reverse order of dims
r = rollup(cb, MARGIN = c("time_year","geog_division_name"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(geog_division_name) & is.na(time_year), .N==1L], nrow(rr)==51L)
# custom format per measure, see *currency* example from nice SO: http://stackoverflow.com/a/23833928/2490497
printCurrency = function(value, currency.sym="$", digits=2, sep=",", decimal=".", ...) paste(currency.sym, formatC(value, format = "f", big.mark = sep, digits=digits, decimal.mark=decimal), sep="")
stopifnot(printCurrency(123123.334)=="$123,123.33")
rcurrency = format(r, measure.format = list(value = printCurrency))
# here the totals are large enough to carry a thousands separator, so both
# the "$" and the "," must be stripped before the numeric round-trip check
stopifnot(
is.character(rcurrency$value),
all(substr(rcurrency$value,1L,1L)=="$"),
all.equal(as.numeric(gsub(",", "", gsub("$", "", rcurrency$value, fixed = TRUE))),format(r)$value)
)
# format levels of aggregates
r = rollup(cb, c("time_year","geog_region_name", "curr_type","prod_gear"), FUN = sum)
# slicing the 5th index selects aggregation levels; 4L yields the single
# grand-total row
stopifnot(
nrow(format(r[,,,,0L]))==120L,
nrow(format(r[,,,,1L]))==5L,
nrow(format(r[,,,,2L]))==20L,
nrow(format(r[,,,,3L]))==40L,
nrow(format(r[,,,,4L]))==1L
)
# all dims should have single columns as rollup was on highest aggregates
stopifnot(all(r$dapply(ncol)==1L))
# dcast
r = rollup(cb, c("time_year","geog_division_name"), FUN = sum)
rr = format(r, dcast = TRUE, formula = time_year ~ geog_division_name)
dr = as.data.table(r, dcast = TRUE, formula = time_year ~ geog_division_name)
stopifnot(
all.equal(dim(rr), c(6L,11L)),
identical(names(rr), c("time_year", "East North Central", "East South Central", "Middle Atlantic", "Mountain", "New England", "Pacific", "South Atlantic", "West North Central", "West South Central", "NA")),
all.equal(dim(dr), c(6L,11L)),
identical(names(dr), c("time_year", "East North Central", "East South Central", "Middle Atlantic", "Mountain", "New England", "Pacific", "South Atlantic", "West North Central", "West South Central", "NA"))
)
# tests status ------------------------------------------------------------
invisible(TRUE)
| /tests/tests-format.R | no_license | AndreMikulec/data.cube | R | false | false | 5,059 | r | library(data.table)
# Regression tests for data.cube's rollup()/format() behaviour (duplicate
# copy; first `library(data.table)` line is fused into the metadata row above).
library(data.cube)
### no hierarchy ----------------------------------------------------------
set.seed(1L)
ar.dimnames = list(color = sort(c("green","yellow","red")),
year = as.character(2011:2015),
status = sort(c("active","inactive","archived","removed")))
ar.dim = sapply(ar.dimnames, length)
# 3x5x4 array with some NA measures mixed in
ar = array(sample(c(rep(NA, 4), 4:7/2), prod(ar.dim), TRUE),
unname(ar.dim),
ar.dimnames)
cb = as.cube(ar)
# sorting of NA to last
r = rollup(cb, MARGIN = c("color","year"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(color) & is.na(year), .N==1L])
# custom format per measure, see *currency* example from nice SO: http://stackoverflow.com/a/23833928/2490497
printCurrency = function(value, currency.sym="$", digits=2, sep=",", decimal=".", ...) paste(currency.sym, formatC(value, format = "f", big.mark = sep, digits=digits, decimal.mark=decimal), sep="")
stopifnot(printCurrency(123123.334)=="$123,123.33")
rcurrency = format(r, measure.format = list(value = printCurrency))
# stripping the literal "$" (fixed = TRUE) must round-trip back to the numbers
stopifnot(
is.character(rcurrency$value),
as.numeric(gsub("$", "", rcurrency$value, fixed = TRUE))==format(r)$value
)
# dcast 2D
r = cb["green"]
rr = format(r, dcast = TRUE, formula = year ~ status)
stopifnot(all.equal(dim(rr), c(4L,4L)), identical(names(rr), c("year","active","inactive","removed")))
# dcast 3D
r = cb[c("green","red"),,c("active","inactive")]
rr = format(r, dcast = TRUE, formula = year ~ status + color)
stopifnot(all.equal(dim(rr), c(5L,5L)), identical(names(rr), c("year","active_green","active_red","inactive_green","inactive_red")))
### hierarchy ---------------------------------------------------------------
# populate_star() ships with data.cube and builds a synthetic star schema
cb = as.cube(populate_star(1e5))
# sorting of NA to last
r = rollup(cb, MARGIN = c("prod_name","geog_abb"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(prod_name) & is.na(geog_abb), .N==1L])
# format rollup same dimension on 2 attributes in bad order - normalize to not use surrogate key
r = rollup(cb, MARGIN = c("geog_abb", "geog_division_name"), j = .(value = sum(value)), normalize = FALSE)
stopifnot(
is.data.table(r),
identical(names(r), c("geog_abb","geog_division_name","level","value")),
nrow(r)==101L,
r[nrow(r)][is.na(geog_abb) & is.na(geog_division_name), .N==1L]
)
# same as above in right order
r = rollup(cb, MARGIN = c("geog_division_name","geog_abb"), j = .(value = sum(value)), normalize = FALSE)
stopifnot(
is.data.table(r),
identical(names(r), c("geog_division_name","geog_abb","level","value")),
nrow(r)==60L,
r[nrow(r)][is.na(geog_abb) & is.na(geog_division_name), .N==1L]
)
# using hierarchy
r = rollup(cb, MARGIN = c("geog_division_name","time_year"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(geog_division_name) & is.na(time_year), .N==1L], nrow(rr)==55L)
# reverse order of dims
r = rollup(cb, MARGIN = c("time_year","geog_division_name"), j = .(value = sum(value)))
rr = format(r)
stopifnot(is.data.table(rr), rr[nrow(rr)][is.na(geog_division_name) & is.na(time_year), .N==1L], nrow(rr)==51L)
# custom format per measure, see *currency* example from nice SO: http://stackoverflow.com/a/23833928/2490497
printCurrency = function(value, currency.sym="$", digits=2, sep=",", decimal=".", ...) paste(currency.sym, formatC(value, format = "f", big.mark = sep, digits=digits, decimal.mark=decimal), sep="")
stopifnot(printCurrency(123123.334)=="$123,123.33")
rcurrency = format(r, measure.format = list(value = printCurrency))
# here the totals are large enough to carry a thousands separator, so both
# the "$" and the "," must be stripped before the numeric round-trip check
stopifnot(
is.character(rcurrency$value),
all(substr(rcurrency$value,1L,1L)=="$"),
all.equal(as.numeric(gsub(",", "", gsub("$", "", rcurrency$value, fixed = TRUE))),format(r)$value)
)
# format levels of aggregates
r = rollup(cb, c("time_year","geog_region_name", "curr_type","prod_gear"), FUN = sum)
# slicing the 5th index selects aggregation levels; 4L yields the single
# grand-total row
stopifnot(
nrow(format(r[,,,,0L]))==120L,
nrow(format(r[,,,,1L]))==5L,
nrow(format(r[,,,,2L]))==20L,
nrow(format(r[,,,,3L]))==40L,
nrow(format(r[,,,,4L]))==1L
)
# all dims should have single columns as rollup was on highest aggregates
stopifnot(all(r$dapply(ncol)==1L))
# dcast
r = rollup(cb, c("time_year","geog_division_name"), FUN = sum)
rr = format(r, dcast = TRUE, formula = time_year ~ geog_division_name)
dr = as.data.table(r, dcast = TRUE, formula = time_year ~ geog_division_name)
stopifnot(
all.equal(dim(rr), c(6L,11L)),
identical(names(rr), c("time_year", "East North Central", "East South Central", "Middle Atlantic", "Mountain", "New England", "Pacific", "South Atlantic", "West North Central", "West South Central", "NA")),
all.equal(dim(dr), c(6L,11L)),
identical(names(dr), c("time_year", "East North Central", "East South Central", "Middle Atlantic", "Mountain", "New England", "Pacific", "South Atlantic", "West North Central", "West South Central", "NA"))
)
# tests status ------------------------------------------------------------
invisible(TRUE)
|
# Paired t-tests comparing entropy/RSA correlations between evolved and
# Rosetta designs (inputs come from this repo's graphing pipeline).
# T test
# set working directory
# NOTE(review): setwd() to a machine-specific path -- adjust before reuse.
setwd("~/Desktop/calc_score/matched_stability/graphing/cor_entropy_RSA/")
# Load packages
library(tidyr)
library(ggplot2)
library(dplyr)
# read data
# NOTE(review): sep = "" means "any whitespace" despite the .csv extensions.
evolved_matched <- read.csv("graph_mean_data_evolved_matched.csv", header = TRUE, sep = "")
rosetta_matched <- read.csv("graph_mean_data_rosetta_matched.csv", header = TRUE, sep = "")
evolved_max <- read.csv("graph_mean_data_evolved_max.csv", header = TRUE, sep = "")
evolved_mean <- read.csv("graph_mean_data_evolved_mean.csv", header = TRUE, sep = "")
rosetta <- read.csv("graph_mean_data_rosetta.csv", header = TRUE, sep = "")
# t-test
# One-sided paired tests: is the evolved correlation greater than Rosetta's?
# NOTE(review): paired = T uses the reassignable shortcut T; TRUE is safer.
t.test(evolved_matched$cor_entropy_RSA, rosetta_matched$cor_entropy_RSA,
alternative = "greater", paired = T)
t.test(evolved_mean$cor_entropy_RSA, rosetta$cor_entropy_RSA,
alternative = "greater", paired = T)
t.test(evolved_max$cor_entropy_RSA, rosetta$cor_entropy_RSA,
alternative = "greater", paired = T)
# Two-sided paired test between the two evolved summaries
t.test(evolved_mean$cor_entropy_RSA, evolved_max$cor_entropy_RSA,
paired = T)
| /graphing/cor_entropy_RSA_combo/t_test.R | no_license | clauswilke/evol_sim_vs_rosetta | R | false | false | 1,047 | r |
# T test
# set working directory
setwd("~/Desktop/calc_score/matched_stability/graphing/cor_entropy_RSA/")
# Load packages
library(tidyr)
library(ggplot2)
library(dplyr)
# read data
evolved_matched <- read.csv("graph_mean_data_evolved_matched.csv", header = TRUE, sep = "")
rosetta_matched <- read.csv("graph_mean_data_rosetta_matched.csv", header = TRUE, sep = "")
evolved_max <- read.csv("graph_mean_data_evolved_max.csv", header = TRUE, sep = "")
evolved_mean <- read.csv("graph_mean_data_evolved_mean.csv", header = TRUE, sep = "")
rosetta <- read.csv("graph_mean_data_rosetta.csv", header = TRUE, sep = "")
# t-test
t.test(evolved_matched$cor_entropy_RSA, rosetta_matched$cor_entropy_RSA,
alternative = "greater", paired = T)
t.test(evolved_mean$cor_entropy_RSA, rosetta$cor_entropy_RSA,
alternative = "greater", paired = T)
t.test(evolved_max$cor_entropy_RSA, rosetta$cor_entropy_RSA,
alternative = "greater", paired = T)
t.test(evolved_mean$cor_entropy_RSA, evolved_max$cor_entropy_RSA,
paired = T)
|
# plt_sizeComps.R
# Algorithm for sorting factors so plots descend in rows.
# 1. get length of 1d ribbon (n)
# 2. determine ncol = ceiling(n/sqrt(n))
# 3. determine nrow = ceiling(n/ncol)
# 4. convert year vector into matrix with nrow & ncol
# 5. transpose & vectorize as.vector(t(matrix(yr,nrow,ncol)))
# 6. Use this as levels for years factor.
# Build a list of long-format size-composition tables, one per non-empty
# fleet/sex/type/shell combination, from a named list of model reports M.
# Each element carries observed values plus matching `pred` and `resd`
# columns taken from the predicted and residual composition matrices.
# Relies on package-level lookup vectors (.FLEET, .SEX, .SHELL, .MATURITY,
# .TYPE, .SEAS) and on melt()/filter()/%>% (presumably reshape2 and dplyr).
get.df <- function(M)
{
n <- length(M)
ldf <- list()
mdf <- mpf <- mrf <-NULL
# First pass: stack observed (df), predicted (pf) and residual (rf) tables
# across all models, decoding the integer codes into labels.
for(i in 1:n)
{
A <- M[[i]]
df <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_obs_size_comps))
pf <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_pre_size_comps))
rf <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_res_size_comps))
colnames(df) <- tolower(c("Model",
"Year", "Seas",
"Fleet", "Sex",
"Type", "Shell",
"Maturity", "Nsamp",
as.character(A$mid_points)))
colnames(pf) <- colnames(rf) <- colnames(df)
df$fleet <- pf$fleet <- rf$fleet <- .FLEET[df$fleet]
df$sex <- pf$sex <- rf$sex <- .SEX[df$sex+1]
df$shell <- pf$shell <- rf$shell <- .SHELL[df$shell+1]
df$maturity <- pf$maturity <- rf$maturity <- .MATURITY[df$maturity+1]
df$type <- pf$type <- rf$type <- .TYPE[df$type+1]
df$seas <- pf$seas <- rf$seas <- .SEAS[df$seas]
mdf <- rbind(mdf,df)
mpf <- rbind(mpf,pf)
mrf <- rbind(mrf,rf)
}
# Reshape to long format: one row per size bin (columns 10+ become variable/value)
mdf <- melt(mdf,id.var=1:9)
mpf <- melt(mpf,id.var=1:9)
mrf <- melt(mrf,id.var=1:9)
# NOTE(review): this outer loop never uses `i` and re-filters the already
# combined tables, so ldf is rebuilt identically n times -- looks vestigial.
# NOTE(review): the inner loops iterate over `df`, which at this point is the
# LAST model's table from the first pass -- confirm all models share the same
# fleet/sex/type/shell levels, otherwise combinations can be missed.
for(i in 1:n)
{
j <- 1
for(k in unique(df$fleet))
for(h in unique(df$sex))
for(t in unique(df$type))
for(s in unique(df$shell))
{
tdf <- mdf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
tpf <- mpf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
trf <- mrf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
if(dim(tdf)[1]!=0)
{
# determine row & column.
# fyr = unique(tdf$year)
# syr = min(fyr); nyr = max(fyr)
# nn = (nyr-syr+1)
# nc = ceiling(nn/sqrt(nn))
# irow = icol = rep(1,length=nn)
# ii = ic = ir = 1
# for(iyr in fyr)
# {
# icol[ii] <- ic
# irow[ii] <- ir
# ic = ic + 1
# ii = ii + 1
# if(ic > nc)
# {
# ic = 1
# ir = ir + 1
# }
# }
# tdf$irow = irow[tdf$year-syr+1]
# tdf$icol = icol[tdf$year-syr+1]
# cat(" n = ",nn,"\n")
# print(tdf$year - syr + 1)
ldf[[j]] <-cbind(tdf,pred=tpf$value,resd=trf$value)
j <- j + 1
}
}
}
return(ldf)
}
# Plot observed (bars) versus predicted (lines) size compositions by year.
# One ggplot is built per gear/sex/type/shell/maturity combination returned
# by get.df(); panels are facetted by year.
#
# M          named list of model report objects; each must carry mid_points.
# which.plot "all" prints every plot, otherwise an index into the plot list.
plot.sizeComps <- function( M, which.plot="all" )
{
  mdf <- get.df( M )
  # seq_along() instead of 1:length() so an empty mid_points vector cannot
  # yield the degenerate sequence c(1, 0)
  ix <- pretty(seq_along(M[[1]]$mid_points))
  p <- ggplot(data=mdf[[1]])
  p <- p + geom_bar(aes(variable,value),stat="identity",position="dodge",alpha=0.15)
  p <- p + geom_line(aes(as.numeric(variable),pred,col=model),alpha=0.85)
  p <- p + scale_x_discrete(breaks=M[[1]]$mid_points[ix])
  p <- p + labs(x="Size (mm)",y="Proportion",col="Model",fill="Sex",linetype="Fleet")
  # placeholder title; overwritten per subset below
  p <- p + ggtitle("title")
  p <- p + facet_wrap(~year) + .THEME #+ coord_flip()
  # p <- p + facet_grid(irow~icol,labeller=label_both) + .THEME
  p <- p + theme(axis.text.x = element_text(angle=45,vjust=0.5))
  # Rebind the template plot to each subset and re-title it with the subset's
  # gear/sex/type/shell/maturity labels
  fun <- function(x,p)
  {
    p$labels$title = paste("Gear =",unique(x$fleet),
                           "\n Sex =",unique(x$sex),
                           ", Type =",unique(x$type),
                           ", Shell=",unique(x$shell),
                           ", Maturity=",unique(x$maturity))
    p %+% x
  }
  plist <- lapply(mdf,fun,p=p)
  if ( which.plot == "all" )
  {
    print( plist )
  }
  else
  {
    print( plist[[which.plot]] )
  }
}
# Residual bubble plots
# Bubble plot of size-composition residuals: point size = |residual|,
# colour = sign of the residual, facetted by model. One plot per subset
# returned by get.df(); titles carry the subset's gear/sex/type labels.
#
# M          named list of model report objects (passed through to get.df).
# which.plot "all" prints every plot, otherwise an index into the plot list.
plot.SizeCompRes <- function( M, which.plot="all" )
{
mdf <- get.df( M )
p <- ggplot(data=mdf[[1]])
p <- p + geom_point(aes(factor(year),variable,col=factor(sign(resd)),size=abs(resd)),alpha=0.6)
# optional tweaks kept for reference:
# p <- p + scale_size_area(max_size=10)
# p <- p + labs(x="Year",y="Length",col="Sign",size="Residual")
# p <- p + scale_x_discrete(breaks=pretty(mdf[[1]]$mod_yrs))
# p <- p + scale_y_discrete(breaks=pretty(mdf[[1]]$mid_points))
p <- p + facet_wrap(~model)+ .THEME
# Rebind the template plot to each subset and re-title it
fun <- function(x,p)
{
p$labels$title = paste("Gear =",unique(x$fleet),"\n Sex =",unique(x$sex),", Type =",unique(x$type))
p %+% x
}
plist <- lapply(mdf,fun,p=p)
if ( which.plot == "all" )
{
print( plist )
}
else
{
print( plist[[which.plot]] )
}
}
| /gmr/R4GMACS/plt_sizeComps.R | no_license | seacode/gmacs | R | false | false | 4,773 | r | # plt_sizeComps.R
# Algorithm for sorting factors so plots descend in rows.
# 1. get length of 1d ribbon (n)
# 2. determine ncol = ceiling(n/sqrt(n))
# 3. determine nrow = ceiling(n/ncol)
# 4. convert year vector into matrix with nrow & ncol
# 5. transpose & vectorize as.vector(t(matrix(yr,nrow,ncol)))
# 6. Use this as levels for years factor.
# Build a list of long-format size-composition tables, one per non-empty
# fleet/sex/type/shell combination, from a named list of model reports M.
# Each element carries observed values plus matching `pred` and `resd`
# columns taken from the predicted and residual composition matrices.
# Relies on package-level lookup vectors (.FLEET, .SEX, .SHELL, .MATURITY,
# .TYPE, .SEAS) and on melt()/filter()/%>% (presumably reshape2 and dplyr).
get.df <- function(M)
{
n <- length(M)
ldf <- list()
mdf <- mpf <- mrf <-NULL
# First pass: stack observed (df), predicted (pf) and residual (rf) tables
# across all models, decoding the integer codes into labels.
for(i in 1:n)
{
A <- M[[i]]
df <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_obs_size_comps))
pf <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_pre_size_comps))
rf <- data.frame(Model=names(M)[i],
cbind(A$d3_SizeComps[,1:8],A$d3_res_size_comps))
colnames(df) <- tolower(c("Model",
"Year", "Seas",
"Fleet", "Sex",
"Type", "Shell",
"Maturity", "Nsamp",
as.character(A$mid_points)))
colnames(pf) <- colnames(rf) <- colnames(df)
df$fleet <- pf$fleet <- rf$fleet <- .FLEET[df$fleet]
df$sex <- pf$sex <- rf$sex <- .SEX[df$sex+1]
df$shell <- pf$shell <- rf$shell <- .SHELL[df$shell+1]
df$maturity <- pf$maturity <- rf$maturity <- .MATURITY[df$maturity+1]
df$type <- pf$type <- rf$type <- .TYPE[df$type+1]
df$seas <- pf$seas <- rf$seas <- .SEAS[df$seas]
mdf <- rbind(mdf,df)
mpf <- rbind(mpf,pf)
mrf <- rbind(mrf,rf)
}
# Reshape to long format: one row per size bin (columns 10+ become variable/value)
mdf <- melt(mdf,id.var=1:9)
mpf <- melt(mpf,id.var=1:9)
mrf <- melt(mrf,id.var=1:9)
# NOTE(review): this outer loop never uses `i` and re-filters the already
# combined tables, so ldf is rebuilt identically n times -- looks vestigial.
# NOTE(review): the inner loops iterate over `df`, which at this point is the
# LAST model's table from the first pass -- confirm all models share the same
# fleet/sex/type/shell levels, otherwise combinations can be missed.
for(i in 1:n)
{
j <- 1
for(k in unique(df$fleet))
for(h in unique(df$sex))
for(t in unique(df$type))
for(s in unique(df$shell))
{
tdf <- mdf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
tpf <- mpf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
trf <- mrf %>% filter(fleet==k) %>% filter(sex==h) %>% filter(type==t) %>% filter(shell==s)
if(dim(tdf)[1]!=0)
{
# determine row & column.
# fyr = unique(tdf$year)
# syr = min(fyr); nyr = max(fyr)
# nn = (nyr-syr+1)
# nc = ceiling(nn/sqrt(nn))
# irow = icol = rep(1,length=nn)
# ii = ic = ir = 1
# for(iyr in fyr)
# {
# icol[ii] <- ic
# irow[ii] <- ir
# ic = ic + 1
# ii = ii + 1
# if(ic > nc)
# {
# ic = 1
# ir = ir + 1
# }
# }
# tdf$irow = irow[tdf$year-syr+1]
# tdf$icol = icol[tdf$year-syr+1]
# cat(" n = ",nn,"\n")
# print(tdf$year - syr + 1)
ldf[[j]] <-cbind(tdf,pred=tpf$value,resd=trf$value)
j <- j + 1
}
}
}
return(ldf)
}
# Plot observed (bars) versus predicted (lines) size compositions by year.
# One ggplot is built per gear/sex/type/shell/maturity combination returned
# by get.df(); panels are facetted by year.
#
# M          named list of model report objects; each must carry mid_points.
# which.plot "all" prints every plot, otherwise an index into the plot list.
plot.sizeComps <- function( M, which.plot="all" )
{
  mdf <- get.df( M )
  # seq_along() instead of 1:length() so an empty mid_points vector cannot
  # yield the degenerate sequence c(1, 0)
  ix <- pretty(seq_along(M[[1]]$mid_points))
  p <- ggplot(data=mdf[[1]])
  p <- p + geom_bar(aes(variable,value),stat="identity",position="dodge",alpha=0.15)
  p <- p + geom_line(aes(as.numeric(variable),pred,col=model),alpha=0.85)
  p <- p + scale_x_discrete(breaks=M[[1]]$mid_points[ix])
  p <- p + labs(x="Size (mm)",y="Proportion",col="Model",fill="Sex",linetype="Fleet")
  # placeholder title; overwritten per subset below
  p <- p + ggtitle("title")
  p <- p + facet_wrap(~year) + .THEME #+ coord_flip()
  # p <- p + facet_grid(irow~icol,labeller=label_both) + .THEME
  p <- p + theme(axis.text.x = element_text(angle=45,vjust=0.5))
  # Rebind the template plot to each subset and re-title it with the subset's
  # gear/sex/type/shell/maturity labels
  fun <- function(x,p)
  {
    p$labels$title = paste("Gear =",unique(x$fleet),
                           "\n Sex =",unique(x$sex),
                           ", Type =",unique(x$type),
                           ", Shell=",unique(x$shell),
                           ", Maturity=",unique(x$maturity))
    p %+% x
  }
  plist <- lapply(mdf,fun,p=p)
  if ( which.plot == "all" )
  {
    print( plist )
  }
  else
  {
    print( plist[[which.plot]] )
  }
}
# Residual bubble plots
# Bubble plot of size-composition residuals: point size = |residual|,
# colour = sign of the residual, facetted by model. One plot per subset
# returned by get.df(); titles carry the subset's gear/sex/type labels.
#
# M          named list of model report objects (passed through to get.df).
# which.plot "all" prints every plot, otherwise an index into the plot list.
plot.SizeCompRes <- function( M, which.plot="all" )
{
mdf <- get.df( M )
p <- ggplot(data=mdf[[1]])
p <- p + geom_point(aes(factor(year),variable,col=factor(sign(resd)),size=abs(resd)),alpha=0.6)
# optional tweaks kept for reference:
# p <- p + scale_size_area(max_size=10)
# p <- p + labs(x="Year",y="Length",col="Sign",size="Residual")
# p <- p + scale_x_discrete(breaks=pretty(mdf[[1]]$mod_yrs))
# p <- p + scale_y_discrete(breaks=pretty(mdf[[1]]$mid_points))
p <- p + facet_wrap(~model)+ .THEME
# Rebind the template plot to each subset and re-title it
fun <- function(x,p)
{
p$labels$title = paste("Gear =",unique(x$fleet),"\n Sex =",unique(x$sex),", Type =",unique(x$type))
p %+% x
}
plist <- lapply(mdf,fun,p=p)
if ( which.plot == "all" )
{
print( plist )
}
else
{
print( plist[[which.plot]] )
}
}
|
#' Compile trial-by-trial results
#'
#' Compiles a data frame of trial-by-trial results for each participant.
#' This function is intended to be called once data have been collected
#' for several participants; it allows the researcher to inspect
#' test results on a trial-by-trial level.
#'
#' Suppose that a participant completes 5 items in the test.
#' They will have 5 rows in the table, corresponding to their 5 items.
#' You can find the participant's rows by filtering by the "p_id" column.
#' The column "num" indexes these rows: 1 corresponds to the first item
#' they took, 2 corresponds to the second item, and so on.
#' The ability scores are given in the "ability_" columns; they tell
#' you the participant's estimated ability after having answered that
#' particular item. To get the participant's final estimated ability score,
#' look for the highest value of "num", in this case 5. Then look up the ability
#' score within that row.
#'
#' There are different columns for the different ability
#' estimation methods, make sure you are looking at the right one.
#' The correct one to look at corresponds to the \code{final_ability.estimator}
#' parameter in your adaptive test. We most commonly use weighted likelihood,
#' i.e. "WL". This is stored in the column "ability_WL".
#' If you are unsure that you are looking at the right column,
#' you can compare the results to the CSV results that you can download
#' from the psychTestR admin interface, which provide
#' solely the participant's final ability score.
#'
#' @param in_dir Results directory to process.
#' @param label Label that the test's results were saved under (e.g. "MDT").
#' @param combine Whether to combine results into one big data frame,
#' or instead to return a list of data frames, one for each participant.
#' @return A data frame, or list of data frames, of trial-by-trial results.
#' @export
# Compile trial-by-trial results for one test across all participants.
# Scans `in_dir` for psychTestR .rds results files, extracts the per-trial
# results table stored under `label`, and tags each table with the
# participant ID and source file. See the roxygen block above for details.
#
# in_dir  results directory to process (character scalar).
# label   label the test's results were saved under, e.g. "MDT".
# combine TRUE to row-bind everything into one data frame (plyr::rbind.fill),
#         FALSE to return a list with one data frame per participant.
compile_trial_by_trial_results <- function(in_dir = "output/results", label,
                                           combine = TRUE) {
  stopifnot(is.character(in_dir), is.scalar(in_dir),
            is.logical(combine), is.scalar(combine))
  if (!dir.exists(in_dir))
    stop("input directory '", in_dir, "' could not be found")
  files <- list.files(in_dir, pattern = "\\.rds",
                      ignore.case = TRUE, full.names = TRUE)
  # Counters reported in the closing message
  n <- list(rds = length(files),
            results = 0L,
            test = 0L)
  res <- list()
  for (i in seq_along(files)) {
    file <- files[i]
    x <- readRDS(file)
    # inherits() is the recommended base-R class test (avoids methods::is())
    if (inherits(x, "results")) {
      n$results <- n$results + 1L
      x <- as.list(x)
      if (!is.null(x[[label]])) {
        if (sum(names(x) == label) > 1L)
          stop("found a results file ('", file, "')",
               " with multiple outputs with label '", label,
               "', not sure what to do here. You might have to process ",
               "this directory manually with readRDS().")
        n$test <- n$test + 1L
        # The trial-by-trial table lives in the "metadata" attribute of the
        # ability estimate
        df <- attr(x[[label]]$ability, "metadata")$results
        if (!is.data.frame(df)) stop("malformed ", label, " results for file ", file)
        p_id <- x$session$p_id
        df <- cbind(p_id = p_id, test = label, file = file, df)
        res[[length(res) + 1L]] <- df
      }
    }
  }
  if (combine) res <- do.call(plyr::rbind.fill, res)
  message(sprintf(paste0("processed %i RDS files, %i of which were psychTestR results files, ",
                         "%i of which contained %s results"),
                  n$rds, n$results, n$test, label))
  res
}
| /R/compile-trial-by-trial-results.R | permissive | pmcharrison/psychTestRCAT | R | false | false | 3,510 | r | #' Compile trial-by-trial results
#'
#' Compiles a data frame of trial-by-trial results for each participant.
#' This function is intended to be called once data have been collected
#' for several participants; it allows the researcher to inspect
#' test results on a trial-by-trial level.
#'
#' Suppose that a participant completes 5 items in the test.
#' They will have 5 rows in the table, corresponding to their 5 items.
#' You can find the participant's rows by filtering by the "p_id" column.
#' The column "num" indexes these rows: 1 corresponds to the first item
#' they took, 2 corresponds to the second item, and so on.
#' The ability scores are given in the "ability_" columns; they tell
#' you the participant's estimated ability after having answered that
#' particular item. To get the participant's final estimated ability score,
#' look for the highest value of "num", in this case 5. Then look up the ability
#' score within that row.
#'
#' There are different columns for the different ability
#' estimation methods, make sure you are looking at the right one.
#' The correct one to look at corresponds to the \code{final_ability.estimator}
#' parameter in your adaptive test. We most commonly use weighted likelihood,
#' i.e. "WL". This is stored in the column "ability_WL".
#' If you are unsure that you are looking at the right column,
#' you can compare the results to the CSV results that you can download
#' from the psychTestR admin interface, which provide
#' solely the participant's final ability score.
#'
#' @param in_dir Results directory to process.
#' @param label Label that the test's results were saved under (e.g. "MDT").
#' @param combine Whether to combine results into one big data frame,
#' or instead to return a list of data frames, one for each participant.
#' @return A data frame, or list of data frames, of trial-by-trial results.
#' @export
# Compile trial-by-trial results for one test across all participants.
# Scans `in_dir` for psychTestR .rds results files, extracts the per-trial
# results table stored under `label`, and tags each table with the
# participant ID and source file. See the roxygen block above for details.
#
# in_dir  results directory to process (character scalar).
# label   label the test's results were saved under, e.g. "MDT".
# combine TRUE to row-bind everything into one data frame (plyr::rbind.fill),
#         FALSE to return a list with one data frame per participant.
compile_trial_by_trial_results <- function(in_dir = "output/results", label,
                                           combine = TRUE) {
  stopifnot(is.character(in_dir), is.scalar(in_dir),
            is.logical(combine), is.scalar(combine))
  if (!dir.exists(in_dir))
    stop("input directory '", in_dir, "' could not be found")
  files <- list.files(in_dir, pattern = "\\.rds",
                      ignore.case = TRUE, full.names = TRUE)
  # Counters reported in the closing message
  n <- list(rds = length(files),
            results = 0L,
            test = 0L)
  res <- list()
  for (i in seq_along(files)) {
    file <- files[i]
    x <- readRDS(file)
    # inherits() is the recommended base-R class test (avoids methods::is())
    if (inherits(x, "results")) {
      n$results <- n$results + 1L
      x <- as.list(x)
      if (!is.null(x[[label]])) {
        if (sum(names(x) == label) > 1L)
          stop("found a results file ('", file, "')",
               " with multiple outputs with label '", label,
               "', not sure what to do here. You might have to process ",
               "this directory manually with readRDS().")
        n$test <- n$test + 1L
        # The trial-by-trial table lives in the "metadata" attribute of the
        # ability estimate
        df <- attr(x[[label]]$ability, "metadata")$results
        if (!is.data.frame(df)) stop("malformed ", label, " results for file ", file)
        p_id <- x$session$p_id
        df <- cbind(p_id = p_id, test = label, file = file, df)
        res[[length(res) + 1L]] <- df
      }
    }
  }
  if (combine) res <- do.call(plyr::rbind.fill, res)
  message(sprintf(paste0("processed %i RDS files, %i of which were psychTestR results files, ",
                         "%i of which contained %s results"),
                  n$rds, n$results, n$test, label))
  res
}
|
## Build the modelling data set for EURO playoff matches. Historical
## matches 2004-2012 come pre-assembled; 2016 covariates are merged in
## from several per-team lookup tables below.
playoffs = read.csv("data/EM2004.2012.publ.txt", header=TRUE, sep="\t")
## Change attributes to factors
playoffs$host = as.factor(playoffs$host)
playoffs$host_opponent = as.factor(playoffs$host_opponent)
playoffs$vicinity = as.factor(playoffs$vicinity)
playoffs$vicinity_opponent = as.factor(playoffs$vicinity_opponent)
playoffs$nationality_coach = as.factor(playoffs$nationality_coach)
playoffs$nationality_coach_opponent = as.factor(playoffs$nationality_coach_opponent)
## Add the data from 2016
teams = as.character(levels(playoffs$team))
## Per-team lookup tables for 2016: match results, betting odds (Quoten),
## squad market value (Wert), FIFA/UEFA rankings and misc team stats.
res2016 = read.csv("data/EM2016.csv", header=TRUE, sep=",")
odds = read.csv("data/QuotenEM2016.txt", header=TRUE, sep="\t")
value = read.csv("data/Wert2016.txt", header=TRUE, sep="\t")
fifa16 = read.csv("data/FIFA2016.txt", header=TRUE, sep="\t")
uefa16 = read.csv("data/UEFA2016.txt", header=TRUE, sep=",")
neues16 = read.csv("data/Neues2016.txt", header=TRUE, sep="\t")
names(fifa16) = c("Fifa_rank", "team", "FIFA_points")
names(neues16) = c("team", "CL_players", "UEFA_players", "vicinity", "age_coach", "nationality_coach", "foreigners", "max1", "max2")
names(odds) = c("team", "odds")
names(value) = c("team", "age", "market_value")
## Market values use a decimal comma; convert to numeric with a decimal point.
value$market_value = as.numeric(gsub(",", ".", as.character(value$market_value)))
res2016$team = as.character(res2016$team)
res2016$opponent = as.character(res2016$opponent)
res2016$german_team = as.character(res2016$german_team)
res2016$german_opponent = as.character(res2016$german_opponent)
uefa16$team = as.character(uefa16$team)
## Harmonise the team name used for Ireland across the lookup tables.
uefa16[uefa16$team == "Republik Irland",]$team = "Irland"
uefa16$UEFA_points = uefa16$UEFA_points / 1000
fifa16$team = as.character(fifa16$team)
fifa16[fifa16$team == "Republik Irland",]$team = "Irland"
neues16$team = as.character(neues16$team)
odds$team = as.character(odds$team)
value$team = as.character(value$team)
## Continue the match id sequence after the historical matches.
res2016$id = res2016$id + max(playoffs$id)
## Placeholder columns; filled per match in the lookup loop below.
res2016$max2 = NA
res2016$odds = NA
res2016$market_value = NA
res2016$FIFA_points = NA
res2016$UEFA_rank = NA
res2016$UEFA_points = NA
res2016$CL_players = NA
res2016$max2_opponent = NA
res2016$odds_opponent = NA
res2016$market_value_opponent = NA
res2016$FIFA_points_opponent = NA
res2016$UEFA_rank_opponent = NA
res2016$UEFA_points_opponent = NA
res2016$CL_players_opponent = NA
## Fill the 2016 covariates: for each match, look up both teams'
## statistics by their German team name in the per-team lookup tables.
for (i in 1:nrow(res2016)) {
team = res2016[i,]$german_team
opponent = res2016[i,]$german_opponent
res2016[i,]$max2 = neues16[neues16$team == team, "max2"]
res2016[i,]$odds = odds[odds$team == team, "odds"]
res2016[i,]$market_value = value[value$team == team, "market_value"]
res2016[i,]$FIFA_points = fifa16[fifa16$team == team, "FIFA_points"]
res2016[i,]$UEFA_rank = uefa16[uefa16$team == team, "UEFA_rank"]
res2016[i,]$UEFA_points = uefa16[uefa16$team == team, "UEFA_points"]
res2016[i,]$CL_players = neues16[neues16$team == team, "CL_players"]
# Same statistics again, matched on the opponent's name.
res2016[i,]$max2_opponent = neues16[neues16$team == opponent, "max2"]
res2016[i,]$odds_opponent = odds[odds$team == opponent, "odds"]
res2016[i,]$market_value_opponent = value[value$team == opponent, "market_value"]
res2016[i,]$FIFA_points_opponent = fifa16[fifa16$team == opponent, "FIFA_points"]
res2016[i,]$UEFA_rank_opponent = uefa16[uefa16$team == opponent, "UEFA_rank"]
res2016[i,]$UEFA_points_opponent = uefa16[uefa16$team == opponent, "UEFA_points"]
res2016[i,]$CL_players_opponent = neues16[neues16$team == opponent, "CL_players"]
}
# reorder goals column: delete and re-add "goals" so it becomes the last
# column, matching the layout expected before the rbind below.
goals16 = res2016$goals
res2016$goals = NULL
res2016$goals = goals16
## remove unwanted columns
## These features were determined to be insignificant
playoffs$year = NULL
playoffs$max1 = NULL
playoffs$max1_opponent = NULL
playoffs$vicinity = NULL
playoffs$vicinity_opponent = NULL
playoffs$foreigners = NULL
playoffs$foreigners_opponent = NULL
playoffs$age_coach = NULL
playoffs$age_coach_opponent = NULL
playoffs$EL_players = NULL
playoffs$EL_players_opponent = NULL
playoffs$GDP = NULL
playoffs$GDP_opponent = NULL
playoffs$population = NULL
playoffs$population_opponent = NULL
playoffs$phase = NULL
playoffs$stage = NULL
playoffs$age = NULL
playoffs$age_opponent = NULL
playoffs$host = NULL
playoffs$host_opponent = NULL
playoffs$FIFA_rank = NULL
playoffs$FIFA_rank_opponent = NULL
playoffs$nationality_coach = NULL
playoffs$nationality_coach_opponent = NULL
# reorder goals (same delete/re-add trick: push "goals" to the last column)
goals04_12 = playoffs$goals
playoffs$goals = NULL
playoffs$goals = goals04_12
## Add 2016 data to the table
## The German name columns exist only in the 2016 data; drop them so the
## column sets of both tables match for rbind.
res2016$german_team = NULL
res2016$german_opponent = NULL
playoffs = rbind(playoffs, res2016)
## discretize the result of a game to a factor of 3 levels
## win, loss, draw
playoffs$result = NA
## Each match appears as two rows (once per team, see the duplicate
## filtering at the end of the script); the goal difference from each
## side's perspective becomes its result. Team/opponent pairings that
## never occurred select zero rows, so those assignments are no-ops.
for(team1 in playoffs$team) {
for(team2 in playoffs$opponent) {
goals1 = playoffs[playoffs$team == team1 & playoffs$opponent == team2,]$goals
goals2 = playoffs[playoffs$team == team2 & playoffs$opponent == team1,]$goals
playoffs[playoffs$team == team1 & playoffs$opponent == team2,]$result = (goals1 - goals2)
playoffs[playoffs$team == team2 & playoffs$opponent == team1,]$result = (goals2 - goals1)
}
}
# install.packages("arules")
library(arules)
## Goal difference < 0 -> loss, == 0 -> draw, > 0 -> win (cut points at
## +/- 0.5 since goal differences are integers).
playoffs$result = discretize(playoffs$result, method="fixed", categories = c(-Inf, -0.5, 0.5, +Inf))
levels(playoffs$result) = c("loss", "draw", "win")
# ## add column with scores according to the performance in the previous three years
# scores = read.csv("data/Team_scores_08-12.txt", header=TRUE, sep="\t")
# scores = as.data.frame(scores)
# colnames(scores) = c("team", "score_04", "score_08", "score_12")
# playoffs$past_scores = rep(0, length(playoffs$team))
# playoffs$past_scores_opponent = rep(0, length(playoffs$team))
# for (i in 1:length(scores[["team"]])) {
# team = as.character(scores[i,"team"])
# sc4 = scores[i,"score_04"]
# sc8 = scores[i,"score_08"]
# sc12 = scores[i,"score_12"]
# print(team)
# for (j in 1:length(playoffs$team)){
# if(as.character(playoffs[j,]$team) == team){
# if(playoffs[j,]$year == 2004){
# playoffs[j,"past_scores"] = sc4
# }else if(playoffs[j,]$year == 2008){
# playoffs[j,"past_scores"] = sc8
# }else{
# playoffs[j,"past_scores"] = sc12
# }
# }
# }
# for (j in 1:length(playoffs$team)){
# if(as.character(playoffs[j,]$opponent) == team){
# if(playoffs[j,]$year == 2004){
# playoffs[j,"past_scores_opponent"] = sc4
# }else if(playoffs[j,]$year == 2008){
# playoffs[j,"past_scores_opponent"] = sc8
# }else{
# playoffs[j,"past_scores_opponent"] = sc12
# }
# }
# }
# }
## Merge a per-team statistic into the match table, matched on team name.
##
## df       - match table with columns `team` and `opponent` (factor or
##            character) and pre-initialised destination columns.
## new_df   - lookup table with a `team` column and the statistic column.
## col_name - name of the statistic column; the value matched on
##            df$opponent is written to "<col_name>_opponent".
##
## Rows whose team has no entry in new_df keep their current value
## (callers pre-initialise the columns to 0). Uses vectorized match()
## instead of the original O(rows x teams) scalar double loop.
## Assumes team names are unique in new_df: match() takes the first hit,
## while the original loop took the last (differs only for duplicates).
append_info <- function(df, new_df, col_name) {
  lookup <- as.character(new_df$team)
  opp_col <- paste(col_name, "opponent", sep = "_")
  # Values for the "team" side of each match.
  idx <- match(as.character(df$team), lookup)
  hit <- !is.na(idx)
  df[hit, col_name] <- new_df[idx[hit], col_name]
  # Values for the "opponent" side of each match.
  idx <- match(as.character(df$opponent), lookup)
  hit <- !is.na(idx)
  df[hit, opp_col] <- new_df[idx[hit], col_name]
  return(df)
}
## add column with nation's GDP, population and #won_medals in oly. games
nat_info = read.csv("data/additional_info_countries.csv", header=TRUE, sep=",")
nat_info = as.data.frame(nat_info)
colnames(nat_info) = c("team", "population", "GDP", "olymedals")
## Initialise destination columns; append_info only overwrites rows whose
## team appears in nat_info.
playoffs$GDP = rep(0, length(playoffs$team))
playoffs$GDP_opponent = rep(0, length(playoffs$team))
playoffs$population = rep(0, length(playoffs$team))
playoffs$population_opponent = rep(0, length(playoffs$team))
playoffs = append_info(playoffs, nat_info, "GDP")
playoffs = append_info(playoffs, nat_info, "population")
# this one is very tricky so better not use it:
# playoffs$olymedals = rep(0, length(playoffs$team))
# playoffs$olymedals_opponent = rep(0, length(playoffs$team))
# playoffs = append_info(playoffs, nat_info, "olymedals")
## normalization of all not factor-features
## Min-max scale every non-factor column to [0, 1], except the response
## ("goals") and the match identifier ("id"). The apply(t(...)) construct
## is just an elementwise transform of a single column.
for (col_name in names(playoffs)) {
if (!is.factor(playoffs[,col_name]) & col_name != "goals" & col_name != "id"){
playoffs[,col_name] = apply(t(playoffs[, col_name]), 2,
FUN = function(x){(x-min(playoffs[, col_name]))/(max(playoffs[, col_name])-min(playoffs[, col_name]))})
}
}
## Write the full table and a version with one row per match
## (every second row dropped, removing the mirrored duplicates).
write.csv(playoffs, file="data/playoffs.csv", row.names = FALSE)
filtered_match_duplicates = playoffs[-(seq(2,to=nrow(playoffs),by=2)),]
write.csv(filtered_match_duplicates, file="data/playoffs_wo_duplicates.csv", row.names = FALSE)
| /dataprep.R | no_license | taimir/football_2016 | R | false | false | 8,350 | r | playoffs = read.csv("data/EM2004.2012.publ.txt", header=TRUE, sep="\t")
## Change attributes to factors
playoffs$host = as.factor(playoffs$host)
playoffs$host_opponent = as.factor(playoffs$host_opponent)
playoffs$vicinity = as.factor(playoffs$vicinity)
playoffs$vicinity_opponent = as.factor(playoffs$vicinity_opponent)
playoffs$nationality_coach = as.factor(playoffs$nationality_coach)
playoffs$nationality_coach_opponent = as.factor(playoffs$nationality_coach_opponent)
## Add the data from 2016
teams = as.character(levels(playoffs$team))
res2016 = read.csv("data/EM2016.csv", header=TRUE, sep=",")
odds = read.csv("data/QuotenEM2016.txt", header=TRUE, sep="\t")
value = read.csv("data/Wert2016.txt", header=TRUE, sep="\t")
fifa16 = read.csv("data/FIFA2016.txt", header=TRUE, sep="\t")
uefa16 = read.csv("data/UEFA2016.txt", header=TRUE, sep=",")
neues16 = read.csv("data/Neues2016.txt", header=TRUE, sep="\t")
names(fifa16) = c("Fifa_rank", "team", "FIFA_points")
names(neues16) = c("team", "CL_players", "UEFA_players", "vicinity", "age_coach", "nationality_coach", "foreigners", "max1", "max2")
names(odds) = c("team", "odds")
names(value) = c("team", "age", "market_value")
value$market_value = as.numeric(gsub(",", ".", as.character(value$market_value)))
res2016$team = as.character(res2016$team)
res2016$opponent = as.character(res2016$opponent)
res2016$german_team = as.character(res2016$german_team)
res2016$german_opponent = as.character(res2016$german_opponent)
uefa16$team = as.character(uefa16$team)
uefa16[uefa16$team == "Republik Irland",]$team = "Irland"
uefa16$UEFA_points = uefa16$UEFA_points / 1000
fifa16$team = as.character(fifa16$team)
fifa16[fifa16$team == "Republik Irland",]$team = "Irland"
neues16$team = as.character(neues16$team)
odds$team = as.character(odds$team)
value$team = as.character(value$team)
res2016$id = res2016$id + max(playoffs$id)
res2016$max2 = NA
res2016$odds = NA
res2016$market_value = NA
res2016$FIFA_points = NA
res2016$UEFA_rank = NA
res2016$UEFA_points = NA
res2016$CL_players = NA
res2016$max2_opponent = NA
res2016$odds_opponent = NA
res2016$market_value_opponent = NA
res2016$FIFA_points_opponent = NA
res2016$UEFA_rank_opponent = NA
res2016$UEFA_points_opponent = NA
res2016$CL_players_opponent = NA
for (i in 1:nrow(res2016)) {
team = res2016[i,]$german_team
opponent = res2016[i,]$german_opponent
res2016[i,]$max2 = neues16[neues16$team == team, "max2"]
res2016[i,]$odds = odds[odds$team == team, "odds"]
res2016[i,]$market_value = value[value$team == team, "market_value"]
res2016[i,]$FIFA_points = fifa16[fifa16$team == team, "FIFA_points"]
res2016[i,]$UEFA_rank = uefa16[uefa16$team == team, "UEFA_rank"]
res2016[i,]$UEFA_points = uefa16[uefa16$team == team, "UEFA_points"]
res2016[i,]$CL_players = neues16[neues16$team == team, "CL_players"]
res2016[i,]$max2_opponent = neues16[neues16$team == opponent, "max2"]
res2016[i,]$odds_opponent = odds[odds$team == opponent, "odds"]
res2016[i,]$market_value_opponent = value[value$team == opponent, "market_value"]
res2016[i,]$FIFA_points_opponent = fifa16[fifa16$team == opponent, "FIFA_points"]
res2016[i,]$UEFA_rank_opponent = uefa16[uefa16$team == opponent, "UEFA_rank"]
res2016[i,]$UEFA_points_opponent = uefa16[uefa16$team == opponent, "UEFA_points"]
res2016[i,]$CL_players_opponent = neues16[neues16$team == opponent, "CL_players"]
}
# reorder goals column
goals16 = res2016$goals
res2016$goals = NULL
res2016$goals = goals16
## remove unwanted columns
## These features were determined to be insignificant
playoffs$year = NULL
playoffs$max1 = NULL
playoffs$max1_opponent = NULL
playoffs$vicinity = NULL
playoffs$vicinity_opponent = NULL
playoffs$foreigners = NULL
playoffs$foreigners_opponent = NULL
playoffs$age_coach = NULL
playoffs$age_coach_opponent = NULL
playoffs$EL_players = NULL
playoffs$EL_players_opponent = NULL
playoffs$GDP = NULL
playoffs$GDP_opponent = NULL
playoffs$population = NULL
playoffs$population_opponent = NULL
playoffs$phase = NULL
playoffs$stage = NULL
playoffs$age = NULL
playoffs$age_opponent = NULL
playoffs$host = NULL
playoffs$host_opponent = NULL
playoffs$FIFA_rank = NULL
playoffs$FIFA_rank_opponent = NULL
playoffs$nationality_coach = NULL
playoffs$nationality_coach_opponent = NULL
# reorder goals
goals04_12 = playoffs$goals
playoffs$goals = NULL
playoffs$goals = goals04_12
## Add 2016 data to the table
res2016$german_team = NULL
res2016$german_opponent = NULL
playoffs = rbind(playoffs, res2016)
## discretize the result of a game to a factor of 3 levels
## win, loss, draw
playoffs$result = NA
for(team1 in playoffs$team) {
for(team2 in playoffs$opponent) {
goals1 = playoffs[playoffs$team == team1 & playoffs$opponent == team2,]$goals
goals2 = playoffs[playoffs$team == team2 & playoffs$opponent == team1,]$goals
playoffs[playoffs$team == team1 & playoffs$opponent == team2,]$result = (goals1 - goals2)
playoffs[playoffs$team == team2 & playoffs$opponent == team1,]$result = (goals2 - goals1)
}
}
# install.packages("arules")
library(arules)
playoffs$result = discretize(playoffs$result, method="fixed", categories = c(-Inf, -0.5, 0.5, +Inf))
levels(playoffs$result) = c("loss", "draw", "win")
# ## add column with scores according to the performance in the previous three years
# scores = read.csv("data/Team_scores_08-12.txt", header=TRUE, sep="\t")
# scores = as.data.frame(scores)
# colnames(scores) = c("team", "score_04", "score_08", "score_12")
# playoffs$past_scores = rep(0, length(playoffs$team))
# playoffs$past_scores_opponent = rep(0, length(playoffs$team))
# for (i in 1:length(scores[["team"]])) {
# team = as.character(scores[i,"team"])
# sc4 = scores[i,"score_04"]
# sc8 = scores[i,"score_08"]
# sc12 = scores[i,"score_12"]
# print(team)
# for (j in 1:length(playoffs$team)){
# if(as.character(playoffs[j,]$team) == team){
# if(playoffs[j,]$year == 2004){
# playoffs[j,"past_scores"] = sc4
# }else if(playoffs[j,]$year == 2008){
# playoffs[j,"past_scores"] = sc8
# }else{
# playoffs[j,"past_scores"] = sc12
# }
# }
# }
# for (j in 1:length(playoffs$team)){
# if(as.character(playoffs[j,]$opponent) == team){
# if(playoffs[j,]$year == 2004){
# playoffs[j,"past_scores_opponent"] = sc4
# }else if(playoffs[j,]$year == 2008){
# playoffs[j,"past_scores_opponent"] = sc8
# }else{
# playoffs[j,"past_scores_opponent"] = sc12
# }
# }
# }
# }
## Merge a per-team statistic into the match table, matched on team name.
##
## df       - match table with columns `team` and `opponent` (factor or
##            character) and pre-initialised destination columns.
## new_df   - lookup table with a `team` column and the statistic column.
## col_name - name of the statistic column; the value matched on
##            df$opponent is written to "<col_name>_opponent".
##
## Rows whose team has no entry in new_df keep their current value
## (callers pre-initialise the columns to 0). Uses vectorized match()
## instead of the original O(rows x teams) scalar double loop.
## Assumes team names are unique in new_df: match() takes the first hit,
## while the original loop took the last (differs only for duplicates).
append_info <- function(df, new_df, col_name) {
  lookup <- as.character(new_df$team)
  opp_col <- paste(col_name, "opponent", sep = "_")
  # Values for the "team" side of each match.
  idx <- match(as.character(df$team), lookup)
  hit <- !is.na(idx)
  df[hit, col_name] <- new_df[idx[hit], col_name]
  # Values for the "opponent" side of each match.
  idx <- match(as.character(df$opponent), lookup)
  hit <- !is.na(idx)
  df[hit, opp_col] <- new_df[idx[hit], col_name]
  return(df)
}
## add column with nation's GDP, population and #won_medals in oly. games
nat_info = read.csv("data/additional_info_countries.csv", header=TRUE, sep=",")
nat_info = as.data.frame(nat_info)
colnames(nat_info) = c("team", "population", "GDP", "olymedals")
playoffs$GDP = rep(0, length(playoffs$team))
playoffs$GDP_opponent = rep(0, length(playoffs$team))
playoffs$population = rep(0, length(playoffs$team))
playoffs$population_opponent = rep(0, length(playoffs$team))
playoffs = append_info(playoffs, nat_info, "GDP")
playoffs = append_info(playoffs, nat_info, "population")
# this one is very tricky so better not use it:
# playoffs$olymedals = rep(0, length(playoffs$team))
# playoffs$olymedals_opponent = rep(0, length(playoffs$team))
# playoffs = append_info(playoffs, nat_info, "olymedals")
## normalization of all not factor-features
for (col_name in names(playoffs)) {
if (!is.factor(playoffs[,col_name]) & col_name != "goals" & col_name != "id"){
playoffs[,col_name] = apply(t(playoffs[, col_name]), 2,
FUN = function(x){(x-min(playoffs[, col_name]))/(max(playoffs[, col_name])-min(playoffs[, col_name]))})
}
}
write.csv(playoffs, file="data/playoffs.csv", row.names = FALSE)
filtered_match_duplicates = playoffs[-(seq(2,to=nrow(playoffs),by=2)),]
write.csv(filtered_match_duplicates, file="data/playoffs_wo_duplicates.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.os.R
\name{os.predict}
\alias{os.predict}
\title{os.predict}
\usage{
os.predict(
data,
year = 10,
age.start,
screen,
size,
grade,
nodes,
er,
her2,
ki67,
generation,
horm,
traz,
bis
)
}
\arguments{
\item{data}{A dataframe containing patient data with the necessary variables.}
\item{year}{Numeric, Specify the year since surgery for which the predictions are calculated, ranges between 1 and 15. Default at 10.}
\item{age.start}{Numeric, Age at diagnosis of the patient. Range between 25 and 85.}
\item{screen}{Numeric, Clinically detected = 0, Screen detected = 1, Unknown = 2.}
\item{size}{Numeric, Tumor size in millimeters.}
\item{grade}{Numeric, Tumor grade. Values: 1,2,3. Missing=9.}
\item{nodes}{Numeric, Number of positive nodes.}
\item{er}{Numeric, ER status, ER+ = 1, ER- = 0.}
\item{her2}{Numeric, HER2 status, HER2+ = 1, HER2- = 0. Unknown = 9.}
\item{ki67}{Numeric, ki67 status, KI67+ = 1, KI67- = 0, Unknown = 9.}
\item{generation}{Numeric, Chemotherapy generation. Values: 0, 2, 3.}
\item{horm}{Numeric, Hormone therapy, Yes = 1, No = 0.}
\item{traz}{Numeric, Trastuzumab therapy, Yes = 1, No = 0.}
\item{bis}{Numeric, Bisphosphonate therapy, Yes = 1, No = 0.}
}
\value{
The function attaches additional columns to the dataframe, matched for patient observation,
containing Overall survival at the specified year, plus the additional benefit for each type of therapy.
}
\description{
Calculates 'NHS Predict' v2.1 Overall survival and therapy benefits
}
\examples{
data(example_data)
example_data <- os.predict(example_data,age.start = age,screen = detection,size = t.size,
grade = t.grade, nodes = nodes, er = er.status, her2 = her2.status,
ki67 = ki67.status, generation = chemo.gen, horm = horm.t,
traz = trastuzumab, bis = bis.t)
data(example_data)
example_data <- os.predict(example_data,year = 15, age,detection,t.size,t.grade,
nodes,er.status,her2.status,ki67.status,chemo.gen,horm.t,
trastuzumab,bis.t)
}
| /man/os.predict.Rd | no_license | cran/nhs.predict | R | false | true | 2,180 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.os.R
\name{os.predict}
\alias{os.predict}
\title{os.predict}
\usage{
os.predict(
data,
year = 10,
age.start,
screen,
size,
grade,
nodes,
er,
her2,
ki67,
generation,
horm,
traz,
bis
)
}
\arguments{
\item{data}{A dataframe containing patient data with the necessary variables.}
\item{year}{Numeric, Specify the year since surgery for which the predictions are calculated, ranges between 1 and 15. Default at 10.}
\item{age.start}{Numeric, Age at diagnosis of the patient. Range between 25 and 85.}
\item{screen}{Numeric, Clinically detected = 0, Screen detected = 1, Unknown = 2.}
\item{size}{Numeric, Tumor size in millimeters.}
\item{grade}{Numeric, Tumor grade. Values: 1,2,3. Missing=9.}
\item{nodes}{Numeric, Number of positive nodes.}
\item{er}{Numeric, ER status, ER+ = 1, ER- = 0.}
\item{her2}{Numeric, HER2 status, HER2+ = 1, HER2- = 0. Unknown = 9.}
\item{ki67}{Numeric, ki67 status, KI67+ = 1, KI67- = 0, Unknown = 9.}
\item{generation}{Numeric, Chemotherapy generation. Values: 0, 2, 3.}
\item{horm}{Numeric, Hormone therapy, Yes = 1, No = 0.}
\item{traz}{Numeric, Trastuzumab therapy, Yes = 1, No = 0.}
\item{bis}{Numeric, Bisphosphonate therapy, Yes = 1, No = 0.}
}
\value{
The function attaches additional columns to the dataframe, matched for patient observation,
containing Overall survival at the specified year, plus the additional benefit for each type of therapy.
}
\description{
Calculates 'NHS Predict' v2.1 Overall survival and therapy benefits
}
\examples{
data(example_data)
example_data <- os.predict(example_data,age.start = age,screen = detection,size = t.size,
grade = t.grade, nodes = nodes, er = er.status, her2 = her2.status,
ki67 = ki67.status, generation = chemo.gen, horm = horm.t,
traz = trastuzumab, bis = bis.t)
data(example_data)
example_data <- os.predict(example_data,year = 15, age,detection,t.size,t.grade,
nodes,er.status,her2.status,ki67.status,chemo.gen,horm.t,
trastuzumab,bis.t)
}
|
###############################################################################
# Function for Regression Stability Analysis
# Raul H. Eyzaguirre P.
###############################################################################
## Required functions
if ('mve.rcbd.met' %in% lsf.str() == F)
if ('MissValEst.R' %in% list.files() == T) source("MissValEst.R") else {
urlfile <- 'https://raw.githubusercontent.com/SweetPotatoImprov/StatTools/master/MissValEst/MissValEst.R'
source(urlfile)
}
## Function RegStab
## Regression stability analysis for a multi-environment trial (MET) laid
## out as a RCBD: joint regression with heterogeneity of and deviation
## from regression for both genotypes and environments.
##
## Args:
##   trait, geno, env, rep: column names in `data` for the response,
##     genotypes, environments and replications.
##   data: data frame with the plot-level observations.
##   maxp: maximum proportion of missing values accepted by the missing
##     value estimation (passed to mve.rcbd.met).
## Returns:
##   A list with the extended ANOVA table (anova.table), the coefficient
##   of variation in percent (cv), and the per-genotype / per-environment
##   regression stability statistics (stability.geno, stability.env).
RegStab <- function(trait, geno, env, rep, data, maxp = 0.05) {
  # Everything as factor
  data[, geno] <- factor(data[, geno])
  data[, env] <- factor(data[, env])
  data[, rep] <- factor(data[, rep])
  # Number of estimated missing values. Must be initialised to zero: it is
  # subtracted from the residual df below even when the data are balanced.
  # (The original code left nmis undefined for balanced data and crashed.)
  nmis <- 0
  # Check data and estimate missing values if the layout is unbalanced.
  lc <- CheckData02(trait, geno, env, rep, data)
  if (lc$c1 == 0 | lc$c2 == 0 | lc$c3 == 0) {
    est.data <- mve.rcbd.met(trait, geno, env, rep, data, maxp, tol = 1e-06)
    data[, trait] <- est.data$new.data[, 5]
    nmis <- est.data$est.num
    warning(paste("Warning: The data set is unbalanced, ",
                  format(est.data$est.prop * 100, digits = 3),
                  "% missing values estimated.", sep = ""))
  }
  # Error messages
  geno.num <- nlevels(data[, geno])
  env.num <- nlevels(data[, env])
  if (geno.num < 2 | env.num < 2)
    stop(paste("Error: This is not a MET experiment."))
  if (geno.num == 2 & env.num == 2)
    stop(paste("Error: You need at least 3 genotypes or 3 environments for regression stability analysis."))
  # Genotype-by-environment cell means and marginal means.
  int.mean <- tapply(data[, trait], list(data[, geno], data[, env]), mean, na.rm = TRUE)
  overall.mean <- mean(int.mean, na.rm = TRUE)
  env.mean <- apply(int.mean, 2, mean, na.rm = TRUE)
  geno.mean <- apply(int.mean, 1, mean, na.rm = TRUE)
  # ANOVA; residual df corrected for the number of estimated values.
  add.anova <- aov(data[, trait] ~ data[, geno] + data[, env] +
                     data[, rep] %in% data[, env] + data[, geno]:data[, env])
  at <- summary(add.anova)
  at <- cbind(at[[1]][, 1:4], at[[1]][, 5])
  at[5, 1] <- at[5, 1] - nmis
  at[5, 3] <- at[5, 2] / at[5, 1]
  at[1:4, 4] <- at[1:4, 3] / at[5, 3]
  at[1:4, 5] <- pf(at[1:4, 4], at[1:4, 1], at[5, 1], lower.tail = FALSE)
  # Regression-stability for genotypes: regress each genotype's cell means
  # on the environment means (environmental index).
  a <- numeric(geno.num)        # intercepts
  b <- numeric(geno.num)        # slopes
  se <- numeric(geno.num)       # standard errors of the slopes
  ms_dev <- numeric(geno.num)   # MS of deviations from regression
  ms_gxe <- numeric(geno.num)   # GxE interaction MS contribution
  ms_entry <- numeric(geno.num)
  for (i in seq_len(geno.num)) {
    fit <- lm(int.mean[i, ] ~ env.mean)
    a[i] <- coef(fit)[1]
    b[i] <- coef(fit)[2]
    se[i] <- summary.lm(fit)$coefficients[2, 2]
    ms_dev[i] <- anova(fit)[2, 3]
    ms_gxe[i] <- sum((int.mean[i, ] - geno.mean[i] - env.mean + overall.mean)^2) / (env.num - 1)
    ms_entry[i] <- sum((int.mean[i, ] - geno.mean[i])^2) / (env.num - 1)
  }
  stability_geno <- cbind(b, se, ms_dev, ms_entry, ms_gxe)
  row.names(stability_geno) <- levels(data[, geno])
  names(a) <- levels(data[, geno])
  names(b) <- levels(data[, geno])
  if (env.num > 2) {
    # Split the GxE sum of squares into heterogeneity of regressions (hrg)
    # and deviations from regression (drg) for genotypes.
    nobs <- nrow(data)
    x <- numeric(nobs)
    ypred <- numeric(nobs)
    ymean <- numeric(nobs)
    for (i in seq_len(nobs)) {
      x[i] <- env.mean[names(env.mean) == data[i, env]]
      ypred[i] <- a[names(a) == data[i, geno]] + b[names(b) == data[i, geno]] * x[i]
      ymean[i] <- int.mean[row.names(int.mean) == data[i, geno], colnames(int.mean) == data[i, env]]
    }
    drg_sc <- sum((ypred - ymean)^2)
    hrg_gl <- geno.num - 1
    drg_gl <- (geno.num - 1) * (env.num - 1) - hrg_gl
    drg_cm <- drg_sc / drg_gl
    hrg_sc <- at[4, 2] - drg_sc
    hrg_cm <- hrg_sc / hrg_gl
    hrg_f <- hrg_cm / drg_cm
    hrg_p <- pf(hrg_f, hrg_gl, drg_gl, lower.tail = FALSE)
    drg_f <- drg_cm / at[5, 3]
    drg_p <- pf(drg_f, drg_gl, at[5, 1], lower.tail = FALSE)
  } else {
    # Not estimable with only two environments.
    drg_sc <- hrg_gl <- drg_gl <- drg_cm <- hrg_sc <- NA
    hrg_cm <- hrg_f <- hrg_p <- drg_f <- drg_p <- NA
  }
  # Regression-stability for environments: regress each environment's cell
  # means on the genotype means.
  a <- numeric(env.num)
  b <- numeric(env.num)
  se <- numeric(env.num)
  ms_dev <- numeric(env.num)
  ms_gxe <- numeric(env.num)
  ms_entry <- numeric(env.num)
  for (i in seq_len(env.num)) {
    fit <- lm(int.mean[, i] ~ geno.mean)
    a[i] <- coef(fit)[1]
    b[i] <- coef(fit)[2]
    se[i] <- summary.lm(fit)$coefficients[2, 2]
    ms_dev[i] <- anova(fit)[2, 3]
    ms_gxe[i] <- sum((int.mean[, i] - env.mean[i] - geno.mean + overall.mean)^2) / (geno.num - 1)
    ms_entry[i] <- sum((int.mean[, i] - env.mean[i])^2) / (geno.num - 1)
  }
  stability_env <- cbind(b, se, ms_dev, ms_entry, ms_gxe)
  row.names(stability_env) <- levels(data[, env])
  names(a) <- levels(data[, env])
  names(b) <- levels(data[, env])
  if (geno.num > 2) {
    # Heterogeneity (hre) and deviations (dre) for environments.
    nobs <- nrow(data)
    x <- numeric(nobs)
    ypred <- numeric(nobs)
    ymean <- numeric(nobs)
    for (i in seq_len(nobs)) {
      x[i] <- geno.mean[names(geno.mean) == data[i, geno]]
      ypred[i] <- a[names(a) == data[i, env]] + b[names(b) == data[i, env]] * x[i]
      ymean[i] <- int.mean[row.names(int.mean) == data[i, geno], colnames(int.mean) == data[i, env]]
    }
    dre_sc <- sum((ypred - ymean)^2)
    hre_gl <- env.num - 1
    dre_gl <- (geno.num - 1) * (env.num - 1) - hre_gl
    dre_cm <- dre_sc / dre_gl
    hre_sc <- at[4, 2] - dre_sc
    hre_cm <- hre_sc / hre_gl
    hre_f <- hre_cm / dre_cm
    hre_p <- pf(hre_f, hre_gl, dre_gl, lower.tail = FALSE)
    dre_f <- dre_cm / at[5, 3]
    dre_p <- pf(dre_f, dre_gl, at[5, 1], lower.tail = FALSE)
  } else {
    # Not estimable with only two genotypes.
    dre_sc <- hre_gl <- dre_gl <- dre_cm <- hre_sc <- NA
    hre_cm <- hre_f <- hre_p <- dre_f <- dre_p <- NA
  }
  # ANOVA plus regression stability: test E against R:E, then splice the
  # regression stability rows into the table. The residual row is moved
  # from position 5 to position 9.
  at[2, 4] <- at[2, 3] / at[3, 3]
  at[2, 5] <- pf(at[2, 4], at[2, 1], at[3, 1], lower.tail = FALSE)
  filaux <- at[5, ]
  at[5, ] <- c(hrg_gl, hrg_sc, hrg_cm, hrg_f, hrg_p)
  at <- rbind(at, c(drg_gl, drg_sc, drg_cm, drg_f, drg_p))
  at <- rbind(at, c(hre_gl, hre_sc, hre_cm, hre_f, hre_p))
  at <- rbind(at, c(dre_gl, dre_sc, dre_cm, dre_f, dre_p))
  at[9, ] <- filaux
  row.names(at) <- c("G", "E", "R:E", "GxE", "- Het.Regr.G", "- Dev.Regr.G",
                     "- Het.Regr.E", "- Dev.Regr.E", "Residuals")
  colnames(at)[5] <- "Pr(>F)"
  # CV is based on the residual mean square, which after the reordering
  # above sits in row 9. (The original read at[5, 3], which by this point
  # held the Het.Regr.G mean square -- a bug.)
  cv <- sqrt(at[9, 3]) / abs(overall.mean) * 100
  # Return; the stability tables were previously computed but discarded.
  list(anova.table = format(at, digits = 4),
       cv = paste(format(cv, digits = 4), "%"),
       stability.geno = stability_geno,
       stability.env = stability_env)
}
| /RegStab/RegStab.R | no_license | lagvier/StatTools | R | false | false | 6,101 | r | ###############################################################################
# Function for Regression Stability Analysis
# Raul H. Eyzaguirre P.
###############################################################################
## Required functions
if ('mve.rcbd.met' %in% lsf.str() == F)
if ('MissValEst.R' %in% list.files() == T) source("MissValEst.R") else {
urlfile <- 'https://raw.githubusercontent.com/SweetPotatoImprov/StatTools/master/MissValEst/MissValEst.R'
source(urlfile)
}
## Function RegStab
RegStab <- function(trait, geno, env, rep, data, maxp = 0.05){
# Everything as factor
data[,geno] <- factor(data[,geno])
data[,env] <- factor(data[,env])
data[,rep] <- factor(data[,rep])
# Check data and estimate missing values
lc <- CheckData02(trait, geno, env, rep, data)
if (lc$c1 == 0 | lc$c2 == 0 | lc$c3 == 0){
est.data <- mve.rcbd.met(trait, geno, env, rep, data, maxp, tol = 1e-06)
data[,trait] <- est.data$new.data[,5]
nmis <- est.data$est.num
warning(paste("Warning: The data set is unbalanced, ",
format(est.data$est.prop*100, digits = 3),
"% missing values estimated.", sep = ""))
}
# Error messages
geno.num <- nlevels(data[,geno])
env.num <- nlevels(data[,env])
rep.num <- nlevels(data[,rep])
if (geno.num < 2 | env.num < 2)
stop(paste("Error: This is not a MET experiment."))
if (geno.num == 2 & env.num == 2)
stop(paste("Error: You need at least 3 genotypes or 3 environments for regression stability analysis."))
# Some statistics
int.mean <- tapply(data[,trait], list(data[,geno], data[,env]), mean, na.rm=T)
overall.mean <- mean(int.mean, na.rm=T)
env.mean <- apply(int.mean, 2, mean, na.rm=T)
geno.mean <- apply(int.mean, 1, mean, na.rm=T)
# ANOVA
add.anova <- aov(data[,trait] ~ data[,geno] + data[,env] + data[,rep] %in% data[,env] + data[,geno]:data[,env])
at <- summary(add.anova)
at <- cbind(at[[1]][,1:4], at[[1]][,5])
at[5,1] <- at[5,1] - nmis
at[5,3] <- at[5,2]/at[5,1]
at[1:4,4] <- at[1:4,3]/at[5,3]
at[1:4,5] <- pf(at[1:4,4], at[1:4,1], at[5,1], lower.tail=F)
# Regression-stability for genotypes
a <- NULL
b <- NULL
se <- NULL
ms_dev <- NULL
ms_gxe <- NULL
ms_entry <- NULL
ms_reg <- NULL
for (i in 1:geno.num){
modelo <- lm(int.mean[i,] ~ env.mean)
a[i] <- coef(modelo)[1]
b[i] <- coef(modelo)[2]
se[i] <- summary.lm(modelo)$coefficients[2,2]
ms_dev[i] <- anova(modelo)[2,3]
ms_gxe[i] <- sum((int.mean[i,] - geno.mean[i] - env.mean + overall.mean)^2)/(env.num - 1)
ms_entry[i] <- sum((int.mean[i,] - geno.mean[i])^2)/(env.num - 1)
}
stability_geno <- cbind(b, se, ms_dev, ms_entry, ms_gxe)
row.names(stability_geno) <- levels(data[,geno])
names(a) <- levels(data[,geno])
names(b) <- levels(data[,geno])
if (env.num > 2){
x <- NULL
ypred <- NULL
ymean <- NULL
for (i in 1:length(data[,trait])){
x[i] <- env.mean[names(env.mean)==data[i,env]]
ypred[i] <- a[names(a)==data[i,geno]] + b[names(b)==data[i,geno]]*x[i]
ymean[i] <- int.mean[row.names(int.mean)==data[i,geno], colnames(int.mean)==data[i,env]]
}
drg_sc <- sum((ypred - ymean)^2)
hrg_gl <- geno.num - 1
drg_gl <- (geno.num - 1)*(env.num - 1) - hrg_gl
drg_cm <- drg_sc/drg_gl
hrg_sc <- at[4,2] - drg_sc
hrg_cm <- hrg_sc/hrg_gl
hrg_f <- hrg_cm/drg_cm
hrg_p <- pf(hrg_f, hrg_gl, drg_gl, lower.tail=F)
drg_f <- drg_cm/at[5,3]
drg_p <- pf(drg_f, drg_gl, at[5,1], lower.tail=F)
} else {
drg_sc <- NA
hrg_gl <- NA
drg_gl <- NA
drg_cm <- NA
hrg_sc <- NA
hrg_cm <- NA
hrg_f <- NA
hrg_p <- NA
drg_f <- NA
drg_p <- NA
}
# Regression-stability for environments
a <- NULL
b <- NULL
se <- NULL
ms_dev <- NULL
ms_gxe <- NULL
ms_entry <- NULL
ms_reg <- NULL
for (i in 1:env.num){
modelo <- lm(int.mean[,i] ~ geno.mean)
a[i] <- coef(modelo)[1]
b[i] <- coef(modelo)[2]
se[i] <- summary.lm(modelo)$coefficients[2,2]
ms_dev[i] <- anova(modelo)[2,3]
ms_gxe[i] <- sum((int.mean[,i] - env.mean[i] - geno.mean + overall.mean)^2)/(geno.num - 1)
ms_entry[i] <- sum((int.mean[,i] - env.mean[i])^2)/(geno.num - 1)
}
stability_env <- cbind(b, se, ms_dev, ms_entry, ms_gxe)
row.names(stability_env) <- levels(data[,env])
names(a) <- levels(data[,env])
names(b) <- levels(data[,env])
if (geno.num > 2){
x <- NULL
ypred <- NULL
ymean <- NULL
for (i in 1:length(data[,trait])){
x[i] <- geno.mean[names(geno.mean)==data[i,geno]]
ypred[i] <- a[names(a)==data[i,env]] + b[names(b)==data[i,env]]*x[i]
ymean[i] <- int.mean[row.names(int.mean)==data[i,geno], colnames(int.mean)==data[i,env]]
}
dre_sc <- sum((ypred-ymean)^2)
hre_gl <- env.num - 1
dre_gl <- (geno.num - 1)*(env.num - 1) - hre_gl
dre_cm <- dre_sc/dre_gl
hre_sc <- at[4,2] - dre_sc
hre_cm <- hre_sc/hre_gl
hre_f <- hre_cm/dre_cm
hre_p <- pf(hre_f, hre_gl, dre_gl, lower.tail=F)
dre_f <- dre_cm/at[5,3]
dre_p <- pf(dre_f, dre_gl, at[5,1], lower.tail=F)
} else {
dre_sc <- NA
hre_gl <- NA
dre_gl <- NA
dre_cm <- NA
hre_sc <- NA
hre_cm <- NA
hre_f <- NA
hre_p <- NA
dre_f <- NA
dre_p <- NA
}
# ANOVA plus regression stability
at[2,4] <- at[2,3]/at[3,3]
at[2,5] <- pf(at[2,4], at[2,1], at[3,1], lower.tail=F)
filaux <- at[5,]
at[5,] <- c(hrg_gl, hrg_sc, hrg_cm, hrg_f, hrg_p)
at <- rbind(at, c(drg_gl, drg_sc, drg_cm, drg_f, drg_p))
at <- rbind(at, c(hre_gl, hre_sc, hre_cm, hre_f, hre_p))
at <- rbind(at, c(dre_gl, dre_sc, dre_cm, dre_f, dre_p))
at[9,] <- filaux
row.names(at) <- c("G", "E", "R:E", "GxE", "- Het.Regr.G", "- Dev.Regr.G",
"- Het.Regr.E", "- Dev.Regr.E", "Residuals")
colnames(at)[5] <- "Pr(>F)"
cv <- sqrt(at[5,3])/abs(overall.mean)*100
# Return
list(anova.table = format(at, digits=4), cv = paste(format(cv, digits=4), "%"))
}
|
# S4 result object for a closed testing procedure: stores either the
# rejections at a fixed alpha or the full vector of adjusted p-values.
setClass("closure",
representation(
hypotheses = "character", # names of hypotheses
alpha = "numeric", # stores chosen alpha for testing
adjusted = "numeric", # stores adjusted p-values
# (has value 1 if adjusted p > alpha)
# (numeric(0) if no adjusted p-values calculated)
max.alpha = "numeric", # highest alpha at which adjusted p-values are calculated
# value NA if no adjusted p-values present
defining = "integer" # defining hypotheses at chosen alpha
# NA if alpha = NA
)
)
###
# This function performs closed testing
###
# Perform a full closed testing procedure on a set of elementary hypotheses.
#
# test       - function taking a character vector of hypothesis names and
#              returning the p-value of the corresponding intersection test.
# hypotheses - names of the elementary hypotheses.
# alpha      - significance level; alpha = NA requests adjusted p-values.
# adjust     - if TRUE, calculate adjusted p-values (up to alpha).
#
# Each intersection hypothesis is encoded as an N-bit integer whose set
# bits mark the participating elementary hypotheses, so at most
# log2(.Machine$integer.max + 1) hypotheses can be handled.
# Relies on bitAnd (bitops) and the internal helpers .bit2boolean and
# .defining, which are defined elsewhere in the package.
closed <- function(test, hypotheses, alpha = 0.05, adjust = FALSE) {
# reverse argument order if mistaken (caller swapped test and hypotheses)
if (is(test, "character") && is(hypotheses, "function")) {
tmp <- test
test <- hypotheses
hypotheses <- tmp
}
# alpha=NA means adjusted p-values
if ((!missing(alpha)) && is.na(alpha)) {
alpha <- 1
adjust <- TRUE
}
# default of alpha is 1 if adjust = TRUE
if (adjust && missing(alpha))
alpha <- 1
# preparation of closure: every non-empty subset of the N hypotheses,
# encoded as the integers 1 .. 2^N - 1
N <- length(hypotheses)
Nmax <- log2(.Machine$integer.max+1)
if (N > Nmax)
stop("no more than ", Nmax, " hypotheses supported in full closed testing.\n Use a shortcut-based test.")
closure <- 1:(2^N-1)
base <- 2^(1:N-1)
# finds offspring hypotheses of a hypothesis (NB including self)
offspring <- function(x) {
res <- bitAnd(x, closure)
res[res != 0]
}
# sort the closure to decreasing number of participating hypotheses,
# so every intersection is visited before any of its subsets
lengths <- rowSums(sapply(base, function(bs) bitAnd(closure, bs) != 0))
closure <- closure[sort.list(lengths, decreasing = TRUE)]
# perform closed testing (adjusted p variant): the adjusted p-value of a
# hypothesis is the maximum local p-value over all its supersets
if (adjust) {
adjusted <- numeric(2^N-1)
for (i in closure) {
if (adjusted[i] < 1) {
localtest <- test(hypotheses[.bit2boolean(i, N)])
if (localtest > alpha) # p-values over alpha are set to 1 to save calculations
localtest <- 1
if (localtest > adjusted[i]) {
offs <- offspring(i)
adjusted[offs] <- pmax(adjusted[offs], localtest)
}
}
}
def <- as.integer(NA)
} else {
# perform closed testing (rejection variant): start with everything
# rejected; a failed local test clears the hypothesis and all subsets
rejected <- !logical(2^N-1)
for (i in closure) {
if (rejected[i]) {
localtest <- test(hypotheses[.bit2boolean(i, N)])
if (localtest > alpha) {
offs <- offspring(i)
rejected[offs] <- FALSE
}
}
}
adjusted <- numeric(0)
def <- .defining(which(rejected), N)
}
# reduce: find defining rejections
# return: alpha and max.alpha are mutually exclusive depending on variant
max.alpha <- ifelse(adjust, alpha, as.numeric(NA))
alpha <- ifelse(adjust, as.numeric(NA), alpha)
out <- new("closure", hypotheses = hypotheses,
alpha = alpha,
adjusted = adjusted,
max.alpha = max.alpha,
defining = def)
return(out)
}
###
# show method for closure object
###
# Prints a summary of the closed testing result: at the stored alpha, lower
# confidence bound on the number of false hypotheses (via pick()) and the
# complementary upper bound on the number of true hypotheses.
setMethod("show", "closure", function(object) {
  cat("Closed testing result on", length(object@hypotheses), "elementary hypotheses.\n")
  if (!is.na(object@alpha)) {
    cat("At confidence level ", 1-object@alpha, ": ", sep="")
    res <- pick(object, object@hypotheses, silent=TRUE)
    cat("False hypotheses >= ", res, "; ", sep="")
    cat("True hypotheses <= ", length(object@hypotheses) - res, ".\n", sep="")
  }
  # BUG FIX: returning 'object' visibly made a direct show(obj) call print the
  # report twice (the visible return is auto-printed again). Return invisibly,
  # as is conventional for show methods.
  invisible(object)
})
###
# slot accession methods for closure object
###
setGeneric("hypotheses", function(object, ...) standardGeneric("hypotheses"))
# Accessor: the character vector of elementary hypothesis names.
setMethod("hypotheses", "closure", function(object, ...) slot(object, "hypotheses"))
setGeneric("alpha", function(object, ...) standardGeneric("alpha"))
# Accessor: the alpha level used for rejection (NA for adjusted-p objects
# until one is set via alpha<-).
setMethod("alpha", "closure", function(object, ...) slot(object, "alpha"))
setGeneric("alpha<-", function(object, value) standardGeneric("alpha<-"))
# Re-threshold an adjusted-p closure object at a new alpha, recomputing the
# defining rejections from the stored adjusted p-values. Setting NULL/NA
# clears the alpha and the defining rejections.
setMethod("alpha<-", "closure", function(object, value) {
  # Only possible when adjusted p-values were stored by closed(adjust = TRUE).
  if (length(object@adjusted) == 0)
    stop("Only closure objects with adjusted p-values can reset alpha.")
  if (is.null(value) || is.na(value)) {
    # Clear: no alpha chosen, hence no defining rejections.
    object@alpha <- as.numeric(NA)
    object@defining <- integer(0)
    return(object)
  }
  if (value > object@max.alpha)
    stop("Adjusted p-values only available up to alpha = ", object@max.alpha)
  object@alpha <- value
  object@defining <- .defining(which(object@adjusted <= value),
                               length(object@hypotheses))
  object
})
setGeneric("defining", function(object, alpha, ...) standardGeneric("defining"))
# Return the defining rejections (as lists of hypothesis names) at the given
# alpha; when alpha is missing, falls back to the object's stored alpha, or to
# max.alpha for adjusted-p objects that never had an alpha set.
setMethod("defining", "closure", function(object, alpha, ...) {
  if (missing(alpha)) {
    if (is.na(object@alpha)) {
      if (object@max.alpha < 1)
        # BUG FIX: the original assigned to a local variable 'alpha' and never
        # used it, so the stale (NA) defining slot was returned. Apply the
        # fallback level to the object so the slot is recomputed.
        alpha(object) <- object@max.alpha
      else
        stop("Please specify alpha.")
    }
  } else
    alpha(object) <- alpha
  .num2names(object@defining, object@hypotheses)
})
setGeneric("shortlist", function(object, alpha, ...) standardGeneric("shortlist"))
# Return the shortlist (minimal conjunctions of hypotheses, see .shortlist)
# at the given alpha; when alpha is missing, falls back to the stored alpha,
# or to max.alpha for adjusted-p objects that never had an alpha set.
setMethod("shortlist", "closure", function(object, alpha, ...) {
  if (missing(alpha)) {
    if (is.na(object@alpha)) {
      if (object@max.alpha < 1)
        # BUG FIX: the original assigned to a local 'alpha' and never used it,
        # so .shortlist() ran on a stale defining slot. Apply the fallback
        # level to the object instead.
        alpha(object) <- object@max.alpha
      else
        stop("Please specify alpha.")
    }
  } else
    alpha(object) <- alpha
  .num2names(.shortlist(object), object@hypotheses)
})
# Largest adjusted p-value over all intersection hypotheses built from the
# selected hypotheses 'reject' that contain exactly M - n of them (M = number
# selected). Requires a closure object with stored adjusted p-values.
adjusted <- function(closure, reject, n = 0) {
  if (is.na(closure@max.alpha))
    stop("no adjusted p-values in this closure object")
  N <- length(closure@hypotheses)
  sel <- which(closure@hypotheses %in% reject)
  M <- length(sel)
  if (n > M)
    stop("value of n larger than number of rejected hypotheses")
  sel.bits <- sum(2^(sel - 1))
  clos <- 1:(2^N - 1)
  base <- 2^(1:N - 1)
  # All distinct intersections of the selected hypotheses (bit-encoded;
  # includes 0 for the empty intersection).
  interest <- unique(bitAnd(sel.bits, clos))
  # Number of selected hypotheses participating in each intersection.
  sizes <- rowSums(do.call(cbind, lapply(base, function(bs) bitAnd(interest, bs) != 0)))
  max(closure@adjusted[interest[sizes == M - n]])
}
# Confidence statement for a user-picked subset 'reject' of hypotheses.
# Fixed-alpha variant: returns a lower confidence bound for the number of
# false hypotheses among the picked set. Adjusted-p variant (object@alpha is
# NA): returns a data.frame of bounds at every confidence level, optionally
# with a barplot of the confidence mass function.
pick <- function(closure, reject, alpha, silent = FALSE, plot = FALSE) {
# preparation: create closure
N <- length(closure@hypotheses)
if (missing(reject)) reject <- closure@hypotheses
reject <- which(closure@hypotheses %in% reject)
M <- length(reject)
clos <- 1:(2^N-1)
base <- 2^(1:N-1)
# the part of the closure that actually matters for this pick
# (all bit-encoded subsets of the picked hypotheses, excluding the empty set)
interest <- unique(bitAnd(sum(2^(reject-1)), clos))
interest <- interest[interest>0]
# should adjusted p variant be used?
if (!missing(alpha))
alpha(closure) <- alpha
if (!is.na(closure@alpha)) {
# fixed alpha variant
isAncestor <- function(x,y) { # is x an ancestor of y?
bitOr(x,y) == x
}
# keep only subsets NOT covered by any defining rejection: these could
# consist entirely of true hypotheses
interest <- interest[!apply(outer(interest, closure@defining, isAncestor), 1, any)]
lengths <- lapply(base, function(bs) bitAnd(interest, bs) != 0)
lengths <- rowSums(do.call(cbind, lengths))
# largest surviving subset size = upper bound on # true hypotheses picked
out <- max(lengths,0)
if (!silent) {
cat(length(reject), " hypotheses selected. At confidence level ", 1-closure@alpha, ":\n", sep="")
cat("False null-hypotheses >= ", length(reject)-out, "; ", sep="")
cat("True null-hypotheses <= ", out, ".\n", sep="")
return(invisible(length(reject)-out))
} else
return(length(reject)-out)
} else {
# adjusted p variant
lengths <- lapply(base, function(bs) bitAnd(interest, bs) != 0)
lengths <- rowSums(do.call(cbind, lengths))
# worst (largest) adjusted p among subsets of each size
cumulative <- tapply(closure@adjusted[interest], lengths, max)
# probability mass assigned to each possible number of true hypotheses
diffs <- rev(diff(rev(c("0"=1, cumulative, 0))))
out <- data.frame(alpha = rev(cumulative),
confidence = 1-rev(cumulative),
"true<=" = M:1-1,
"false>=" = 1:M, check.names=F, row.names=1:M)
if (plot) {
bp <- barplot(diffs, xlab="True hypotheses", ylab = "Confidence probability mass function", ylim=c(0,max(diffs)*1.1))
mids <- (bp[-1] + bp[-length(bp)])/2
text(mids, max(diffs), round(1-cumsum(diffs)[-length(diffs)],3), pos=3)
}
out
}
}
###
# helper functions
###
# converts integer to set notation
# Each element of 'rejected' is a bit-encoded subset; returns a list of
# character vectors with the names of the participating hypotheses.
.num2names <- function(rejected, vars) {
  lapply(rejected, function(r) vars[.bit2boolean(r, N = length(vars))])
}
# converts from integer to boolean (as binary)
# Returns a logical vector of length N: element k is TRUE iff bit k of x is
# set. Uses base R's bitwAnd instead of bitops::bitAnd, dropping the
# third-party dependency; safe because closed() restricts N so that all
# encoded subsets fit in a 32-bit signed integer.
.bit2boolean <- function(x, N) {
  base <- 2^(1:N - 1)
  bitwAnd(x, base) != 0
}
# gets the defining rejections from all rejections
# A rejection is "defining" if it is not implied by (i.e. not an ancestor of)
# another retained rejection; the full rejection set can be reconstructed
# from the defining ones. 'rejected' holds bit-encoded rejected subsets.
.defining <- function(rejected, N) {
closure <- 1:(2^N-1)
# all supersets (ancestors) of x in the closure, including x itself
ancestors <- function(x) {
bitOr(x, closure)
}
isdone <- integer(0)
todo <- rejected
while (length(todo) > 0) {
# keep todo[1]; drop any previously kept set that is an ancestor of it
isdone <- c(setdiff(isdone, ancestors(todo[1])), todo[1])
# and remove all remaining ancestors of todo[1] from the work list
todo <- setdiff(todo, ancestors(todo[1]))
}
isdone
}
# reverses defining hypotheses from (A or B or ...) and (C or D or ...) and ...
# to (A and C and ...) or (B and C and ...) or ...
# result is still in bit-form, to be transformed with .num2names()
# Distribute the conjunction of disjunctions given by the defining rejections
# into a disjunction of conjunctions, then keep only the minimal
# conjunctions. Operates on bit-encoded sets; cl is a "closure" object.
.shortlist <- function(cl) {
  N <- length(cl@hypotheses)
  M <- length(cl@defining)
  base <- 2^(1:N - 1)
  # res accumulates all bitwise unions of one member taken from each
  # defining rejection.
  res <- 0
  # BUG FIX: was 'for (i in 1:M)', which iterates c(1, 0) when the defining
  # slot is empty (e.g. after alpha(object) <- NA); seq_len(M) is safe.
  for (i in seq_len(M)) {
    whichs <- which(bitAnd(cl@defining[i], base) != 0)
    comb <- outer(res, 2^(whichs - 1), bitOr)
    res <- unique(c(comb))
  }
  # Prune non-minimal sets: drop any set that is an ancestor (superset) of
  # another retained set.
  isAncestor <- function(x, y) { # is x an ancestor of y?
    bitOr(x, y) == x
  }
  ancs <- outer(res, res, isAncestor)
  diag(ancs) <- FALSE
  res[!apply(ancs, 1, any)]
}
# Legacy implementation of .shortlist, superseded by the iterative version
# above; kept for reference. Enumerates the full cartesian product of the
# defining rejections by mixed-radix index arithmetic, then prunes
# non-minimal sets. NOTE(review): uses 1:total and 1:M, which misbehave for
# empty defining slots — another reason the new version is preferred.
.shortlist_old <- function(cl) {
N <- length(cl@hypotheses)
M <- length(cl@defining)
base <- 2^(1:N-1)
# number of member hypotheses in each defining rejection
lengths <- sapply(cl@defining, function(x) sum(bitAnd(x, base) != 0))
total <- prod(lengths)
whichs <- lapply(cl@defining, function(x) which(bitAnd(x, base) != 0))
# k-th combination: decode k as a mixed-radix number to pick one member
# from each defining rejection, then bit-encode the chosen conjunction
ands <- unique(sapply(1:total, function(k) {
ix <- (k-1) %/% (total/cumprod(lengths)) %% lengths + 1
choice <- sapply(1:M, function(i) whichs[[i]][ix[i]])
sum(2^(unique(choice)-1))
}))
isAncestor <- function(x,y) { # is x an ancestor of y?
bitOr(x,y) == x
}
# drop conjunctions that are supersets of another retained conjunction
ancs <- outer(ands, ands, isAncestor)
diag(ancs) <- FALSE
ands[!apply(ancs, 1, any)]
}
| /R/closed.R | no_license | cran/cherry | R | false | false | 10,614 | r | setClass("closure",
representation(
hypotheses = "character", # names of hypotheses
alpha = "numeric", # stores chosen alpha for testing
adjusted = "numeric", # stores adjusted p-values
# (has value 1 if adjusted p > alpha)
# (numeric(0) if no adjusted p-values calculated)
max.alpha = "numeric", # highest alpha at which adjusted p-values are calculated
# value NA if no adjusted p-values present
defining = "integer" # defining hypotheses at chosen alpha
# NA if alpha = NA
)
)
###
# This function performs closed testing
###
closed <- function(test, hypotheses, alpha = 0.05, adjust = FALSE) {
# reverse argument order if mistaken
if (is(test, "character") && is(hypotheses, "function")) {
tmp <- test
test <- hypotheses
hypotheses <- tmp
}
# alpha=NA means adjusted p-values
if ((!missing(alpha)) && is.na(alpha)) {
alpha <- 1
adjust <- TRUE
}
# default of alpha is 1 if adjust = TRUE
if (adjust && missing(alpha))
alpha <- 1
# preparation of closure
N <- length(hypotheses)
Nmax <- log2(.Machine$integer.max+1)
if (N > Nmax)
stop("no more than ", Nmax, " hypotheses supported in full closed testing.\n Use a shortcut-based test.")
closure <- 1:(2^N-1)
base <- 2^(1:N-1)
# finds offspring hypotheses of a hypothesis (NB including self)
offspring <- function(x) {
res <- bitAnd(x, closure)
res[res != 0]
}
# sort the closure to decreasing number of participating hypotheses
lengths <- rowSums(sapply(base, function(bs) bitAnd(closure, bs) != 0))
closure <- closure[sort.list(lengths, decreasing = TRUE)]
# perform closed testing (adjusted p variant)
if (adjust) {
adjusted <- numeric(2^N-1)
for (i in closure) {
if (adjusted[i] < 1) {
localtest <- test(hypotheses[.bit2boolean(i, N)])
if (localtest > alpha) # p-values over alpha are set to 1 to save calculations
localtest <- 1
if (localtest > adjusted[i]) {
offs <- offspring(i)
adjusted[offs] <- pmax(adjusted[offs], localtest)
}
}
}
def <- as.integer(NA)
} else {
# perform closed testing (rejection variant)
rejected <- !logical(2^N-1)
for (i in closure) {
if (rejected[i]) {
localtest <- test(hypotheses[.bit2boolean(i, N)])
if (localtest > alpha) {
offs <- offspring(i)
rejected[offs] <- FALSE
}
}
}
adjusted <- numeric(0)
def <- .defining(which(rejected), N)
}
# reduce: find defining rejections
# return
max.alpha <- ifelse(adjust, alpha, as.numeric(NA))
alpha <- ifelse(adjust, as.numeric(NA), alpha)
out <- new("closure", hypotheses = hypotheses,
alpha = alpha,
adjusted = adjusted,
max.alpha = max.alpha,
defining = def)
return(out)
}
###
# show method for closure object
###
setMethod("show", "closure", function(object) {
cat("Closed testing result on", length(object@hypotheses), "elementary hypotheses.\n")
if (!is.na(object@alpha)) {
cat("At confidence level ", 1-object@alpha, ": ", sep="")
res <- pick(object, object@hypotheses, silent=TRUE)
cat("False hypotheses >= ", res, "; ", sep="")
cat("True hypotheses <= ", length(object@hypotheses) - res, ".\n", sep="")
}
object
})
###
# slot accession methods for closure object
###
setGeneric("hypotheses", function(object, ...) standardGeneric("hypotheses"))
setMethod("hypotheses", "closure", function(object, ...) {
object@hypotheses
})
setGeneric("alpha", function(object, ...) standardGeneric("alpha"))
setMethod("alpha", "closure", function(object, ...) {
object@alpha
})
setGeneric("alpha<-", function(object, value) standardGeneric("alpha<-"))
setMethod("alpha<-", "closure", function(object, value) {
if (length(object@adjusted)==0)
stop("Only closure objects with adjusted p-values can reset alpha.")
if (is.null(value) || is.na(value)) {
object@alpha <- as.numeric(NA)
object@defining <- integer(0)
} else {
if (value > object@max.alpha)
stop("Adjusted p-values only available up to alpha = ", object@max.alpha)
object@alpha <- value
rejected <- (object@adjusted <= value)
object@defining <- .defining(which(rejected), length(object@hypotheses))
}
object
})
setGeneric("defining", function(object, alpha, ...) standardGeneric("defining"))
setMethod("defining", "closure", function(object, alpha, ...) {
if (missing(alpha)) {
if (is.na(object@alpha)) {
if (object@max.alpha < 1)
alpha <- object@max.alpha
else
stop("Please specify alpha.")
}
} else
alpha(object) <- alpha
.num2names(object@defining, object@hypotheses)
})
setGeneric("shortlist", function(object, alpha, ...) standardGeneric("shortlist"))
setMethod("shortlist", "closure", function(object, alpha, ...) {
if (missing(alpha)) {
if (is.na(object@alpha)) {
if (object@max.alpha < 1)
alpha <- object@max.alpha
else
stop("Please specify alpha.")
}
} else
alpha(object) <- alpha
.num2names(.shortlist(object), object@hypotheses)
})
adjusted <- function(closure, reject, n = 0) {
# check if adjusted p available
if (is.na(closure@max.alpha))
stop("no adjusted p-values in this closure object")
# transform to number
N <- length(closure@hypotheses)
reject <- which(closure@hypotheses %in% reject)
M <- length(reject)
reject <- sum(2^(reject-1))
if (n>M)
stop("value of n larger than number of rejected hypotheses")
clos <- 1:(2^N-1)
base <- 2^(1:N-1)
interest <- unique(bitAnd(reject, clos))
lengths <- lapply(base, function(bs) bitAnd(interest, bs) != 0)
lengths <- rowSums(do.call(cbind, lengths))
interest <- interest[lengths==M-n]
return(max(closure@adjusted[interest]))
}
pick <- function(closure, reject, alpha, silent = FALSE, plot = FALSE) {
# preparation: create closure
N <- length(closure@hypotheses)
if (missing(reject)) reject <- closure@hypotheses
reject <- which(closure@hypotheses %in% reject)
M <- length(reject)
clos <- 1:(2^N-1)
base <- 2^(1:N-1)
# the part of the closure that actually matters for this pick
interest <- unique(bitAnd(sum(2^(reject-1)), clos))
interest <- interest[interest>0]
# should adjusted p variant be used?
if (!missing(alpha))
alpha(closure) <- alpha
if (!is.na(closure@alpha)) {
# fixed alpha variant
isAncestor <- function(x,y) { # is x an ancestor of y?
bitOr(x,y) == x
}
interest <- interest[!apply(outer(interest, closure@defining, isAncestor), 1, any)]
lengths <- lapply(base, function(bs) bitAnd(interest, bs) != 0)
lengths <- rowSums(do.call(cbind, lengths))
out <- max(lengths,0)
if (!silent) {
cat(length(reject), " hypotheses selected. At confidence level ", 1-closure@alpha, ":\n", sep="")
cat("False null-hypotheses >= ", length(reject)-out, "; ", sep="")
cat("True null-hypotheses <= ", out, ".\n", sep="")
return(invisible(length(reject)-out))
} else
return(length(reject)-out)
} else {
# adjusted p variant
lengths <- lapply(base, function(bs) bitAnd(interest, bs) != 0)
lengths <- rowSums(do.call(cbind, lengths))
cumulative <- tapply(closure@adjusted[interest], lengths, max)
diffs <- rev(diff(rev(c("0"=1, cumulative, 0))))
out <- data.frame(alpha = rev(cumulative),
confidence = 1-rev(cumulative),
"true<=" = M:1-1,
"false>=" = 1:M, check.names=F, row.names=1:M)
if (plot) {
bp <- barplot(diffs, xlab="True hypotheses", ylab = "Confidence probability mass function", ylim=c(0,max(diffs)*1.1))
mids <- (bp[-1] + bp[-length(bp)])/2
text(mids, max(diffs), round(1-cumsum(diffs)[-length(diffs)],3), pos=3)
}
out
}
}
###
# helper functions
###
# converts integer to set notation
.num2names <- function(rejected, vars) {
N <- length(vars)
bools <- lapply(rejected, .bit2boolean, N=N)
lapply(bools, function(b) vars[b])
}
# converts from integer to boolean (as binary)
.bit2boolean <- function(x, N) {
base <- 2^(1:N-1)
bitAnd(x, base) != 0
}
# gets the defining rejections from all rejections
.defining <- function(rejected, N) {
closure <- 1:(2^N-1)
ancestors <- function(x) {
bitOr(x, closure)
}
isdone <- integer(0)
todo <- rejected
while (length(todo) > 0) {
isdone <- c(setdiff(isdone, ancestors(todo[1])), todo[1])
todo <- setdiff(todo, ancestors(todo[1]))
}
isdone
}
# reverses defining hypotheses from (A or B or ...) and (C or D or ...) and ...
# to (A and C and ...) or (B and C and ...) or ...
# result is still in bit-form, to be transformed with .num2names()
# reverses defining hypotheses from (A or B or ...) and (C or D or ...) and ...
# to (A and C and ...) or (B and C and ...) or ...
# result is still in bit-form, to be transformed with .num2names()
.shortlist <- function(cl) {
N <- length(cl@hypotheses)
M <- length(cl@defining)
base <- 2^(1:N-1)
res <- 0
for (i in 1:M) {
whichs <- which(bitAnd(cl@defining[i], base) != 0)
comb <- outer(res, 2^(whichs-1), bitOr)
res <- unique(c(comb))
}
isAncestor <- function(x,y) { # is x an ancestor of y?
bitOr(x,y) == x
}
ancs <- outer(res, res, isAncestor)
diag(ancs) <- FALSE
res[!apply(ancs, 1, any)]
}
.shortlist_old <- function(cl) {
N <- length(cl@hypotheses)
M <- length(cl@defining)
base <- 2^(1:N-1)
lengths <- sapply(cl@defining, function(x) sum(bitAnd(x, base) != 0))
total <- prod(lengths)
whichs <- lapply(cl@defining, function(x) which(bitAnd(x, base) != 0))
ands <- unique(sapply(1:total, function(k) {
ix <- (k-1) %/% (total/cumprod(lengths)) %% lengths + 1
choice <- sapply(1:M, function(i) whichs[[i]][ix[i]])
sum(2^(unique(choice)-1))
}))
isAncestor <- function(x,y) { # is x an ancestor of y?
bitOr(x,y) == x
}
ancs <- outer(ands, ands, isAncestor)
diag(ancs) <- FALSE
ands[!apply(ancs, 1, any)]
}
|
if (!exists("all.terms")) {
message("Loading all possible terms from file ...")
load("/Users/khozzy/GodStemIt/IsItGood/src/main/resources/R/data/all.terms.RData")
} | /IsItGood/src/main/resources/R/loadAllTerms.R | no_license | Libardo1/GodStemIt | R | false | false | 168 | r | if (!exists("all.terms")) {
message("Loading all possible terms from file ...")
load("/Users/khozzy/GodStemIt/IsItGood/src/main/resources/R/data/all.terms.RData")
} |
#' Unit Conversion - SI Prefixes - Base to Mega-
#'
#' Mega- millions, 1 000 000, or 10^6
#'
#' Converts values expressed in base units into mega-units
#' (for example, grams to megagrams).
#'
#' @param x Vector - Values in units of base units
#'
#' @return x, but converted to mega-units
#'
#' @references
#' NIST. Metric (SI) Prefixes. 2022. Accessed 4/7/2022.
#' https://www.nist.gov/pml/weights-and-measures/metric-si-prefixes
unitconversion.siprefix.base.to.mega <- function(x = 1) {
  # One mega-unit equals 1e6 base units.
  x / 1e6
}
# Named aliases for every SI base/derived unit, both spelled-out and symbol
# forms; all share the documentation of unitconversion.siprefix.base.to.mega
# via @rdname. Kept as plain assignments (not generated in a loop) so that
# roxygen2 can attach each alias to the shared help page.
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.second.to.megasecond <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.s.to.Ms <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.meter.to.megameter <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.m.to.Mm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.gram.to.megagram <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.g.to.Mg <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.ampere.to.megaampere <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.A.to.MA <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.kelvin.to.megakelvin <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.K.to.MK <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.mole.to.megamole <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.mol.to.Mmol <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.candela.to.megacandela <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.cd.to.Mcd <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.radian.to.megaradian <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.rad.to.Mrad <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.steradian.to.megasteradian <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.sr.to.Msr <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.hertz.to.megahertz <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Hz.to.MHz <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.newton.to.meganewton <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.N.to.MN <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.pascal.to.megapascal <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Pa.to.MPa <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.joule.to.megajoule <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.J.to.MJ <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.watt.to.megawatt <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.W.to.MW <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.coulomb.to.megacoulomb <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.C.to.MC <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.volt.to.megavolt <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.V.to.MV <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.farad.to.megafarad <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.F.to.MF <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.ohm.to.megaohm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.siemens.to.megasiemens <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.S.to.MS <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.weber.to.megaweber <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Wb.to.MWb <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.tesla.to.megatesla <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.T.to.MT <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.henry.to.megahenry <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.H.to.MH <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lumen.to.megalumen <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lm.to.Mlm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lux.to.megalux <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lx.to.Mlx <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.becquerel.to.megabecquerel <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Bq.to.MBq <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.gray.to.megagray <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Gy.to.MGy <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.sievert.to.megasievert <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Sv.to.MSv <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.katal.to.megakatal <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.kat.to.Mkat <- unitconversion.siprefix.base.to.mega
| /R/unitconversion.siprefix.base.to.mega.R | permissive | burrm/lolcat | R | false | false | 7,028 | r | #' Unit Conversion - SI Prefixes - Base to Mega-
#'
#' Mega- millions, 1 000 000, or 10^6
#'
#' Performs a conversion from base units to mega-units (ex. grams to megagrams).
#'
#' @param x Vector - Values in units of base units
#'
#' @return x, but converted to mega-units
#'
#' @references
#' NIST. Metric (SI) Prefixes. 2022. Accessed 4/7/2022.
#' https://www.nist.gov/pml/weights-and-measures/metric-si-prefixes
unitconversion.siprefix.base.to.mega <- function(
x = 1
) {
x / (10^6)
}
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.second.to.megasecond <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.s.to.Ms <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.meter.to.megameter <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.m.to.Mm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.gram.to.megagram <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.g.to.Mg <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.ampere.to.megaampere <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.A.to.MA <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.kelvin.to.megakelvin <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.K.to.MK <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.mole.to.megamole <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.mol.to.Mmol <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.candela.to.megacandela <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.cd.to.Mcd <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.radian.to.megaradian <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.rad.to.Mrad <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.steradian.to.megasteradian <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.sr.to.Msr <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.hertz.to.megahertz <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Hz.to.MHz <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.newton.to.meganewton <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.N.to.MN <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.pascal.to.megapascal <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Pa.to.MPa <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.joule.to.megajoule <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.J.to.MJ <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.watt.to.megawatt <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.W.to.MW <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.coulomb.to.megacoulomb <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.C.to.MC <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.volt.to.megavolt <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.V.to.MV <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.farad.to.megafarad <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.F.to.MF <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.ohm.to.megaohm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.siemens.to.megasiemens <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.S.to.MS <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.weber.to.megaweber <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Wb.to.MWb <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.tesla.to.megatesla <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.T.to.MT <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.henry.to.megahenry <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.H.to.MH <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lumen.to.megalumen <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lm.to.Mlm <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lux.to.megalux <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.lx.to.Mlx <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.becquerel.to.megabecquerel <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Bq.to.MBq <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.gray.to.megagray <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Gy.to.MGy <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.sievert.to.megasievert <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.Sv.to.MSv <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.katal.to.megakatal <- unitconversion.siprefix.base.to.mega
#' @rdname unitconversion.siprefix.base.to.mega
unitconversion.kat.to.Mkat <- unitconversion.siprefix.base.to.mega
|
# Copyright (C) 2013 - 2019 Metrum Research Group, LLC
#
# This file is part of mrgsolve.
#
# mrgsolve is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# mrgsolve is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgsolve. If not, see <http://www.gnu.org/licenses/>.
# Test setup: RNG reproducibility checks for mrgsolve simulations.
library(testthat)
library(mrgsolve)
library(dplyr)
# Avoid interference from nested test harnesses under R CMD check.
Sys.setenv(R_TESTS="")
options("mrgsolve_mread_quiet"=TRUE)
context("test-rng")
# Built-in house model with unit variances on all four ETAs, so simulated
# profiles vary randomly across individuals.
mod <- mrgsolve:::house(omega=diag(c(1,1,1,1)))
# Strict identity comparison between two simulation outputs.
ident <- function(x,y,...) {
identical(x,y)
}
test_that("Different seeds give different results without call to set.seed()", {
  # Two runs with fresh (unseeded) RNG state should draw different random
  # effects and therefore different output.
  dosed <- init(mod, GUT = 100)
  ids <- data.frame(ID = 1:20)
  out1 <- mrgsim(dosed, idata = ids)
  out2 <- mrgsim(dosed, idata = ids)
  expect_false(ident(out1, out2))
})
test_that("Different seeds give different results with different calls to set.seed()", {
  # Distinct seeds must lead to distinct simulated output.
  dosed <- init(mod, GUT = 100)
  ids <- data.frame(ID = 1:20)
  set.seed(112)
  out1 <- mrgsim(dosed, idata = ids)
  set.seed(333)
  out2 <- mrgsim(dosed, idata = ids)
  expect_false(ident(out1, out2))
})
test_that("Same seeds give same results with call to set.seed()", {
  set.seed(112)
  out1 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
  set.seed(112)
  out2 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
  # BUG FIX: the original asserted ident(out1, out1), which is trivially TRUE
  # and never exercised seed reproducibility; compare the two runs instead.
  expect_true(ident(out1, out2))
})
| /data/genthat_extracted_code/mrgsolve/tests/test-rng.R | no_license | surayaaramli/typeRrh | R | false | false | 1,764 | r | # Copyright (C) 2013 - 2019 Metrum Research Group, LLC
#
# This file is part of mrgsolve.
#
# mrgsolve is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# mrgsolve is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgsolve. If not, see <http://www.gnu.org/licenses/>.
library(testthat)
library(mrgsolve)
library(dplyr)
Sys.setenv(R_TESTS="")
options("mrgsolve_mread_quiet"=TRUE)
context("test-rng")
mod <- mrgsolve:::house(omega=diag(c(1,1,1,1)))
ident <- function(x,y,...) {
identical(x,y)
}
test_that("Different seeds give different results without call to set.seed()", {
out1 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
out2 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
expect_false(ident(out1,out2))
})
test_that("Different seeds give different results with different calls to set.seed()", {
set.seed(112)
out1 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
set.seed(333)
out2 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
expect_false(ident(out1,out2))
})
test_that("Same seeds give same results with call to set.seed()", {
set.seed(112)
out1 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
set.seed(112)
out2 <- mrgsim(mod %>% init(GUT=100), idata=data.frame(ID=1:20))
expect_true(ident(out1,out1))
})
|
# Exploration script: sample feature subsets of the Ionosphere data and build
# a train/test split (createDataPartition presumably comes from caret, which
# is attached elsewhere — TODO confirm).
library(mlbench)
data("Ionosphere")
# Random subset of 15 predictor columns plus the Class column.
dummy<-Ionosphere[ , c(sample(ncol(Ionosphere)-2,15), ncol(Ionosphere)) ]
#datos <- Ionosphere[,c(sample(ncol(Ionosphere)-1,15),ncol(Ionosphere))]
head(dummy)
help("trainControl")
# split the data into train and test sets
datos<- Ionosphere[,c(sample(ncol(Ionosphere)-2,14),ncol(Ionosphere))]
inTraining <- createDataPartition(datos$Class, p = .75, list = FALSE)
train <- datos[ inTraining,]
test <- datos[-inTraining,]
# drop columns that contain any zero value (mask computed on the full data)
training <- train[,apply(datos,2,function(x){all(x!=0)})]
testing <- test[,apply(datos,2,function(x){all(x!=0)})]
dim(train)
dim(datos)
###
###
help("train")
head(train)
###
data("iris")
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
head(TrainData)
head(TrainClasses)
#para generar todos los graficos en una misma grafica
#se intala la libreria grid extra
#se llama asi
#library(gridExtra)
#grid.arrange(plot1, plot2,...,plotn, ncol=3, nrow=3) --->ahi le paso de cuanto
#por cuanto quiero la grilla
help("par")
help("hclust")
help("train")
set.seed(123)
# Seleccionamos 15 filas de las 50 que tiene este dataset
data("USArrests")
head(USArrests)
ss <- sample(1:50, 15)
df <- USArrests[ss, ]
df
#Si quiero ver la estructuta interna de un dataset lo que uso es el comando str = structure
str(data)
####
help("hclust")
head(Ionosphere)
#
library(devtools)
library(nnet)
source('https://gist.githubusercontent.com/fawda123/7471137/raw/466c1474d0a505ff044412703516c34f1a4684a5/nnet_plot_update.r')
source_url('https://gist.githubusercontent.com/fawda123/7471137/raw/466c1474d0a505ff044412703516c34f1a4684a5/nnet_plot_update.r')
mlp1$finalModel
| /posgrado/clase-2/randomCmds.R | no_license | martinezmelisapamela/r-learning | R | false | false | 1,639 | r | library(mlbench)
# Duplicate copy of the exploratory script above (this dataset dump stores
# the same file in two columns; its library(mlbench) line is fused into the
# preceding metadata row). Annotated only -- RNG-sensitive statement order.
data("Ionosphere")
# Pick 15 random predictor columns (excluding the last two) plus Class.
dummy<-Ionosphere[ , c(sample(ncol(Ionosphere)-2,15), ncol(Ionosphere)) ]
#datos <- Ionosphere[,c(sample(ncol(Ionosphere)-1,15),ncol(Ionosphere))]
head(dummy)
help("trainControl")
# Split the data into train and test sets.
datos<- Ionosphere[,c(sample(ncol(Ionosphere)-2,14),ncol(Ionosphere))]
# NOTE(review): createDataPartition() belongs to the caret package, which is
# never loaded here -- errors unless library(caret) was run earlier.
inTraining <- createDataPartition(datos$Class, p = .75, list = FALSE)
train <- datos[ inTraining,]
test <- datos[-inTraining,]
# Drop columns that contain zeros.
# NOTE(review): all(x != 0) keeps only columns with NO zero anywhere, and
# apply() coerces the data frame to a matrix first -- confirm intent.
training <- train[,apply(datos,2,function(x){all(x!=0)})]
testing <- test[,apply(datos,2,function(x){all(x!=0)})]
dim(train)
dim(datos)
###
###
help("train")
head(train)
###
# iris example: separate the four numeric predictors from the class label.
data("iris")
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
head(TrainData)
head(TrainClasses)
# To draw several plots on one page, install the gridExtra package and
# load it like this:
#library(gridExtra)
#grid.arrange(plot1, plot2,...,plotn, ncol=3, nrow=3) ---> pass the desired
# grid dimensions (rows x columns) here.
help("par")
help("hclust")
help("train")
set.seed(123)
# Select 15 of the 50 rows in this dataset.
data("USArrests")
head(USArrests)
ss <- sample(1:50, 15)
df <- USArrests[ss, ]
df
# To inspect the internal structure of a dataset, use str() (= structure).
# NOTE(review): str(data) inspects the base *function* `data`, not a
# dataset -- str(df) was probably intended.
str(data)
####
help("hclust")
head(Ionosphere)
#
library(devtools)
library(nnet)
# Load a plotting helper for nnet models from a gist; source_url() is the
# devtools variant of the plain source() call above it (redundant pair).
source('https://gist.githubusercontent.com/fawda123/7471137/raw/466c1474d0a505ff044412703516c34f1a4684a5/nnet_plot_update.r')
source_url('https://gist.githubusercontent.com/fawda123/7471137/raw/466c1474d0a505ff044412703516c34f1a4684a5/nnet_plot_update.r')
# NOTE(review): `mlp1` is never defined in this script -- this line errors
# unless a fitted model named mlp1 already exists in the session.
mlp1$finalModel
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.